From 0e61a2d51e13497314a365636cbcf67aa1cd1d12 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 4 Dec 2019 14:58:08 +0100 Subject: [PATCH 001/207] feat(nks): colecting timecodes with otio --- .../nukestudio/publish/collect_timecodes.py | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 pype/plugins/nukestudio/publish/collect_timecodes.py diff --git a/pype/plugins/nukestudio/publish/collect_timecodes.py b/pype/plugins/nukestudio/publish/collect_timecodes.py new file mode 100644 index 0000000000..b3d4a5e8c5 --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_timecodes.py @@ -0,0 +1,88 @@ +import pyblish.api +import opentimelineio.opentime as otio_ot + + +class CollectClipTimecodes(pyblish.api.InstancePlugin): + """Collect time with OpenTimelineIO: source_h(In,Out)[timecode, sec], timeline(In,Out)[timecode, sec]""" + + order = pyblish.api.CollectorOrder + 0.101 + label = "Collect Timecodes" + hosts = ["nukestudio"] + + def process(self, instance): + + data = dict() + self.log.debug("__ instance.data: {}".format(instance.data)) + # Timeline data. + handle_start = instance.data["handleStart"] + handle_end = instance.data["handleEnd"] + + source_in_h = instance.data("sourceInH", + instance.data("sourceIn") - handle_start) + source_out_h = instance.data("sourceOutH", + instance.data("sourceOut") + handle_end) + + timeline_in = instance.data["clipIn"] + timeline_out = instance.data["clipOut"] + + # set frame start with tag or take it from timeline + frame_start = instance.data.get("startingFrame") + + if not frame_start: + frame_start = timeline_in + + source = instance.data.get("source") + + otio_data = dict() + self.log.debug("__ source: `{}`".format(source)) + + rate_fps = instance.context.data["fps"] + + otio_in_h_ratio = otio_ot.RationalTime( + value=(source.timecodeStart() + ( + source_in_h + (source_out_h - source_in_h))), + rate=rate_fps) + + otio_out_h_ratio = otio_ot.RationalTime( + value=(source.timecodeStart() + source_in_h), + rate=rate_fps) + + otio_timeline_in_ratio = otio_ot.RationalTime( + value=int( + instance.data.get("timelineTimecodeStart", 0)) + timeline_in, + rate=rate_fps) + + otio_timeline_out_ratio = otio_ot.RationalTime( + value=int( + instance.data.get("timelineTimecodeStart", 0)) + timeline_out, + rate=rate_fps) + + otio_data.update({ + + "otioClipInHTimecode": otio_ot.to_timecode(otio_in_h_ratio), + + "otioClipOutHTimecode": otio_ot.to_timecode(otio_out_h_ratio), + + "otioClipInHSec": otio_ot.to_seconds(otio_in_h_ratio), + + "otioClipOutHSec": otio_ot.to_seconds(otio_out_h_ratio), + + "otioTimelineInTimecode": otio_ot.to_timecode( + otio_timeline_in_ratio), + + "otioTimelineOutTimecode": otio_ot.to_timecode( + otio_timeline_out_ratio), + + "otioTimelineInSec": otio_ot.to_seconds(otio_timeline_in_ratio), + + "otioTimelineOutSec": otio_ot.to_seconds(otio_timeline_out_ratio) + }) + + data.update({ + "otioData": otio_data, + "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio), + "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio), + } + ) + instance.data.update(data) + self.log.debug("data: {}".format(instance.data)) From 89e5e72059c7e27b091ee36ab147aa369505787c Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 4 Dec 2019 15:05:51 +0100 Subject: [PATCH 002/207] feat(nks): adding media/clip duration to collect frame ranges --- pype/plugins/nukestudio/publish/collect_frame_ranges.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/publish/collect_frame_ranges.py 
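The timecode collector in PATCH 001 above builds everything from a small slice of the OpenTimelineIO opentime API. A minimal sketch of those calls, with an illustrative frame value that is not taken from the patch:

    import opentimelineio.opentime as otio_ot

    frame = otio_ot.RationalTime(value=86400, rate=24.0)  # frame 86400 of a 24 fps clip
    print(otio_ot.to_timecode(frame))  # 01:00:00:00
    print(otio_ot.to_seconds(frame))   # 3600.0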
b/pype/plugins/nukestudio/publish/collect_frame_ranges.py index 38224f683d..e0a53d7501 100644 --- a/pype/plugins/nukestudio/publish/collect_frame_ranges.py +++ b/pype/plugins/nukestudio/publish/collect_frame_ranges.py @@ -34,15 +34,22 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin): frame_start = timeline_in frame_end = frame_start + (timeline_out - timeline_in) + source = instance.data.get("source") data.update( { + data.update({ + "sourceFirst": source_in_h, "sourceInH": source_in_h, "sourceOutH": source_out_h, "frameStart": frame_start, "frameEnd": frame_end, "clipInH": timeline_in_h, - "clipOutH": timeline_out_h + "clipOutH": timeline_out_h, + "mediaDurationH": instance.data.get( + "mediaDuration") + handle_start + handle_end, + "clipDurationH": instance.data.get( + "clipDuration") + handle_start + handle_end } ) self.log.debug("__ data: {}".format(data)) From cf01f447265170fbd009d88e24cf2686018de793 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 4 Dec 2019 15:09:16 +0100 Subject: [PATCH 003/207] feat(nks): collect review identify if mediaSource needs to be trimed --- pype/plugins/nukestudio/publish/collect_reviews.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index f9032b2ca4..c127b977e6 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -100,6 +100,19 @@ class CollectReviews(api.InstancePlugin): "name": "preview", "ext": ext } + + # if int(rev_inst.data.get("sourceIn")) > + mediaDuration = instance.data.get("mediaDuration") + clipDuration = instance.data.get("clipDuration") + + if mediaDuration > clipDuration: + self.log.debug("Media duration higher: {}".format( + (mediaDuration - clipDuration))) + # representation.update({ + # "frameStart": instance.data.get("sourceInH"), + # "frameEnd": instance.data.get("sourceOutH") + # }) + instance.data["representations"].append(representation) self.log.debug("Added representation: {}".format(representation)) From 128719aeb12a314ac0122dc612000ba33a758dbd Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 4 Dec 2019 15:09:50 +0100 Subject: [PATCH 004/207] feat(nks): improving calculation of fps --- pype/plugins/nukestudio/publish/collect_framerate.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/publish/collect_framerate.py b/pype/plugins/nukestudio/publish/collect_framerate.py index a0fd4df599..694052f802 100644 --- a/pype/plugins/nukestudio/publish/collect_framerate.py +++ b/pype/plugins/nukestudio/publish/collect_framerate.py @@ -1,5 +1,6 @@ from pyblish import api + class CollectFramerate(api.ContextPlugin): """Collect framerate from selected sequence.""" @@ -9,4 +10,13 @@ class CollectFramerate(api.ContextPlugin): def process(self, context): sequence = context.data["activeSequence"] - context.data["fps"] = sequence.framerate().toFloat() + context.data["fps"] = self.get_rate(sequence) + + def get_rate(self, sequence): + num, den = sequence.framerate().toRational() + rate = float(num) / float(den) + + if rate.is_integer(): + return rate + + return round(rate, 3) From 49e166044563b9fdced37747fa69ef31f3986e09 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 17 Mar 2020 18:22:12 +0100 Subject: [PATCH 005/207] initial commit `PYPE_STUDIO_CORE_*` environment keys replaced with `PYPE_CORE_*` --- pype/plugins/global/publish/submit_publish_job.py | 4 ++-- 
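The framerate change in PATCH 004 above swaps toFloat() for toRational() so NTSC-style rates survive the round trip. A worked example with assumed rational values:

    # 23.976 fps sequences typically report the rational 24000/1001
    num, den = 24000, 1001
    rate = float(num) / float(den)   # 23.976023976023978
    print(round(rate, 3))            # 23.976
    print((25.0).is_integer())       # True -> integer rates are returned unrounded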
pype/plugins/maya/publish/submit_maya_muster.py | 15 +++++++-------- pype/plugins/nuke/publish/submit_nuke_deadline.py | 9 ++++----- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 47c0272254..bc2a5384e6 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -21,8 +21,8 @@ def _get_script(): module_path = module_path[: -len(".pyc")] + ".py" module_path = os.path.normpath(module_path) - mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"]) - network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"]) + mount_root = os.path.normpath(os.environ["PYPE_CORE_MOUNT"]) + network_root = os.path.normpath(os.environ["PYPE_CORE_PATH"]) module_path = module_path.replace(mount_root, network_root) diff --git a/pype/plugins/maya/publish/submit_maya_muster.py b/pype/plugins/maya/publish/submit_maya_muster.py index ac60c40bf7..af968ed773 100644 --- a/pype/plugins/maya/publish/submit_maya_muster.py +++ b/pype/plugins/maya/publish/submit_maya_muster.py @@ -311,12 +311,11 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): # replace path for UNC / network share paths, co PYPE is found # over network. It assumes PYPE is located somewhere in - # PYPE_STUDIO_CORE_PATH + # PYPE_CORE_PATH pype_root = os.environ["PYPE_ROOT"].replace( - os.path.normpath( - os.environ['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - os.environ['PYPE_STUDIO_CORE_PATH'])) # noqa + os.path.normpath(os.environ['PYPE_CORE_MOUNT']), + os.path.normpath(os.environ['PYPE_CORE_PATH']) + ) # we must provide either full path to executable or use musters own # python named MPython.exe, residing directly in muster bin @@ -521,7 +520,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): clean_path = "" self.log.debug("key: {}".format(key)) to_process = environment[key] - if key == "PYPE_STUDIO_CORE_MOUNT": + if key == "PYPE_CORE_MOUNT": clean_path = environment[key] elif "://" in environment[key]: clean_path = environment[key] @@ -542,8 +541,8 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): # this should replace paths so they are pointing to network share clean_path = clean_path.replace( - os.path.normpath(environment['PYPE_STUDIO_CORE_MOUNT']), - os.path.normpath(environment['PYPE_STUDIO_CORE_PATH'])) + os.path.normpath(environment['PYPE_CORE_MOUNT']), + os.path.normpath(environment['PYPE_CORE_PATH'])) clean_environment[key] = clean_path return clean_environment diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 0a9ef33398..9ee988b5ae 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -198,7 +198,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): clean_path = "" self.log.debug("key: {}".format(key)) to_process = environment[key] - if key == "PYPE_STUDIO_CORE_MOUNT": + if key == "PYPE_CORE_MOUNT": clean_path = environment[key] elif "://" in environment[key]: clean_path = environment[key] @@ -221,10 +221,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): clean_path = clean_path.replace('python2', 'python3') clean_path = clean_path.replace( - os.path.normpath( - environment['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - environment['PYPE_STUDIO_CORE_PATH'])) # noqa + os.path.normpath(environment['PYPE_CORE_MOUNT']), + 
os.path.normpath(environment['PYPE_CORE_PATH']) + ) clean_environment[key] = clean_path environment = clean_environment From f4df58986c195133b9e2d6cf77da78fc247a1579 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 18 Mar 2020 17:39:36 +0100 Subject: [PATCH 006/207] pype init sets root by current project --- pype/__init__.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/__init__.py b/pype/__init__.py index 5cd9832558..34d2d90649 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -3,7 +3,7 @@ import os from pyblish import api as pyblish from avalon import api as avalon from .lib import filter_pyblish_plugins -from pypeapp import config +from pypeapp import config, Roots import logging @@ -85,6 +85,10 @@ def install(): avalon.register_plugin_path(avalon.Loader, plugin_path) avalon.register_plugin_path(avalon.Creator, plugin_path) + if project_name: + root_obj = Roots(project_name) + root = root_obj.roots + avalon.register_root(root) # apply monkey patched discover to original one avalon.discover = patched_discover From bda292a64fce755de40774b897e3364c3ccc6087 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 20 Mar 2020 14:46:59 +0100 Subject: [PATCH 007/207] feat(global): review workflow accepting nks host --- pype/plugins/global/publish/extract_burnin.py | 4 ++-- pype/plugins/global/publish/extract_review.py | 7 ++++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 086a1fdfb2..f136a2fdac 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -18,7 +18,7 @@ class ExtractBurnin(pype.api.Extractor): label = "Extract burnins" order = pyblish.api.ExtractorOrder + 0.03 families = ["review", "burnin"] - hosts = ["nuke", "maya", "shell"] + hosts = ["nuke", "maya", "shell", "nukestudio"] optional = True def process(self, instance): @@ -185,7 +185,7 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug("Output: {}".format(output)) repre_update = { - "anatomy_template": "render", + "anatomy_template": repre.get("anatomy_template", "render"), "files": movieFileBurnin, "name": repre["name"], "tags": [x for x in repre["tags"] if x != "delete"] diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index c8a8510fb2..7c5f90b135 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -20,15 +20,15 @@ class ExtractReview(pyblish.api.InstancePlugin): label = "Extract Review" order = pyblish.api.ExtractorOrder + 0.02 families = ["review"] - hosts = ["nuke", "maya", "shell"] + hosts = ["nuke", "maya", "shell", "nukestudio"] outputs = {} ext_filter = [] to_width = 1920 to_height = 1080 - def process(self, instance): + def process(self, instance): output_profiles = self.outputs or {} inst_data = instance.data @@ -170,7 +170,8 @@ class ExtractReview(pyblish.api.InstancePlugin): frame_start_handle = frame_start - handle_start frame_end_handle = frame_end + handle_end if isinstance(repre["files"], list): - if frame_start_handle != repre.get("detectedStart", frame_start_handle): + if frame_start_handle != repre.get( + "detectedStart", frame_start_handle): frame_start_handle = repre.get("detectedStart") # exclude handle if no handles defined From b149388be34b1c1a9722c2d685a15a0510873876 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 20 Mar 2020 14:47:18 +0100 Subject: [PATCH 008/207] 
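PATCH 005 above renames the `PYPE_STUDIO_CORE_*` variables but keeps the same mount-to-network remapping idea. A hedged sketch with invented roots (the real values come from the `PYPE_CORE_MOUNT` and `PYPE_CORE_PATH` environment variables):

    import ntpath

    module_path = ntpath.normpath("P:/pipeline/pype/scripts/publish_filesequence.py")
    mount_root = ntpath.normpath("P:/pipeline")          # stand-in for PYPE_CORE_MOUNT
    network_root = ntpath.normpath("//studio/pipeline")  # stand-in for PYPE_CORE_PATH
    print(module_path.replace(mount_root, network_root))
    # \\studio\pipeline\pype\scripts\publish_filesequence.py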
feat(nks): adding plugin for cutting video parts --- .../publish/extract_review_cutup_video.py | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 pype/plugins/nukestudio/publish/extract_review_cutup_video.py diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py new file mode 100644 index 0000000000..445a26a184 --- /dev/null +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -0,0 +1,107 @@ +import os +from pyblish import api +import pype + + +class ExtractReviewCutUpVideo(pype.api.Extractor): + """Cut up clips from long video file""" + + order = api.ExtractorOrder + # order = api.CollectorOrder + 0.1023 + label = "Extract Review CutUp Video" + hosts = ["nukestudio"] + families = ["review"] + + # presets + tags_addition = [] + + def process(self, instance): + inst_data = instance.data + asset = inst_data['asset'] + + # get representation and loop them + representations = inst_data["representations"] + + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + + # filter out mov and img sequences + representations_new = representations[:] + for repre in representations: + input_args = list() + output_args = list() + + tags = repre.get("tags", []) + + if "cut-up" not in tags: + continue + + self.log.debug("__ repre: {}".format(repre)) + + file = repre.get("files") + staging_dir = repre.get("stagingDir") + frame_start = repre.get("frameStart") + frame_end = repre.get("frameEnd") + fps = repre.get("fps") + ext = repre.get("ext") + + new_file_name = "{}_{}".format(asset, file) + + full_input_path = os.path.join( + staging_dir, file) + + full_output_path = os.path.join( + staging_dir, new_file_name) + + self.log.debug("__ full_input_path: {}".format(full_input_path)) + self.log.debug("__ full_output_path: {}".format(full_output_path)) + + input_args.append("-y") + input_args.append("-i {}".format(full_input_path)) + + start_sec = float(frame_start) / fps + input_args.append("-ss {:0.2f}".format(start_sec)) + + output_args.append("-c copy") + duration_sec = float(frame_end - frame_start + 1) / fps + output_args.append("-t {:0.2f}".format(duration_sec)) + + # output filename + output_args.append(full_output_path) + + mov_args = [ + ffmpeg_path, + " ".join(input_args), + " ".join(output_args) + ] + subprcs_cmd = " ".join(mov_args) + + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) + output = pype.api.subprocess(subprcs_cmd) + self.log.debug("Output: {}".format(output)) + + repre_new = { + "files": new_file_name, + "stagingDir": staging_dir, + "frameStart": frame_start, + "frameEnd": frame_end, + "frameStartFtrack": frame_start, + "frameEndFtrack": frame_end, + "step": 1, + "fps": fps, + "name": "cut_up_preview", + "tags": ["cut-up", "review", "delete"] + self.tags_addition, + "ext": ext, + "anatomy_template": "publish" + } + + representations_new.append(repre_new) + + for repre in representations_new: + if ("delete" in repre.get("tags", [])) and ( + "cut_up_preview" not in repre["name"]): + representations_new.remove(repre) + + self.log.debug( + "new representations: {}".format(representations_new)) + instance.data["representations"] = representations_new From 55b3b41fd944f4c41c400a79e60f948aa4652b9b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 20 Mar 2020 14:47:54 +0100 Subject: [PATCH 009/207] feat(nks): adjusting collect review for cutting --- .../nukestudio/publish/collect_reviews.py | 27 +++++++++---------- 1 file changed, 12 
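The cut-up extractor in PATCH 008 above only assembles strings for ffmpeg; the frame-to-seconds arithmetic is the interesting part. With assumed values (24 fps, frames 48 to 95, made-up paths) the resulting command looks like this:

    fps = 24.0
    frame_start, frame_end = 48, 95
    start_sec = float(frame_start) / fps                     # 2.00
    duration_sec = float(frame_end - frame_start + 1) / fps  # 2.00
    print("ffmpeg -y -i /stage/review.mov -ss {:0.2f}"
          " -c copy -t {:0.2f} /stage/sh010_review.mov".format(
              start_sec, duration_sec))
    # ffmpeg -y -i /stage/review.mov -ss 2.00 -c copy -t 2.00 /stage/sh010_review.mov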
insertions(+), 15 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index ed9b7a3636..72e0c03009 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -78,8 +78,6 @@ class CollectReviews(api.InstancePlugin): file_dir = os.path.dirname(file_path) file = os.path.basename(file_path) ext = os.path.splitext(file)[-1][1:] - handleStart = rev_inst.data.get("handleStart") - handleEnd = rev_inst.data.get("handleEnd") # change label instance.data["label"] = "{0} - {1} - ({2}) - review".format( @@ -94,27 +92,26 @@ class CollectReviews(api.InstancePlugin): "stagingDir": file_dir, "frameStart": rev_inst.data.get("sourceIn"), "frameEnd": rev_inst.data.get("sourceOut"), - "frameStartFtrack": rev_inst.data.get("sourceIn") - handleStart, - "frameEndFtrack": rev_inst.data.get("sourceOut") + handleEnd, + "frameStartFtrack": rev_inst.data.get("sourceInH"), + "frameEndFtrack": rev_inst.data.get("sourceOutH"), "step": 1, "fps": rev_inst.data.get("fps"), - "preview": True, - "thumbnail": False, "name": "preview", + "tags": ["preview"], "ext": ext } - # if int(rev_inst.data.get("sourceIn")) > - mediaDuration = instance.data.get("mediaDuration") - clipDuration = instance.data.get("clipDuration") + media_duration = instance.data.get("mediaDuration") + clip_duration_h = instance.data.get("clipDurationH") - if mediaDuration > clipDuration: + if media_duration > clip_duration_h: self.log.debug("Media duration higher: {}".format( - (mediaDuration - clipDuration))) - # representation.update({ - # "frameStart": instance.data.get("sourceInH"), - # "frameEnd": instance.data.get("sourceOutH") - # }) + (media_duration - clip_duration_h))) + representation.update({ + "frameStart": instance.data.get("sourceInH"), + "frameEnd": instance.data.get("sourceOutH"), + "tags": ["cut-up", "delete"] + }) instance.data["representations"].append(representation) From 18f6acc794ef999067585380bd9583242d2bd6d1 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 20 Mar 2020 14:48:20 +0100 Subject: [PATCH 010/207] clean(nks): old way of representation data --- pype/plugins/nukestudio/publish/collect_plates.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index 4ed281f0ee..56d6db0166 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -147,7 +147,6 @@ class CollectPlatesData(api.InstancePlugin): "version": version }) - try: basename, ext = os.path.splitext(source_file) head, padding = os.path.splitext(basename) @@ -192,8 +191,7 @@ class CollectPlatesData(api.InstancePlugin): "frameEnd": instance.data["sourceOut"] - instance.data["sourceIn"] + 1, 'step': 1, 'fps': instance.context.data["fps"], - 'preview': True, - 'thumbnail': False, + 'tags': ["preview"], 'name': "preview", 'ext': "mov", } From ac9a20cf08dded766a453eb2115fd8e069a1bc28 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 20 Mar 2020 14:48:48 +0100 Subject: [PATCH 011/207] fix(nks): improving data --- .../publish/collect_frame_ranges.py | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_frame_ranges.py b/pype/plugins/nukestudio/publish/collect_frame_ranges.py index e0a53d7501..24b23fae01 100644 --- a/pype/plugins/nukestudio/publish/collect_frame_ranges.py +++ 
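The H-suffixed values used in PATCH 009 above and PATCH 011 just below are plain ranges padded by handles; worked numbers (assumed, not from the patches):

    handle_start = handle_end = 12
    source_in, source_out = 100, 148
    source_in_h = source_in - handle_start       # 88
    source_out_h = source_out + handle_end       # 160
    clip_duration = source_out - source_in + 1   # 49
    clip_duration_h = clip_duration + handle_start + handle_end  # 73
    # CollectReviews tags the clip "cut-up" when mediaDuration > clip_duration_h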
b/pype/plugins/nukestudio/publish/collect_frame_ranges.py @@ -36,20 +36,16 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin): frame_end = frame_start + (timeline_out - timeline_in) source = instance.data.get("source") - data.update( - { data.update({ - "sourceFirst": source_in_h, - "sourceInH": source_in_h, - "sourceOutH": source_out_h, - "frameStart": frame_start, - "frameEnd": frame_end, - "clipInH": timeline_in_h, - "clipOutH": timeline_out_h, - "mediaDurationH": instance.data.get( - "mediaDuration") + handle_start + handle_end, - "clipDurationH": instance.data.get( - "clipDuration") + handle_start + handle_end + "sourceFirst": source_in_h, + "sourceInH": source_in_h, + "sourceOutH": source_out_h, + "frameStart": frame_start, + "frameEnd": frame_end, + "clipInH": timeline_in_h, + "clipOutH": timeline_out_h, + "clipDurationH": instance.data.get( + "clipDuration") + handle_start + handle_end } ) self.log.debug("__ data: {}".format(data)) From 31a7c4405b835eff4f6d82fb3b4acd0d063be02e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 20 Mar 2020 14:49:14 +0100 Subject: [PATCH 012/207] fix(nks): duration method was wrong --- pype/plugins/nukestudio/publish/collect_clips.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 6a1dad9a6d..78920fdbb9 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -97,8 +97,7 @@ class CollectClips(api.ContextPlugin): "effects": effects, "sourceIn": int(item.sourceIn()), "sourceOut": int(item.sourceOut()), - "mediaDuration": (int(item.sourceOut()) - - int(item.sourceIn())) + 1, + "mediaDuration": source.duration(), "clipIn": int(item.timelineIn()), "clipOut": int(item.timelineOut()), "clipDuration": (int(item.timelineOut()) - From b503870d3f93164495048c0b0b0a246bd0720788 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 20 Mar 2020 14:49:51 +0100 Subject: [PATCH 013/207] fix(global): original repre tags are included to preset tags --- pype/plugins/global/publish/extract_review.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 7c5f90b135..ea05360250 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -77,6 +77,12 @@ class ExtractReview(pyblish.api.InstancePlugin): repre_new = repre.copy() ext = profile.get("ext", None) p_tags = profile.get('tags', []) + + # append repre tags into profile tags + for t in tags: + if t not in p_tags: + p_tags.append(t) + self.log.info("p_tags: `{}`".format(p_tags)) # adding control for presets to be sequence From ee99289ff31d5339405b867d5cc7c9abf1bf373d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 20 Mar 2020 15:30:27 +0100 Subject: [PATCH 014/207] formatting changes --- pype/ftrack/actions/action_create_folders.py | 123 ++++++------------- 1 file changed, 37 insertions(+), 86 deletions(-) diff --git a/pype/ftrack/actions/action_create_folders.py b/pype/ftrack/actions/action_create_folders.py index 68cf837469..80618e67e8 100644 --- a/pype/ftrack/actions/action_create_folders.py +++ b/pype/ftrack/actions/action_create_folders.py @@ -12,9 +12,6 @@ from pypeapp import config, Anatomy class CreateFolders(BaseAction): - - '''Custom action.''' - #: Action identifier. 
identifier = 'create.folders' @@ -29,75 +26,69 @@ class CreateFolders(BaseAction): db = DbConnector() def discover(self, session, entities, event): - ''' Validation ''' if len(entities) != 1: return False - not_allowed = ['assetversion', 'project'] + not_allowed = ["assetversion", "project"] if entities[0].entity_type.lower() in not_allowed: return False return True def interface(self, session, entities, event): - if event['data'].get('values', {}): + if event["data"].get("values", {}): return entity = entities[0] without_interface = True - for child in entity['children']: - if child['object_type']['name'].lower() != 'task': + for child in entity["children"]: + if child["object_type"]["name"].lower() != "task": without_interface = False break self.without_interface = without_interface if without_interface: return - title = 'Create folders' + title = "Create folders" - entity_name = entity['name'] + entity_name = entity["name"] msg = ( - 'Do you want create folders also' - ' for all children of "{}"?' + "Do you want create folders also" + " for all children of \"{}\"?" ) - if entity.entity_type.lower() == 'project': - entity_name = entity['full_name'] - msg = msg.replace(' also', '') - msg += '(Project root won\'t be created if not checked)' + if entity.entity_type.lower() == "project": + entity_name = entity["full_name"] + msg = msg.replace(" also", "") + msg += "(Project root won't be created if not checked)
" items = [] item_msg = { - 'type': 'label', - 'value': msg.format(entity_name) + "type": "label", + "value": msg.format(entity_name) } item_label = { - 'type': 'label', - 'value': 'With all chilren entities' + "type": "label", + "value": "With all chilren entities" } item = { - 'name': 'children_included', - 'type': 'boolean', - 'value': False + "name": "children_included", + "type": "boolean", + "value": False } items.append(item_msg) items.append(item_label) items.append(item) - if len(items) == 0: - return { - 'success': False, - 'message': 'Didn\'t found any running jobs' - } - else: - return { - 'items': items, - 'title': title - } + return { + "items": items, + "title": title + } def launch(self, session, entities, event): '''Callback method for custom action.''' with_childrens = True if self.without_interface is False: - if 'values' not in event['data']: + if "values" not in event["data"]: return - with_childrens = event['data']['values']['children_included'] + with_childrens = event["data"]["values"]["children_included"] + entity = entities[0] if entity.entity_type.lower() == 'project': proj = entity @@ -105,6 +96,7 @@ class CreateFolders(BaseAction): proj = entity['project'] project_name = proj['full_name'] project_code = proj['name'] + if entity.entity_type.lower() == 'project' and with_childrens == False: return { 'success': True, @@ -136,21 +128,20 @@ class CreateFolders(BaseAction): template_publish = templates["avalon"]["publish"] collected_paths = [] - presets = config.get_presets()['tools']['sw_folders'] + presets = config.get_presets()["tools"]["sw_folders"] for entity in all_entities: - if entity.entity_type.lower() == 'project': + if entity.entity_type.lower() == "project": continue ent_data = data.copy() - asset_name = entity['name'] - ent_data['asset'] = asset_name + ent_data["asset"] = entity["name"] - parents = entity['link'] - hierarchy_names = [p['name'] for p in parents[1:-1]] - hierarchy = '' + parents = entity["link"][1:-1] + hierarchy_names = [p["name"] for p in parents] + hierarchy = "" if hierarchy_names: hierarchy = os.path.sep.join(hierarchy_names) - ent_data['hierarchy'] = hierarchy + ent_data["hierarchy"] = hierarchy tasks_created = False if entity['children']: @@ -222,8 +213,8 @@ class CreateFolders(BaseAction): os.makedirs(path) return { - 'success': True, - 'message': 'Created Folders Successfully!' + "success": True, + "message": "Successfully created project folders." } def get_notask_children(self, entity): @@ -325,45 +316,5 @@ class PartialDict(dict): def register(session, plugins_presets={}): - '''Register plugin. Called when used as an plugin.''' - + """Register plugin. Called when used as an plugin.""" CreateFolders(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. 
- loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' - ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) From 639896f4a8aedfe75f74a99f2a382a337626da48 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 20 Mar 2020 16:37:16 +0100 Subject: [PATCH 015/207] app launcher uses roots --- pype/ftrack/lib/ftrack_app_handler.py | 275 +++++++++++--------------- 1 file changed, 118 insertions(+), 157 deletions(-) diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py index eebffda280..aa57672f09 100644 --- a/pype/ftrack/lib/ftrack_app_handler.py +++ b/pype/ftrack/lib/ftrack_app_handler.py @@ -1,14 +1,14 @@ import os import sys +import copy import platform from avalon import lib as avalonlib import acre -from pype import api as pype from pype import lib as pypelib from pypeapp import config from .ftrack_base_handler import BaseHandler -from pypeapp import Anatomy +from pypeapp import Anatomy, Roots class AppAction(BaseHandler): @@ -157,209 +157,176 @@ class AppAction(BaseHandler): ''' entity = entities[0] - project_name = entity['project']['full_name'] + project_name = entity["project"]["full_name"] database = pypelib.get_avalon_database() - # Get current environments - env_list = [ - 'AVALON_PROJECT', - 'AVALON_SILO', - 'AVALON_ASSET', - 'AVALON_TASK', - 'AVALON_APP', - 'AVALON_APP_NAME' - ] - env_origin = {} - for env in env_list: - env_origin[env] = os.environ.get(env, None) - - # set environments for Avalon - os.environ["AVALON_PROJECT"] = project_name - os.environ["AVALON_SILO"] = entity['ancestors'][0]['name'] - os.environ["AVALON_ASSET"] = entity['parent']['name'] - os.environ["AVALON_TASK"] = entity['name'] - os.environ["AVALON_APP"] = self.identifier.split("_")[0] - os.environ["AVALON_APP_NAME"] = self.identifier - - anatomy = Anatomy() + asset_name = entity["parent"]["name"] + asset_document = database[project_name].find_one({ + "type": "asset", + "name": asset_name + }) hierarchy = "" - parents = database[project_name].find_one({ - "type": 'asset', - "name": entity['parent']['name'] - })['data']['parents'] - - if parents: - hierarchy = os.path.join(*parents) - - os.environ["AVALON_HIERARCHY"] = hierarchy - - application = avalonlib.get_application(os.environ["AVALON_APP_NAME"]) + asset_doc_parents = asset_document["data"].get("parents") + if len(asset_doc_parents) > 0: + hierarchy = os.path.join(*asset_doc_parents) + application = avalonlib.get_application(self.identifier) data = { - "root": os.environ.get("PYPE_STUDIO_PROJECTS_MOUNT"), + "root": Roots(project_name).roots, "project": { - "name": entity['project']['full_name'], - "code": entity['project']['name'] + "name": entity["project"]["full_name"], + "code": entity["project"]["name"] }, - "task": entity['name'], - "asset": entity['parent']['name'], + "task": entity["name"], + "asset": asset_name, "app": application["application_dir"], - "hierarchy": hierarchy, + 
"hierarchy": hierarchy } - av_project = database[project_name].find_one({"type": 'project'}) - templates = None - if av_project: - work_template = av_project.get('config', {}).get('template', {}).get( - 'work', None - ) - work_template = None try: - work_template = work_template.format(**data) - except Exception: - try: - anatomy = anatomy.format(data) - work_template = anatomy["work"]["folder"] + anatomy = Anatomy(project_name) + anatomy_filled = anatomy.format(data) + workdir = os.path.normpath(anatomy_filled["work"]["folder"]) - except Exception as exc: - msg = "{} Error in anatomy.format: {}".format( - __name__, str(exc) - ) - self.log.error(msg, exc_info=True) - return { - 'success': False, - 'message': msg - } + except Exception as exc: + msg = "Error in anatomy.format: {}".format( + str(exc) + ) + self.log.error(msg, exc_info=True) + return { + "success": False, + "message": msg + } - workdir = os.path.normpath(work_template) - os.environ["AVALON_WORKDIR"] = workdir try: os.makedirs(workdir) except FileExistsError: pass + # set environments for Avalon + prep_env = copy.deepcopy(os.environ) + prep_env.update({ + "AVALON_PROJECT": project_name, + "AVALON_ASSET": asset_name, + "AVALON_TASK": entity["name"], + "AVALON_APP": self.identifier.split("_")[0], + "AVALON_APP_NAME": self.identifier, + "AVALON_HIERARCHY": hierarchy, + "AVALON_WORKDIR": workdir + }) + # collect all parents from the task parents = [] for item in entity['link']: parents.append(session.get(item['type'], item['id'])) # collect all the 'environment' attributes from parents - tools_attr = [os.environ["AVALON_APP"], os.environ["AVALON_APP_NAME"]] - for parent in reversed(parents): - # check if the attribute is empty, if not use it - if parent['custom_attributes']['tools_env']: - tools_attr.extend(parent['custom_attributes']['tools_env']) - break + tools_attr = [prep_env["AVALON_APP"], prep_env["AVALON_APP_NAME"]] + tools_env = asset_document["data"].get("tools_env") or [] + tools_attr.extend(tools_env) tools_env = acre.get_tools(tools_attr) env = acre.compute(tools_env) - env = acre.merge(env, current_env=dict(os.environ)) - env = acre.append(dict(os.environ), env) - - - # - # tools_env = acre.get_tools(tools) - # env = acre.compute(dict(tools_env)) - # env = acre.merge(env, dict(os.environ)) - # os.environ = acre.append(dict(os.environ), env) - # os.environ = acre.compute(os.environ) + env = acre.merge(env, current_env=dict(prep_env)) + env = acre.append(dict(prep_env), env) # Get path to execute - st_temp_path = os.environ['PYPE_CONFIG'] + st_temp_path = os.environ["PYPE_CONFIG"] os_plat = platform.system().lower() # Path to folder with launchers - path = os.path.join(st_temp_path, 'launchers', os_plat) + path = os.path.join(st_temp_path, "launchers", os_plat) + # Full path to executable launcher execfile = None - if sys.platform == "win32": - for ext in os.environ["PATHEXT"].split(os.pathsep): fpath = os.path.join(path.strip('"'), self.executable + ext) if os.path.isfile(fpath) and os.access(fpath, os.X_OK): execfile = fpath break - pass # Run SW if was found executable - if execfile is not None: - popen = avalonlib.launch( - executable=execfile, args=[], environment=env - ) - else: + if execfile is None: return { - 'success': False, - 'message': "We didn't found launcher for {0}" - .format(self.label) - } - pass - - if sys.platform.startswith('linux'): - execfile = os.path.join(path.strip('"'), self.executable) - if os.path.isfile(execfile): - try: - fp = open(execfile) - except PermissionError as p: - 
self.log.exception('Access denied on {0} - {1}'.format( - execfile, p)) - return { - 'success': False, - 'message': "Access denied on launcher - {}".format( - execfile) - } - fp.close() - # check executable permission - if not os.access(execfile, os.X_OK): - self.log.error('No executable permission on {}'.format( - execfile)) - return { - 'success': False, - 'message': "No executable permission - {}".format( - execfile) - } - pass - else: - self.log.error('Launcher doesn\'t exist - {}'.format( - execfile)) - return { - 'success': False, - 'message': "Launcher doesn't exist - {}".format(execfile) + "success": False, + "message": "We didn't found launcher for {0}".format( + self.label + ) } - pass - # Run SW if was found executable - if execfile is not None: - avalonlib.launch( - '/usr/bin/env', args=['bash', execfile], environment=env - ) - else: + + popen = avalonlib.launch( + executable=execfile, args=[], environment=env + ) + + elif sys.platform.startswith("linux"): + execfile = os.path.join(path.strip('"'), self.executable) + if not os.path.isfile(execfile): + msg = "Launcher doesn't exist - {}".format(execfile) + + self.log.error(msg) return { - 'success': False, - 'message': "We didn't found launcher for {0}" - .format(self.label) - } - pass + "success": False, + "message": msg + } + + try: + fp = open(execfile) + except PermissionError as perm_exc: + msg = "Access denied on launcher {} - {}".format( + execfile, perm_exc + ) + + self.log.exception(msg, exc_info=True) + return { + "success": False, + "message": msg + } + + fp.close() + # check executable permission + if not os.access(execfile, os.X_OK): + msg = "No executable permission - {}".format(execfile) + + self.log.error(msg) + return { + "success": False, + "message": msg + } + + # Run SW if was found executable + if execfile is None: + return { + "success": False, + "message": "We didn't found launcher for {0}".format( + self.label + ) + } + + popen = avalonlib.launch( + "/usr/bin/env", args=["bash", execfile], environment=env + ) # Change status of task to In progress presets = config.get_presets()["ftrack"]["ftrack_config"] - if 'status_update' in presets: - statuses = presets['status_update'] + if "status_update" in presets: + statuses = presets["status_update"] - actual_status = entity['status']['name'].lower() + actual_status = entity["status"]["name"].lower() already_tested = [] ent_path = "/".join( - [ent["name"] for ent in entity['link']] + [ent["name"] for ent in entity["link"]] ) while True: next_status_name = None for key, value in statuses.items(): if key in already_tested: continue - if actual_status in value or '_any_' in value: - if key != '_ignore_': + if actual_status in value or "_any_" in value: + if key != "_ignore_": next_status_name = key already_tested.append(key) break @@ -369,12 +336,12 @@ class AppAction(BaseHandler): break try: - query = 'Status where name is "{}"'.format( + query = "Status where name is \"{}\"".format( next_status_name ) status = session.query(query).one() - entity['status'] = status + entity["status"] = status session.commit() self.log.debug("Changing status to \"{}\" <{}>".format( next_status_name, ent_path @@ -384,18 +351,12 @@ class AppAction(BaseHandler): except Exception: session.rollback() msg = ( - 'Status "{}" in presets wasn\'t found' - ' on Ftrack entity type "{}"' + "Status \"{}\" in presets wasn't found" + " on Ftrack entity type \"{}\"" ).format(next_status_name, entity.entity_type) self.log.warning(msg) - # Set origin avalon environments - for key, value in 
env_origin.items(): - if value == None: - value = "" - os.environ[key] = value - return { - 'success': True, - 'message': "Launching {0}".format(self.label) + "success": True, + "message": "Launching {0}".format(self.label) } From fe7a6776214db3becc119d6f28930d10e22ba5c7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 25 Mar 2020 16:27:18 +0100 Subject: [PATCH 016/207] template_data store same frame as was used in anatomy filling --- pype/plugins/global/publish/integrate_new.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 7a346f6888..b1ec8b0022 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -325,6 +325,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): test_dest_files.append( os.path.normpath(template_filled) ) + template_data["frame"] = repre_context["frame"] self.log.debug( "test_dest_files: {}".format(str(test_dest_files))) From d9686af193304b5bf2068fd889ef97b00ef5b9cc Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 25 Mar 2020 16:27:53 +0100 Subject: [PATCH 017/207] template_name is 100% per representation now --- pype/plugins/global/publish/integrate_new.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index b1ec8b0022..6c36b00fa8 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -88,6 +88,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "project", "asset", "task", "subset", "version", "representation", "family", "hierarchy", "task", "username" ] + default_template_name = "publish" def process(self, instance): @@ -260,7 +261,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Each should be a single representation (as such, a single extension) representations = [] destination_list = [] - template_name = 'publish' + if 'transfers' not in instance.data: instance.data['transfers'] = [] @@ -287,8 +288,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): files = repre['files'] if repre.get('stagingDir'): stagingdir = repre['stagingDir'] - if repre.get('anatomy_template'): - template_name = repre['anatomy_template'] + + template_name = ( + repre.get('anatomy_template') or self.default_template_name + ) if repre.get("outputName"): template_data["output"] = repre['outputName'] From b6c25f90c79574b4ce75b696e1f2a6c3916841a1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 25 Mar 2020 16:58:26 +0100 Subject: [PATCH 018/207] unc converting removed from integrators since PYPE_STUDIO_PROJECTS_MOUNT and PYPE_STUDIO_PROJECTS_PATH does not exist --- .../publish/integrate_master_version.py | 72 ------------------- pype/plugins/global/publish/integrate_new.py | 26 +------ 2 files changed, 2 insertions(+), 96 deletions(-) diff --git a/pype/plugins/global/publish/integrate_master_version.py b/pype/plugins/global/publish/integrate_master_version.py index 3c7838b708..8a74f5f86a 100644 --- a/pype/plugins/global/publish/integrate_master_version.py +++ b/pype/plugins/global/publish/integrate_master_version.py @@ -481,9 +481,6 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin): def copy_file(self, src_path, dst_path): # TODO check drives if are the same to check if cas hardlink - dst_path = self.path_root_check(dst_path) - src_path = self.path_root_check(src_path) - dirname = os.path.dirname(dst_path) try: @@ -513,75 
+510,6 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin): shutil.copy(src_path, dst_path) - def path_root_check(self, path): - normalized_path = os.path.normpath(path) - forward_slash_path = normalized_path.replace("\\", "/") - - drive, _path = os.path.splitdrive(normalized_path) - if os.path.exists(drive + "/"): - key = "drive_check{}".format(drive) - if key not in self.path_checks: - self.log.debug( - "Drive \"{}\" exist. Nothing to change.".format(drive) - ) - self.path_checks.append(key) - - return normalized_path - - path_env_key = "PYPE_STUDIO_PROJECTS_PATH" - mount_env_key = "PYPE_STUDIO_PROJECTS_MOUNT" - missing_envs = [] - if path_env_key not in os.environ: - missing_envs.append(path_env_key) - - if mount_env_key not in os.environ: - missing_envs.append(mount_env_key) - - if missing_envs: - key = "missing_envs" - if key not in self.path_checks: - self.path_checks.append(key) - _add_s = "" - if len(missing_envs) > 1: - _add_s = "s" - - self.log.warning(( - "Can't replace MOUNT drive path to UNC path due to missing" - " environment variable{}: `{}`. This may cause issues" - " during publishing process." - ).format(_add_s, ", ".join(missing_envs))) - - return normalized_path - - unc_root = os.environ[path_env_key].replace("\\", "/") - mount_root = os.environ[mount_env_key].replace("\\", "/") - - # --- Remove slashes at the end of mount and unc roots --- - while unc_root.endswith("/"): - unc_root = unc_root[:-1] - - while mount_root.endswith("/"): - mount_root = mount_root[:-1] - # --- - - if forward_slash_path.startswith(unc_root): - self.log.debug(( - "Path already starts with UNC root: \"{}\"" - ).format(unc_root)) - return normalized_path - - if not forward_slash_path.startswith(mount_root): - self.log.warning(( - "Path do not start with MOUNT root \"{}\" " - "set in environment variable \"{}\"" - ).format(unc_root, mount_env_key)) - return normalized_path - - # Replace Mount root with Unc root - path = unc_root + forward_slash_path[len(mount_root):] - - return os.path.normpath(path) - def version_from_representations(self, repres): for repre in repres: version = io.find_one({"_id": repre["parent"]}) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 6c36b00fa8..23bb4f1b66 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -390,7 +390,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): dst_start_frame, dst_tail ).replace("..", ".") - repre['published_path'] = self.unc_convert(dst) + repre['published_path'] = dst else: # Single file @@ -418,7 +418,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): instance.data["transfers"].append([src, dst]) published_files.append(dst) - repre['published_path'] = self.unc_convert(dst) + repre['published_path'] = dst self.log.debug("__ dst: {}".format(dst)) repre["publishedFiles"] = published_files @@ -522,23 +522,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("Hardlinking file .. 
{} -> {}".format(src, dest)) self.hardlink_file(src, dest) - def unc_convert(self, path): - self.log.debug("> __ path: `{}`".format(path)) - drive, _path = os.path.splitdrive(path) - self.log.debug("> __ drive, _path: `{}`, `{}`".format(drive, _path)) - - if not os.path.exists(drive + "/"): - self.log.info("Converting to unc from environments ..") - - path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH") - path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT") - - if "/" in path_mount: - path = path.replace(path_mount[0:-1], path_replace) - else: - path = path.replace(path_mount, path_replace) - return path - def copy_file(self, src, dst): """ Copy given source to destination @@ -548,8 +531,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): Returns: None """ - src = self.unc_convert(src) - dst = self.unc_convert(dst) src = os.path.normpath(src) dst = os.path.normpath(dst) self.log.debug("Copying file .. {} -> {}".format(src, dst)) @@ -572,9 +553,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def hardlink_file(self, src, dst): dirname = os.path.dirname(dst) - src = self.unc_convert(src) - dst = self.unc_convert(dst) - try: os.makedirs(dirname) except OSError as e: From 1f2d1a55dcf9c29003cf50bc9f1b9697448de146 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 27 Mar 2020 18:59:25 +0100 Subject: [PATCH 019/207] few minor changes --- pype/ftrack/lib/ftrack_app_handler.py | 7 +++---- pype/nuke/lib.py | 6 ++---- pype/plugins/global/publish/collect_anatomy.py | 2 -- pype/plugins/global/publish/submit_publish_job.py | 4 +--- 4 files changed, 6 insertions(+), 13 deletions(-) diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py index aa57672f09..d36ed9c479 100644 --- a/pype/ftrack/lib/ftrack_app_handler.py +++ b/pype/ftrack/lib/ftrack_app_handler.py @@ -8,7 +8,7 @@ from pype import lib as pypelib from pypeapp import config from .ftrack_base_handler import BaseHandler -from pypeapp import Anatomy, Roots +from pypeapp import Anatomy class AppAction(BaseHandler): @@ -89,8 +89,8 @@ class AppAction(BaseHandler): ''' if ( - len(entities) != 1 or - entities[0].entity_type.lower() != 'task' + len(entities) != 1 + or entities[0].entity_type.lower() != 'task' ): return False @@ -174,7 +174,6 @@ class AppAction(BaseHandler): application = avalonlib.get_application(self.identifier) data = { - "root": Roots(project_name).roots, "project": { "name": entity["project"]["full_name"], "code": entity["project"]["name"] diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index ad2d576da3..0eb6eaf282 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -192,7 +192,6 @@ def format_anatomy(data): data["version"] = pype.get_version_from_path(file) project_document = pype.get_project() data.update({ - "root": api.Session["AVALON_PROJECTS"], "subset": data["avalon"]["subset"], "asset": data["avalon"]["asset"], "task": api.Session["AVALON_TASK"], @@ -1092,7 +1091,6 @@ class BuildWorkfile(WorkfileSettings): self.to_script = to_script # collect data for formating self.data_tmp = { - "root": root_path or api.Session["AVALON_PROJECTS"], "project": {"name": self._project["name"], "code": self._project["data"].get("code", '')}, "asset": self._asset or os.environ["AVALON_ASSET"], @@ -1109,8 +1107,8 @@ class BuildWorkfile(WorkfileSettings): anatomy_filled = anatomy.format(self.data_tmp) # get dir and file for workfile - self.work_dir = anatomy_filled["avalon"]["work"] - self.work_file = anatomy_filled["avalon"]["workfile"] + ".nk" + self.work_dir = 
anatomy_filled["work"]["folder"] + self.work_file = anatomy_filled["work"]["path"] + ".nk" def save_script_as(self, path=None): # first clear anything in open window diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py index 73ae3bb024..7fd2056213 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -26,7 +26,6 @@ class CollectAnatomy(pyblish.api.ContextPlugin): label = "Collect Anatomy" def process(self, context): - root_path = api.registered_root() task_name = api.Session["AVALON_TASK"] project_entity = context.data["projectEntity"] @@ -45,7 +44,6 @@ class CollectAnatomy(pyblish.api.ContextPlugin): hierarchy = os.path.join(*hierarchy_items) context_data = { - "root": root_path, "project": { "name": project_name, "code": project_entity["data"].get("code") diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index ecf8555c57..8b43bec544 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -152,9 +152,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_KEY", "FTRACK_SERVER", "PYPE_ROOT", - "PYPE_METADATA_FILE", - "PYPE_STUDIO_PROJECTS_PATH", - "PYPE_STUDIO_PROJECTS_MOUNT", + "PYPE_METADATA_FILE" ] # pool used to do the publishing job From 9cc0b993381329c087126f22b2a7f10b5b0dff19 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 12:17:28 +0200 Subject: [PATCH 020/207] submit to deadline sends project, metadatafile is with `{root}` key in value and deadline has `mount` path in `OutputDirectory0` --- .../global/publish/collect_rendered_files.py | 17 +++++++++- .../global/publish/submit_publish_job.py | 33 ++++++++++++++----- pype/scripts/publish_filesequence.py | 7 ---- 3 files changed, 41 insertions(+), 16 deletions(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 552fd49f6d..35d875dc02 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -4,7 +4,7 @@ import json import pyblish.api from avalon import api -from pypeapp import PypeLauncher +from pypeapp import PypeLauncher, Roots class CollectRenderedFiles(pyblish.api.ContextPlugin): @@ -82,8 +82,23 @@ "Missing `PYPE_PUBLISH_DATA`") paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep) + project_name = os.environ.get("AVALON_PROJECT") + if project_name is None: + root = None + self.log.warning( + "Environment `AVALON_PROJECT` was not found." + "Could not set `root` which may cause issues."
+ ) + else: + self.log.info("Getting root setting for project \"{}\"".format( + project_name + )) + root = {"root": Roots(project_name)} + session_set = False for path in paths: + if root: + path = path.format(**root) data = self._load_json(path) if not session_set: self.log.info("Setting session using data from file") diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 8b43bec544..82bd0c5a78 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -185,15 +185,26 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): batch=job["Props"]["Name"], subset=subset ) - metadata_filename = "{}_metadata.json".format(subset) output_dir = instance.data["outputDir"] - metadata_path = os.path.join(output_dir, metadata_filename) - - metadata_path = os.path.normpath(metadata_path) - mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"]) - network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"] - metadata_path = metadata_path.replace(mount_root, network_root) - metadata_path = os.path.normpath(metadata_path) + # Convert output dir to `{root}/rest/of/path/...` with Anatomy + anatomy_obj = instance.context.data["anatomy"] + root_name = anatomy_obj.templates["work"].get("root_name") + success, rootless_path = ( + anatomy_obj.roots.find_root_template_from_path( + output_dir, root_name ) ) + if not success: + # `rootless_path` is not set to `output_dir` if none of roots match + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(output_dir)) + rootless_path = output_dir + else: + # If root was found then use `mount` root for `output_dir` + anatomy_obj.roots._root_key = "mount" + output_dir = rootless_path.format(**{"root": anatomy_obj.roots}) # Generate the payload for Deadline submission payload = { @@ -221,8 +232,14 @@ # Transfer the environment from the original job to this dependent # job so they use the same environment + metadata_filename = "{}_metadata.json".format(subset) + metadata_path = os.path.join(rootless_path, metadata_filename) + # TODO metadata_path replace root with {root[root_name]} + environment = job["Props"].get("Env", {}) environment["PYPE_METADATA_FILE"] = metadata_path + environment["AVALON_PROJECT"] = api.Session["AVALON_PROJECT"] + i = 0 for index, key in enumerate(environment): if key.upper() in self.enviro_filter: diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index fe795564a5..8e14b62306 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -89,13 +89,6 @@ def __main__(): print("Paths: {}".format(kwargs.paths or [os.getcwd()])) paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa - - for path in paths: - data = _load_json(path) - log.info("Setting session using data from file") - os.environ["AVALON_PROJECT"] = data["session"]["AVALON_PROJECT"] - break - args = [ os.path.join(pype_root, pype_command), "publish", From be8f1bd0f54d24aa850e0cd3e63076be948c04c3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 13:05:47 +0200 Subject: [PATCH 021/207] integrate new can find right source --- pype/plugins/global/publish/integrate_new.py | 46 ++++++++++++------- .../global/publish/submit_publish_job.py | 2 +- 2 files changed, 31 insertions(+), 17 deletions(-) diff
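The Anatomy remap added in PATCH 020 above is easier to follow with concrete, invented roots: the absolute output directory is turned back into a {root}-templated string, which is then refilled with whichever root a consumer needs (here the farm-visible mount). A simplified sketch of what find_root_template_from_path() amounts to, not the Anatomy API itself:

    output_dir = "P:/projects/jobx/shots/sh010/render"
    roots = {"work": "P:/projects", "mount": "//server/projects"}
    success = output_dir.startswith(roots["work"])
    rootless_path = "{root}" + output_dir[len(roots["work"]):] if success else output_dir
    print(rootless_path)                              # {root}/jobx/shots/sh010/render
    print(rootless_path.format(root=roots["mount"]))  # //server/projects/jobx/shots/sh010/render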
--git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 23bb4f1b66..39c47fa03b 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -639,24 +639,38 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("Registered root: {}".format(api.registered_root())) # create relative source path for DB - try: - source = instance.data['source'] - except KeyError: - source = context.data["currentFile"] - source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), - api.registered_root()) - relative_path = os.path.relpath(source, api.registered_root()) - source = os.path.join("{root}", relative_path).replace("\\", "/") + if "source" in instance.data: + source = instance.data["source"] + else: + current_file = context.data["currentFile"] + anatomy = instance.context.data["anatomy"] + root_name = anatomy.templates["work"].get("root_name") + success, rootless_path = ( + anatomy.roots.find_root_template_from_path( + current_file, root_name, others_on_fail=True + ) + ) + if not success: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(current_file)) + source = current_file + else: + source = rootless_path self.log.debug("Source: {}".format(source)) - version_data = {"families": families, - "time": context.data["time"], - "author": context.data["user"], - "source": source, - "comment": context.data.get("comment"), - "machine": context.data.get("machine"), - "fps": context.data.get( - "fps", instance.data.get("fps"))} + version_data = { + "families": families, + "time": context.data["time"], + "author": context.data["user"], + "source": source, + "comment": context.data.get("comment"), + "machine": context.data.get("machine"), + "fps": context.data.get( + "fps", instance.data.get("fps") + ) + } intent_value = instance.context.data.get("intent") if intent_value and isinstance(intent_value, dict): diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 82bd0c5a78..423ae01b79 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -203,7 +203,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): rootless_path = output_dir else: # If root was found then use `mount` root for `output_dir` - anatomy_obj.roots._root_key = "mount" + anatomy_obj.roots._root_type = "mount" output_dir = rootless_path.format(**{"root": anatomy_obj.roots}) # Generate the payload for Deadline submission From ef0239fc4f5e1252abf5d0a04842002a7b5f0b98 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 14:09:01 +0200 Subject: [PATCH 022/207] user assignment permissions event changed --- pype/ftrack/events/event_user_assigment.py | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py index eaacfd959a..a899efbe50 100644 --- a/pype/ftrack/events/event_user_assigment.py +++ b/pype/ftrack/events/event_user_assigment.py @@ -158,20 +158,10 @@ class UserAssigmentEvent(BaseEvent): """ project_name = task['project']['full_name'] project_code = task['project']['name'] - try: - root = os.environ['PYPE_STUDIO_PROJECTS_PATH'] - except KeyError: - msg = 'Project ({}) root not set'.format(project_name) - self.log.error(msg) - return { - 'success': False, - 'message': msg - } # fill in template 
data asset = self._get_asset(task) t_data = { - 'root': root, 'project': { 'name': project_name, 'code': project_code @@ -204,11 +194,12 @@ class UserAssigmentEvent(BaseEvent): data = self._get_template_data(task) # format directories to pass to shell script anatomy = Anatomy(data["project"]["name"]) + anatomy_filled = anatomy.format(data) # formatting work dir is easiest part as we can use whole path - work_dir = anatomy.format(data)['avalon']['work'] + work_dir = anatomy_filled["work"]["folder"] # we also need publish but not whole - filled_all = anatomy.format_all(data) - publish = filled_all['avalon']['publish'] + anatomy_filled.strict = False + publish = anatomy_filled["publosh"]["folder"] # now find path to {asset} m = re.search("(^.+?{})".format(data['asset']), From 03c1285f2d4a7d1cbf05ba4a55ff387718977505 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 14:14:08 +0200 Subject: [PATCH 023/207] source root should work --- pype/plugins/global/publish/integrate_new.py | 14 ++++---- .../global/publish/submit_publish_job.py | 32 +++++++++++++------ 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 39c47fa03b..c8fa5b4074 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -637,27 +637,25 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): families.append(instance_family) families += current_families - self.log.debug("Registered root: {}".format(api.registered_root())) # create relative source path for DB if "source" in instance.data: source = instance.data["source"] else: - current_file = context.data["currentFile"] + source = context.data["currentFile"] anatomy = instance.context.data["anatomy"] root_name = anatomy.templates["work"].get("root_name") success, rootless_path = ( anatomy.roots.find_root_template_from_path( - current_file, root_name, others_on_fail=True + source, root_name, others_on_fail=True ) ) - if not success: + if success: + source = rootless_path + else: self.log.warning(( "Could not find root path for remapping \"{}\"." " This may cause issues on farm." 
- ).format(current_file)) - source = current_file - else: - source = rootless_path + ).format(source)) self.log.debug("Source: {}".format(source)) version_data = { diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 423ae01b79..f121628e01 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -187,10 +187,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): output_dir = instance.data["outputDir"] # Convert output dir to `{root}/rest/of/path/...` with Anatomy - anatomy_obj = instance.context.data["anatomy"] - root_name = anatomy_obj.templates["work"].get("root_name") + anatomy = instance.context.data["anatomy"] + root_name = anatomy.templates["work"].get("root_name") success, rootless_path = ( - anatomy_obj.roots.find_root_template_from_path( + anatomy.roots.find_root_template_from_path( output_dir, root_name ) ) @@ -203,8 +203,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): rootless_path = output_dir else: # If root was found then use `mount` root for `output_dir` - anatomy_obj.roots._root_type = "mount" - output_dir = rootless_path.format(**{"root": anatomy_obj.roots}) + anatomy.roots._root_type = "mount" + output_dir = rootless_path.format(**{"root": anatomy.roots}) # Generate the payload for Deadline submission payload = { @@ -572,11 +572,25 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): except KeyError: source = context.data["currentFile"] - source = source.replace( - os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root() + anatomy = context.data["anatomy"] + root_name = anatomy.templates["work"].get("root_name") + success, rootless_path = ( + anatomy.roots.find_root_template_from_path( + source, root_name + ) ) - relative_path = os.path.relpath(source, api.registered_root()) - source = os.path.join("{root}", relative_path).replace("\\", "/") + if success: + orig_root_type = anatomy.roots._root_type + anatomy.roots._root_type = "mount" + source = rootless_path.format(**{"root": anatomy.roots}) + anatomy.roots._root_type = orig_root_type + + else: + # `rootless_path` is not set to `source` if none of roots match + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." 
+ ).format(output_dirt)) families = ["render"] From 50bd855fb1e64c04c1642104fe20281bd2e208b3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 14:17:14 +0200 Subject: [PATCH 024/207] texture copy now works without registered_root --- pype/tools/texture_copy/app.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/pype/tools/texture_copy/app.py b/pype/tools/texture_copy/app.py index a59d30ec8b..624082f00c 100644 --- a/pype/tools/texture_copy/app.py +++ b/pype/tools/texture_copy/app.py @@ -46,25 +46,25 @@ class TextureCopy: return asset def _get_destination_path(self, asset, project): - root = api.registered_root() - PROJECT = api.Session["AVALON_PROJECT"] + project_name = api.Session["AVALON_PROJECT"] hierarchy = "" parents = asset['data']['parents'] if parents and len(parents) > 0: hierarchy = os.path.join(*parents) - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset.get('silo'), - "asset": asset['name'], - "family": 'texture', - "subset": 'Main', - "hierarchy": hierarchy} - anatomy = Anatomy() - anatomy_filled = os.path.normpath( - anatomy.format(template_data)['texture']['path']) - return anatomy_filled + template_data = { + "project": { + "name": project_name, + "code": project['data']['code'] + }, + "silo": asset.get('silo'), + "asset": asset['name'], + "family": 'texture', + "subset": 'Main', + "hierarchy": hierarchy + } + anatomy_filled = Anatomy(project_name).format(template_data) + return os.path.normpath(anatomy_filled['texture']['path']) def _get_version(self, path): versions = [0] From 082a14937ad78e514e613a5c1d5fcbc541e28d14 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 18:41:29 +0200 Subject: [PATCH 025/207] fixed typo --- pype/ftrack/events/event_user_assigment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py index a899efbe50..bf3bec93be 100644 --- a/pype/ftrack/events/event_user_assigment.py +++ b/pype/ftrack/events/event_user_assigment.py @@ -199,7 +199,7 @@ class UserAssigmentEvent(BaseEvent): work_dir = anatomy_filled["work"]["folder"] # we also need publish but not whole anatomy_filled.strict = False - publish = anatomy_filled["publosh"]["folder"] + publish = anatomy_filled["publish"]["folder"] # now find path to {asset} m = re.search("(^.+?{})".format(data['asset']), From b7d81da40d5cea2a52e43e4bffde8250bc823974 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 18:44:46 +0200 Subject: [PATCH 026/207] fixed nuke lib workfile filling --- pype/nuke/lib.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 3f5e43332f..7419b18710 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1095,13 +1095,14 @@ class BuildWorkfile(WorkfileSettings): # collect data for formating self.data_tmp = { "project": {"name": self._project["name"], - "code": self._project["data"].get("code", '')}, + "code": self._project["data"].get("code", "")}, "asset": self._asset or os.environ["AVALON_ASSET"], "task": kwargs.get("task") or api.Session["AVALON_TASK"], "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(), "version": kwargs.get("version", {}).get("name", 1), "user": getpass.getuser(), - "comment": "firstBuild" + "comment": "firstBuild", + "ext": "nk" } # get presets from anatomy @@ -1111,7 +1112,7 @@ class BuildWorkfile(WorkfileSettings): # get dir and file 
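# (Sketch, not part of the patch: the Anatomy.format() fill that the workfile
# code above depends on. Keys mirror self.data_tmp; the values are invented.)
from pypeapp import Anatomy

data = {
    "project": {"name": "my_project", "code": "mp"},
    "asset": "sh010",
    "task": "compositing",
    "hierarchy": "episodes/ep01",
    "version": 1,
    "user": "artist",
    "comment": "firstBuild",
    "ext": "nk",
}
anatomy_filled = Anatomy("my_project").format(data)
work_dir = anatomy_filled["work"]["folder"]
# after patch 026 the "work" file template carries the extension itself,
# so no manual + ".nk" concatenation is needed:
work_file = anatomy_filled["work"]["file"]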
for workfile self.work_dir = anatomy_filled["work"]["folder"] - self.work_file = anatomy_filled["work"]["path"] + ".nk" + self.work_file = anatomy_filled["work"]["file"] def save_script_as(self, path=None): # first clear anything in open window From 1983150f165aa478c555cd0f0187e3c6355602a6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 18:48:27 +0200 Subject: [PATCH 027/207] added avalon project to deadline env filter --- pype/plugins/global/publish/submit_publish_job.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 6f1a85de50..f85903b186 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -152,7 +152,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_KEY", "FTRACK_SERVER", "PYPE_ROOT", - "PYPE_METADATA_FILE" + "PYPE_METADATA_FILE", + "AVALON_PROJECT" ] # pool used to do the publishing job From b601c20a5c17691abde707753863489e80597891 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 19:39:00 +0200 Subject: [PATCH 028/207] fixed typo 2 --- pype/plugins/global/publish/submit_publish_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index f85903b186..1fc95b3e70 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -200,7 +200,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.log.warning(( "Could not find root path for remapping \"{}\"." " This may cause issues on farm." - ).format(output_dirt)) + ).format(output_dir)) rootless_path = output_dir else: # If root was found then use `mount` root for `output_dir` From c89ed35429f706e0c4075bb63a7532c7b2ffb9a7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Mar 2020 20:54:32 +0200 Subject: [PATCH 029/207] fixed varianble --- pype/plugins/global/publish/submit_publish_job.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 1fc95b3e70..244c2e840d 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -588,8 +588,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # `rootless_path` is not set to `source` if none of roots match self.log.warning(( "Could not find root path for remapping \"{}\"." - " This may cause issues on farm." - ).format(output_dirt)) + " This may cause issues." 
+ ).format(source)) families = ["render"] From 2133dfa49c7ae8dc1adac80e25ccaa4c4d46b241 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 6 Apr 2020 15:53:31 +0200 Subject: [PATCH 030/207] fixed typos --- pype/ftrack/lib/ftrack_app_handler.py | 2 +- pype/plugins/global/publish/collect_rendered_files.py | 2 +- pype/tools/texture_copy/app.py | 7 ++++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py index a3d3480a5e..2430f44ae7 100644 --- a/pype/ftrack/lib/ftrack_app_handler.py +++ b/pype/ftrack/lib/ftrack_app_handler.py @@ -264,7 +264,7 @@ class AppAction(BaseHandler): if execfile is None: return { "success": False, - "message": "We didn't found launcher for {0}".format( + "message": "We didn't find launcher for {0}".format( self.label ) } diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index e79be1c4ae..1c9a8c1f94 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -86,7 +86,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): if project_name is None: root = None self.log.warning( - "Environment `AVLAON_PROJECT` was not found." + "Environment `AVALON_PROJECT` was not found." "Could not set `root` which may cause issues." ) else: diff --git a/pype/tools/texture_copy/app.py b/pype/tools/texture_copy/app.py index 624082f00c..5f89db53ff 100644 --- a/pype/tools/texture_copy/app.py +++ b/pype/tools/texture_copy/app.py @@ -46,7 +46,7 @@ class TextureCopy: return asset def _get_destination_path(self, asset, project): - project_name = api.Session["AVALON_PROJECT"] + project_name = project["name"] hierarchy = "" parents = asset['data']['parents'] if parents and len(parents) > 0: @@ -63,8 +63,9 @@ class TextureCopy: "subset": 'Main', "hierarchy": hierarchy } - anatomy_filled = Anatomy(project_name).format(template_data) - return os.path.normpath(anatomy_filled['texture']['path']) + anatomy = Anatomy(project_name) + anatomy_filled = anatomy.format(template_data) + return anatomy_filled['texture']['path'] def _get_version(self, path): versions = [0] From 004f9280d86e0b7813c3b4d6263f03cf0becf3f2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 6 Apr 2020 18:42:05 +0200 Subject: [PATCH 031/207] collect rendered files use root formatting --- .../global/publish/collect_rendered_files.py | 36 ++++++++++++++----- 1 file changed, 27 insertions(+), 9 deletions(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 1c9a8c1f94..97edc45bb7 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -32,7 +32,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): ) return data - def _process_path(self, data): + def _process_path(self, data, root): # validate basic necessary data data_err = "invalid json file - missing data" required = ["asset", "user", "comment", @@ -66,14 +66,32 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"] # now we can just add instances from json file and we are done - for instance in data.get("instances"): + for instance_data in data.get("instances"): self.log.info(" - processing instance for {}".format( - instance.get("subset"))) - i = self._context.create_instance(instance.get("subset")) - self.log.info("remapping paths 
...") - i.data["representations"] = [PypeLauncher().path_remapper( - data=r) for r in instance.get("representations")] - i.data.update(instance) + instance_data.get("subset"))) + instance = self._context.create_instance( + instance_data.get("subset") + ) + self.log.info("Filling stagignDir...") + instance.data.update(instance_data) + + representations = [] + for repre_data in instance_data.get("representations") or []: + staging_dir = repre_data.get("stagingDir") + if ( + not root + or staging_dir is None + or "{root" not in staging_dir + ): + repre_data = PypeLauncher().path_remapper(data=repre_data) + + else: + repre_data["stagingDir"] = staging_dir.format( + **{"root": root} + ) + representations.append(repre_data) + + instance.data["representations"] = representations def process(self, context): self._context = context @@ -106,4 +124,4 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): os.environ.update(data.get("session")) session_set = True assert data, "failed to load json file" - self._process_path(data) + self._process_path(data, root) From 6801dc2118eea9a40bb4bd496dff2b9685128206 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 6 Apr 2020 19:01:04 +0200 Subject: [PATCH 032/207] stagingdirs are sent rootless --- .../global/publish/submit_publish_job.py | 53 +++++++++++-------- 1 file changed, 30 insertions(+), 23 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 244c2e840d..3d991142c7 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -189,10 +189,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): output_dir = instance.data["outputDir"] # Convert output dir to `{root}/rest/of/path/...` with Anatomy anatomy = instance.context.data["anatomy"] - root_name = anatomy.templates["work"].get("root_name") + work_root_name = anatomy.templates["work"].get("root_name") success, rootless_path = ( anatomy.roots.find_root_template_from_path( - output_dir, root_name + output_dir, work_root_name ) ) if not success: @@ -426,12 +426,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] - cols, rem = clique.assemble(exp_files) + collections, remainders = clique.assemble(exp_files) bake_render_path = instance.get("bakeRenderPath") # create representation for every collected sequence - for c in cols: - ext = c.tail.lstrip(".") + for collection in collections: + ext = collection.tail.lstrip(".") preview = False # if filtered aov name is found in filename, toggle it for # preview video rendering @@ -440,7 +440,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): for aov in self.aov_filter[app]: if re.match( r".+(?:\.|_)({})(?:\.|_).*".format(aov), - list(c)[0] + list(collection)[0] ): preview = True break @@ -452,11 +452,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): rep = { "name": ext, "ext": ext, - "files": [os.path.basename(f) for f in list(c)], + "files": [os.path.basename(f) for f in list(collection)], "frameStart": int(instance.get("frameStartHandle")), "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames - "stagingDir": os.path.dirname(list(c)[0]), + "stagingDir": os.path.dirname(list(collection)[0]), "anatomy_template": "render", "fps": instance.get("fps"), "tags": ["review", "preview"] if preview else [], @@ -467,16 +467,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): 
self._solve_families(instance, preview) # add reminders as representations - for r in rem: - ext = r.split(".")[-1] + for remainder in remainders: + ext = remainder.split(".")[-1] rep = { "name": ext, "ext": ext, - "files": os.path.basename(r), - "stagingDir": os.path.dirname(r), + "files": os.path.basename(remainder), + "stagingDir": os.path.dirname(remainder), "anatomy_template": "publish", } - if r in bake_render_path: + if remainder in bake_render_path: rep.update({ "fps": instance.get("fps"), "anatomy_template": "render", @@ -571,18 +571,15 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): except KeyError: source = context.data["currentFile"] - anatomy = context.data["anatomy"] - root_name = anatomy.templates["work"].get("root_name") + anatomy = instance.context.data["anatomy"] + work_root_name = anatomy.templates["work"].get("root_name") success, rootless_path = ( anatomy.roots.find_root_template_from_path( - source, root_name + source, work_root_name ) ) if success: - orig_root_type = anatomy.roots._root_type - anatomy.roots._root_type = "mount" - source = rootless_path.format(**{"root": anatomy.roots}) - anatomy.roots._root_type = orig_root_type + source = rootless_path else: # `rootless_path` is not set to `source` if none of roots match @@ -627,13 +624,23 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # look into instance data if representations are not having any # which are having tag `publish_on_farm` and include them - for r in instance.data.get("representations", []): - if "publish_on_farm" in r.get("tags"): + for repre in instance.data.get("representations", []): + staging_dir = repre.get("stagingDir") + if staging_dir: + success, rootless_staging_dir = ( + anatomy.roots.find_root_template_from_path( + repre, work_root_name + ) + ) + if success: + repre["stagingDir"] = rootless_staging_dir + + if "publish_on_farm" in repre.get("tags"): # create representations attribute of not there if "representations" not in instance_skeleton_data.keys(): instance_skeleton_data["representations"] = [] - instance_skeleton_data["representations"].append(r) + instance_skeleton_data["representations"].append(repre) instances = None assert data.get("expectedFiles"), ("Submission from old Pype version" From 182502c5e4d24f9cac272e380c537cb3fa7b3740 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 12:44:24 +0200 Subject: [PATCH 033/207] replaced "PYPE_ROOT" with "PYPE_SETUP_PATH" --- docs/source/conf.py | 2 +- make_docs.bat | 8 ++++---- pype/clockify/widget_settings.py | 2 +- pype/ftrack/tray/login_dialog.py | 2 +- pype/lib.py | 4 ++-- pype/muster/widget_login.py | 2 +- pype/plugins/global/publish/submit_publish_job.py | 2 +- pype/plugins/maya/publish/submit_maya_muster.py | 2 +- pype/scripts/publish_deadline.py | 4 ++-- pype/scripts/publish_filesequence.py | 6 +++--- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index 894425e56b..d022332a56 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -22,7 +22,7 @@ from pypeapp.pypeLauncher import PypeLauncher from pypeapp.storage import Storage from pypeapp.deployment import Deployment -pype_setup = os.getenv('PYPE_ROOT') +pype_setup = os.getenv('PYPE_SETUP_PATH') d = Deployment(pype_setup) launcher = PypeLauncher() diff --git a/make_docs.bat b/make_docs.bat index f0011086e5..d2ea75562f 100644 --- a/make_docs.bat +++ b/make_docs.bat @@ -25,15 +25,15 @@ set PYTHONPATH=%%d;!PYTHONPATH! 
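Patches 031 and 032 above push representation staging directories through the same rootless scheme in both directions. A sketch of the two sides, assuming the dictionary keys those hunks use; the project, path, and repre dict are invented:

from pypeapp import Anatomy

anatomy = Anatomy("my_project")  # hypothetical project
repre = {"stagingDir": "P:/projects/my_project/work/render/v001"}

# submitter side: make the path portable before it is written to metadata JSON
success, rootless = anatomy.roots.find_root_template_from_path(
    repre["stagingDir"], anatomy.templates["work"].get("root_name")
)
if success:
    repre["stagingDir"] = rootless  # "{root}/my_project/work/render/v001"

# farm side: fill the placeholder back with the roots this machine knows
if "{root" in repre["stagingDir"]:
    repre["stagingDir"] = repre["stagingDir"].format(**{"root": anatomy.roots})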
echo ^>^>^> Setting PYPE_CONFIG call :ResolvePath pypeconfig "..\pype-config" set PYPE_CONFIG=%pypeconfig% -echo ^>^>^> Setting PYPE_ROOT +echo ^>^>^> Setting PYPE_SETUP_PATH call :ResolvePath pyperoot "..\..\" -set PYPE_ROOT=%pyperoot% -set PYTHONPATH=%PYPE_ROOT%;%PYTHONPATH% +set PYPE_SETUP_PATH=%pyperoot% +set PYTHONPATH=%PYPE_SETUP_PATH%;%PYTHONPATH% echo ^>^>^> Setting PYPE_ENV set PYPE_ENV="C:\Users\Public\pype_env2" call "docs\make.bat" clean -sphinx-apidoc -M -f -d 6 --ext-autodoc --ext-intersphinx --ext-viewcode -o docs\source pype %PYPE_ROOT%\repos\pype\pype\vendor\* +sphinx-apidoc -M -f -d 6 --ext-autodoc --ext-intersphinx --ext-viewcode -o docs\source pype %PYPE_SETUP_PATH%\repos\pype\pype\vendor\* call "docs\make.bat" html echo ^>^>^> Doing cleanup ... set PYTHONPATH=%_OLD_PYTHONPATH% diff --git a/pype/clockify/widget_settings.py b/pype/clockify/widget_settings.py index 7142548fa6..027268834c 100644 --- a/pype/clockify/widget_settings.py +++ b/pype/clockify/widget_settings.py @@ -26,7 +26,7 @@ class ClockifySettings(QtWidgets.QWidget): elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'): self.setWindowIcon(self.parent.parent.icon) else: - pype_setup = os.getenv('PYPE_ROOT') + pype_setup = os.getenv('PYPE_SETUP_PATH') items = [pype_setup, "app", "resources", "icon.png"] fname = os.path.sep.join(items) icon = QtGui.QIcon(fname) diff --git a/pype/ftrack/tray/login_dialog.py b/pype/ftrack/tray/login_dialog.py index 5f3777f93e..88c4e90374 100644 --- a/pype/ftrack/tray/login_dialog.py +++ b/pype/ftrack/tray/login_dialog.py @@ -29,7 +29,7 @@ class Login_Dialog_ui(QtWidgets.QWidget): elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'): self.setWindowIcon(self.parent.parent.icon) else: - pype_setup = os.getenv('PYPE_ROOT') + pype_setup = os.getenv('PYPE_SETUP_PATH') items = [pype_setup, "app", "resources", "icon.png"] fname = os.path.sep.join(items) icon = QtGui.QIcon(fname) diff --git a/pype/lib.py b/pype/lib.py index 824d2e0f52..ab99e6a49c 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -660,7 +660,7 @@ def execute_hook(hook, *args, **kwargs): This will load hook file, instantiate class and call `execute` method on it. 
Hook must be in a form: - `$PYPE_ROOT/repos/pype/path/to/hook.py/HookClass` + `$PYPE_SETUP_PATH/repos/pype/path/to/hook.py/HookClass` This will load `hook.py`, instantiate HookClass and then execute_hook `execute(*args, **kwargs)` @@ -671,7 +671,7 @@ def execute_hook(hook, *args, **kwargs): class_name = hook.split("/")[-1] - abspath = os.path.join(os.getenv('PYPE_ROOT'), + abspath = os.path.join(os.getenv('PYPE_SETUP_PATH'), 'repos', 'pype', *hook.split("/")[:-1]) mod_name, mod_ext = os.path.splitext(os.path.basename(abspath)) diff --git a/pype/muster/widget_login.py b/pype/muster/widget_login.py index 1d0dd29d59..88d769ef93 100644 --- a/pype/muster/widget_login.py +++ b/pype/muster/widget_login.py @@ -23,7 +23,7 @@ class MusterLogin(QtWidgets.QWidget): elif hasattr(parent, 'parent') and hasattr(parent.parent, 'icon'): self.setWindowIcon(parent.parent.icon) else: - pype_setup = os.getenv('PYPE_ROOT') + pype_setup = os.getenv('PYPE_SETUP_PATH') items = [pype_setup, "app", "resources", "icon.png"] fname = os.path.sep.join(items) icon = QtGui.QIcon(fname) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 3d991142c7..f8b2c80fa3 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -151,7 +151,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", - "PYPE_ROOT", + "PYPE_SETUP_PATH", "PYPE_METADATA_FILE", "AVALON_PROJECT" ] diff --git a/pype/plugins/maya/publish/submit_maya_muster.py b/pype/plugins/maya/publish/submit_maya_muster.py index af968ed773..fdd246d012 100644 --- a/pype/plugins/maya/publish/submit_maya_muster.py +++ b/pype/plugins/maya/publish/submit_maya_muster.py @@ -312,7 +312,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): # replace path for UNC / network share paths, co PYPE is found # over network. 
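# (Sketch, not part of the patch: what pype.lib.execute_hook resolves to after
# the rename, shown with stdlib importlib. The helper name is invented and the
# real pype/lib.py implementation may differ in detail.)
import importlib.util
import os

def run_hook(hook, *args, **kwargs):
    # "path/to/hook.py/HookClass" -> file path inside the repo + class name
    class_name = hook.split("/")[-1]
    abspath = os.path.join(
        os.getenv("PYPE_SETUP_PATH"), "repos", "pype", *hook.split("/")[:-1]
    )
    mod_name = os.path.splitext(os.path.basename(abspath))[0]
    spec = importlib.util.spec_from_file_location(mod_name, abspath)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return getattr(module, class_name)().execute(*args, **kwargs)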
It assumes PYPE is located somewhere in # PYPE_CORE_PATH - pype_root = os.environ["PYPE_ROOT"].replace( + pype_root = os.environ["PYPE_SETUP_PATH"].replace( os.path.normpath(os.environ['PYPE_CORE_MOUNT']), os.path.normpath(os.environ['PYPE_CORE_PATH']) ) diff --git a/pype/scripts/publish_deadline.py b/pype/scripts/publish_deadline.py index e6052dbfd2..16d097a1ea 100644 --- a/pype/scripts/publish_deadline.py +++ b/pype/scripts/publish_deadline.py @@ -14,9 +14,9 @@ def __main__(): "configuration.") kwargs, args = parser.parse_known_args() - pype_root = os.environ.get("PYPE_ROOT") + pype_root = os.environ.get("PYPE_SETUP_PATH") if not pype_root: - raise Exception("PYPE_ROOT is not set") + raise Exception("PYPE_SETUP_PATH is not set") # TODO: set correct path pype_command = "pype.ps1" diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 0a08e9ed4c..8b99d0560f 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -47,10 +47,10 @@ def __main__(): auto_pype_root = os.path.dirname(os.path.abspath(__file__)) auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..") - auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root - if os.environ.get('PYPE_ROOT'): + auto_pype_root = os.environ.get('PYPE_SETUP_PATH') or auto_pype_root + if os.environ.get('PYPE_SETUP_PATH'): print("Got Pype location from environment: {}".format( - os.environ.get('PYPE_ROOT'))) + os.environ.get('PYPE_SETUP_PATH'))) pype_command = "pype.ps1" if platform.system().lower() == "linux": From 2e431b32e1f894a398a3865b4bb2ea4e8ecd06b4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 14:27:02 +0200 Subject: [PATCH 034/207] collect anatomy was split into 2 plugins to be able to collect anatomy obj before collect rendered files --- ...omy.py => collect_anatomy_context_data.py} | 45 +++++++++++-------- .../global/publish/collect_anatomy_object.py | 34 ++++++++++++++ 2 files changed, 61 insertions(+), 18 deletions(-) rename pype/plugins/global/publish/{collect_anatomy.py => collect_anatomy_context_data.py} (63%) create mode 100644 pype/plugins/global/publish/collect_anatomy_object.py diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy_context_data.py similarity index 63% rename from pype/plugins/global/publish/collect_anatomy.py rename to pype/plugins/global/publish/collect_anatomy_context_data.py index 7fd2056213..e1e6c12ee9 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy_context_data.py @@ -1,13 +1,14 @@ -"""Collect Anatomy and global anatomy data. +"""Collect global context Anatomy data. Requires: + context -> anatomy + context -> projectEntity + context -> assetEntity + context -> username + context -> datetimeData session -> AVALON_TASK - projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder) - username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001) - datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder) Provides: - context -> anatomy (pypeapp.Anatomy) context -> anatomyData """ @@ -15,15 +16,31 @@ import os import json from avalon import api, lib -from pypeapp import Anatomy import pyblish.api -class CollectAnatomy(pyblish.api.ContextPlugin): - """Collect Anatomy into Context""" +class CollectAnatomyContextData(pyblish.api.ContextPlugin): + """Collect Anatomy Context data. 
+ + Example: + context.data["anatomyData"] = { + "project": { + "name": "MyProject", + "code": "myproj" + }, + "asset": "AssetName", + "hierarchy": "path/to/asset", + "task": "Working", + "username": "MeDespicable", + + *** OPTIONAL *** + "app": "maya" # Current application base name + + mutliple keys from `datetimeData` # see it's collector + } + """ order = pyblish.api.CollectorOrder + 0.002 - label = "Collect Anatomy" + label = "Collect Anatomy Context Data" def process(self, context): task_name = api.Session["AVALON_TASK"] @@ -31,13 +48,6 @@ class CollectAnatomy(pyblish.api.ContextPlugin): project_entity = context.data["projectEntity"] asset_entity = context.data["assetEntity"] - project_name = project_entity["name"] - - context.data["anatomy"] = Anatomy(project_name) - self.log.info( - "Anatomy object collected for project \"{}\".".format(project_name) - ) - hierarchy_items = asset_entity["data"]["parents"] hierarchy = "" if hierarchy_items: @@ -45,13 +55,12 @@ class CollectAnatomy(pyblish.api.ContextPlugin): context_data = { "project": { - "name": project_name, + "name": project_entity["name"], "code": project_entity["data"].get("code") }, "asset": asset_entity["name"], "hierarchy": hierarchy.replace("\\", "/"), "task": task_name, - "username": context.data["user"] } diff --git a/pype/plugins/global/publish/collect_anatomy_object.py b/pype/plugins/global/publish/collect_anatomy_object.py new file mode 100644 index 0000000000..d9e6964050 --- /dev/null +++ b/pype/plugins/global/publish/collect_anatomy_object.py @@ -0,0 +1,34 @@ +"""Collect Anatomy object. + +Requires: + os.environ -> AVALON_PROJECT + +Provides: + context -> anatomy (pypeapp.Anatomy) +""" + +from avalon import io +from pypeapp import Anatomy +import pyblish.api + + +class CollectAnatomyObject(pyblish.api.ContextPlugin): + """Collect Anatomy object into Context""" + + order = pyblish.api.CollectorOrder - 0.11 + label = "Collect Anatomy Object" + + def process(self, context): + io.install() + project_name = io.Session.get("AVALON_PROJECT") + if project_name is None: + raise AssertionError( + "Environment `AVALON_PROJECT` is not set." + "Could not initialize project's Anatomy." 
+ ) + + context.data["anatomy"] = Anatomy(project_name) + + self.log.info( + "Anatomy object collected for project \"{}\".".format(project_name) + ) From b8efca6c3f6d4e8224ced2f5ee0277221a96fbcb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 14:27:34 +0200 Subject: [PATCH 035/207] "collect_instance_anatomy_data" renamed to "collect_anatomy_instance_data" --- ...nce_anatomy_data.py => collect_anatomy_instance_data.py} | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename pype/plugins/global/publish/{collect_instance_anatomy_data.py => collect_anatomy_instance_data.py} (96%) diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_anatomy_instance_data.py similarity index 96% rename from pype/plugins/global/publish/collect_instance_anatomy_data.py rename to pype/plugins/global/publish/collect_anatomy_instance_data.py index 06a25b7c8a..6528bede2e 100644 --- a/pype/plugins/global/publish/collect_instance_anatomy_data.py +++ b/pype/plugins/global/publish/collect_anatomy_instance_data.py @@ -28,11 +28,11 @@ from avalon import io import pyblish.api -class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): - """Fill templates with data needed for publish""" +class CollectAnatomyInstanceData(pyblish.api.InstancePlugin): + """Collect Instance specific Anatomy data.""" order = pyblish.api.CollectorOrder + 0.49 - label = "Collect instance anatomy data" + label = "Collect Anatomy Instance data" def process(self, instance): # get all the stuff from the database From fb9f951d924688abd8d518c3766316c9a9be5c94 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 14:28:00 +0200 Subject: [PATCH 036/207] collect rendered files use anatomy to fill metadata json path and staging dirs --- pype/__init__.py | 3 +- .../global/publish/collect_rendered_files.py | 59 ++++++++++++------- 2 files changed, 39 insertions(+), 23 deletions(-) diff --git a/pype/__init__.py b/pype/__init__.py index 34d2d90649..1dcd6f9fd6 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -87,8 +87,7 @@ def install(): if project_name: root_obj = Roots(project_name) - root = root_obj.roots - avalon.register_root(root) + avalon.register_root(root_obj.roots) # apply monkey patched discover to original one avalon.discover = patched_discover diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 97edc45bb7..ebb1dac127 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -1,3 +1,12 @@ +"""Loads publishing context from json and continues in publish process. + +Requires: + anatomy -> context["anatomy"] *(pyblish.api.CollectorOrder - 0.11) + +Provides: + context, instances -> All data from previous publishing process. 
+""" + import os import json @@ -32,7 +41,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): ) return data - def _process_path(self, data, root): + def _process_path(self, data, anatomy): # validate basic necessary data data_err = "invalid json file - missing data" required = ["asset", "user", "comment", @@ -78,17 +87,26 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): representations = [] for repre_data in instance_data.get("representations") or []: staging_dir = repre_data.get("stagingDir") - if ( - not root - or staging_dir is None - or "{root" not in staging_dir - ): - repre_data = PypeLauncher().path_remapper(data=repre_data) + if not staging_dir: + pass + + elif "{root" in staging_dir: + repre_data["stagingDir"] = staging_dir.format( + **{"root": anatomy.roots} + ) + self.log.debug(( + "stagingDir was filled with root." + " To: \"{}\" From: \"{}\"" + ).format(repre_data["stagingDir"], staging_dir)) else: - repre_data["stagingDir"] = staging_dir.format( - **{"root": root} - ) + remapped = anatomy.roots_obj.path_remapper(staging_dir) + if remapped: + repre_data["stagingDir"] = remapped + self.log.debug(( + "stagingDir was remapped. To: \"{}\" From: \"{}\"" + ).format(remapped, staging_dir)) + representations.append(repre_data) instance.data["representations"] = representations @@ -102,21 +120,20 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): project_name = os.environ.get("AVALON_PROJECT") if project_name is None: - root = None - self.log.warning( + raise AssertionError( "Environment `AVALON_PROJECT` was not found." - "Could not set `root` which may cause issues." + "Could not set project `root` which may cause issues." ) - else: - self.log.info("Getting root setting for project \"{}\"".format( - project_name - )) - root = {"root": Roots(project_name)} + # TODO root filling should happen after collect Anatomy + self.log.info("Getting root setting for project \"{}\"".format( + project_name + )) + + anatomy = context.data["anatomy"] session_set = False for path in paths: - if root: - path = path.format(**root) + path = path.format(**{"root": anatomy.roots}) data = self._load_json(path) if not session_set: self.log.info("Setting session using data from file") @@ -124,4 +141,4 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): os.environ.update(data.get("session")) session_set = True assert data, "failed to load json file" - self._process_path(data, root) + self._process_path(data, anatomy) From 0370389292e03e8f1e7f8644f7f9fc0e6b775020 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 14:46:20 +0200 Subject: [PATCH 037/207] removed unused imports --- pype/plugins/global/publish/collect_rendered_files.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index ebb1dac127..5197f71a46 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -13,8 +13,6 @@ import json import pyblish.api from avalon import api -from pypeapp import PypeLauncher, Roots - class CollectRenderedFiles(pyblish.api.ContextPlugin): """ From 56338f3d70abada98a85451f4f83ed9a3ec93b38 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 18:10:38 +0200 Subject: [PATCH 038/207] do not use PYPE_CORE_* environments --- .../global/publish/submit_publish_job.py | 8 +--- .../maya/publish/submit_maya_muster.py | 38 ++++++------------- .../nuke/publish/submit_nuke_deadline.py | 31 ++++++--------- 
3 files changed, 25 insertions(+), 52 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index f8b2c80fa3..8525657b21 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -20,13 +20,7 @@ def _get_script(): if module_path.endswith(".pyc"): module_path = module_path[: -len(".pyc")] + ".py" - module_path = os.path.normpath(module_path) - mount_root = os.path.normpath(os.environ["PYPE_CORE_MOUNT"]) - network_root = os.path.normpath(os.environ["PYPE_CORE_PATH"]) - - module_path = module_path.replace(mount_root, network_root) - - return module_path + return os.path.normpath(module_path) # Logic to retrieve latest files concerning extendFrames diff --git a/pype/plugins/maya/publish/submit_maya_muster.py b/pype/plugins/maya/publish/submit_maya_muster.py index fdd246d012..c6660fe601 100644 --- a/pype/plugins/maya/publish/submit_maya_muster.py +++ b/pype/plugins/maya/publish/submit_maya_muster.py @@ -309,13 +309,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): output_dir = instance.data["outputDir"] metadata_path = os.path.join(output_dir, metadata_filename) - # replace path for UNC / network share paths, co PYPE is found - # over network. It assumes PYPE is located somewhere in - # PYPE_CORE_PATH - pype_root = os.environ["PYPE_SETUP_PATH"].replace( - os.path.normpath(os.environ['PYPE_CORE_MOUNT']), - os.path.normpath(os.environ['PYPE_CORE_PATH']) - ) + pype_root = os.environ["PYPE_SETUP_PATH"] # we must provide either full path to executable or use musters own # python named MPython.exe, residing directly in muster bin @@ -516,33 +510,25 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin): environment["PATH"] = os.environ["PATH"] # self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS'])) clean_environment = {} - for key in environment: + for key, value in environment.items(): clean_path = "" self.log.debug("key: {}".format(key)) - to_process = environment[key] - if key == "PYPE_CORE_MOUNT": - clean_path = environment[key] - elif "://" in environment[key]: - clean_path = environment[key] - elif os.pathsep not in to_process: - try: - path = environment[key] - path.decode('UTF-8', 'strict') - clean_path = os.path.normpath(path) - except UnicodeDecodeError: - print('path contains non UTF characters') + if "://" in value: + clean_path = value else: - for path in environment[key].split(os.pathsep): + valid_paths = [] + for path in value.split(os.pathsep): + if not path: + continue try: path.decode('UTF-8', 'strict') - clean_path += os.path.normpath(path) + os.pathsep + valid_paths.append(os.path.normpath(path)) except UnicodeDecodeError: print('path contains non UTF characters') - # this should replace paths so they are pointing to network share - clean_path = clean_path.replace( - os.path.normpath(environment['PYPE_CORE_MOUNT']), - os.path.normpath(environment['PYPE_CORE_PATH'])) + if valid_paths: + clean_path = os.pathsep.join(valid_paths) + clean_environment[key] = clean_path return clean_environment diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 9ee988b5ae..81952dcd9c 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -194,36 +194,29 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): environment["PATH"] = os.environ["PATH"] # self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS'])) 
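# (Sketch, not part of the patch: the clean-up loop below, duplicated in the
# Maya Muster submitter above, boils down to this helper. The str.decode()
# check is Python 2 idiom, kept to match the era of this code.)
import os

def clean_env_value(value):
    if "://" in value:
        return value  # URL-like values pass through untouched
    valid_paths = []
    for path in value.split(os.pathsep):
        if not path:
            continue
        try:
            path.decode("UTF-8", "strict")  # rejects non UTF-8 entries
            valid_paths.append(os.path.normpath(path))
        except UnicodeDecodeError:
            print("path contains non UTF characters")
    return os.pathsep.join(valid_paths)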
clean_environment = {} - for key in environment: + for key, value in environment.items(): clean_path = "" self.log.debug("key: {}".format(key)) - to_process = environment[key] - if key == "PYPE_CORE_MOUNT": - clean_path = environment[key] - elif "://" in environment[key]: - clean_path = environment[key] - elif os.pathsep not in to_process: - try: - path = environment[key] - path.decode('UTF-8', 'strict') - clean_path = os.path.normpath(path) - except UnicodeDecodeError: - print('path contains non UTF characters') + if "://" in value: + clean_path = value else: - for path in environment[key].split(os.pathsep): + valid_paths = [] + for path in value.split(os.pathsep): + if not path: + continue try: path.decode('UTF-8', 'strict') - clean_path += os.path.normpath(path) + os.pathsep + valid_paths.append(os.path.normpath(path)) except UnicodeDecodeError: print('path contains non UTF characters') + if valid_paths: + clean_path = os.pathsep.join(valid_paths) + if key == "PYTHONPATH": clean_path = clean_path.replace('python2', 'python3') - clean_path = clean_path.replace( - os.path.normpath(environment['PYPE_CORE_MOUNT']), - os.path.normpath(environment['PYPE_CORE_PATH']) - ) + self.log.debug("clean path: {}".format(clean_path)) clean_environment[key] = clean_path environment = clean_environment From 6d81ef6cd72047fce8dc408d928dabab6783cbde Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 18:27:37 +0200 Subject: [PATCH 039/207] storage is not used in pype --- docs/source/conf.py | 4 --- pype/logging/gui/widgets.py | 65 +------------------------------------ 2 files changed, 1 insertion(+), 68 deletions(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index d022332a56..517c441ccd 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,11 +15,8 @@ # import os # import sys # sys.path.insert(0, os.path.abspath('.')) -import sys import os -from pprint import pprint from pypeapp.pypeLauncher import PypeLauncher -from pypeapp.storage import Storage from pypeapp.deployment import Deployment pype_setup = os.getenv('PYPE_SETUP_PATH') @@ -32,7 +29,6 @@ os.environ['PYPE_CONFIG'] = config_path os.environ['TOOL_ENV'] = os.path.normpath(os.path.join(config_path, 'environments')) launcher._add_modules() -Storage().update_environment() launcher._load_default_environments(tools=tools) # -- Project information ----------------------------------------------------- diff --git a/pype/logging/gui/widgets.py b/pype/logging/gui/widgets.py index 10aad3c282..1daaa28326 100644 --- a/pype/logging/gui/widgets.py +++ b/pype/logging/gui/widgets.py @@ -397,7 +397,7 @@ class LogDetailWidget(QtWidgets.QWidget): layout = QtWidgets.QVBoxLayout(self) label = QtWidgets.QLabel("Detail") - detail_widget = LogDetailTextEdit() + detail_widget = QtWidgets.QTextEdit() detail_widget.setReadOnly(True) layout.addWidget(label) layout.addWidget(detail_widget) @@ -420,66 +420,3 @@ class LogDetailWidget(QtWidgets.QWidget): self.detail_widget.setHtml(self.html_text.format(**data)) - - -class LogDetailTextEdit(QtWidgets.QTextEdit): - """QTextEdit that displays version specific information. - - This also overrides the context menu to add actions like copying - source path to clipboard or copying the raw data of the version - to clipboard. 
- - """ - def __init__(self, parent=None): - super(LogDetailTextEdit, self).__init__(parent=parent) - - # self.data = { - # "source": None, - # "raw": None - # } - # - # def contextMenuEvent(self, event): - # """Context menu with additional actions""" - # menu = self.createStandardContextMenu() - # - # # Add additional actions when any text so we can assume - # # the version is set. - # if self.toPlainText().strip(): - # - # menu.addSeparator() - # action = QtWidgets.QAction("Copy source path to clipboard", - # menu) - # action.triggered.connect(self.on_copy_source) - # menu.addAction(action) - # - # action = QtWidgets.QAction("Copy raw data to clipboard", - # menu) - # action.triggered.connect(self.on_copy_raw) - # menu.addAction(action) - # - # menu.exec_(event.globalPos()) - # del menu - # - # def on_copy_source(self): - # """Copy formatted source path to clipboard""" - # source = self.data.get("source", None) - # if not source: - # return - # - # # path = source.format(root=api.registered_root()) - # # clipboard = QtWidgets.QApplication.clipboard() - # # clipboard.setText(path) - # - # def on_copy_raw(self): - # """Copy raw version data to clipboard - # - # The data is string formatted with `pprint.pformat`. - # - # """ - # raw = self.data.get("raw", None) - # if not raw: - # return - # - # raw_text = pprint.pformat(raw) - # clipboard = QtWidgets.QApplication.clipboard() - # clipboard.setText(raw_text) From 5b92826e1f5f9453b0e42ecb4350fea65703dafb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 9 Apr 2020 19:16:05 +0200 Subject: [PATCH 040/207] launched applications have set root environments --- pype/ftrack/lib/ftrack_app_handler.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py index 2430f44ae7..407a0764a4 100644 --- a/pype/ftrack/lib/ftrack_app_handler.py +++ b/pype/ftrack/lib/ftrack_app_handler.py @@ -215,6 +215,7 @@ class AppAction(BaseHandler): "AVALON_HIERARCHY": hierarchy, "AVALON_WORKDIR": workdir }) + prep_env.update(anatomy.roots_obj.root_environments()) # collect all parents from the task parents = [] From 7b4163271be8e8535336a166ac53c9fd5f051923 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 10 Apr 2020 18:42:10 +0200 Subject: [PATCH 041/207] prepare project created roots --- pype/ftrack/actions/action_prepare_project.py | 363 +++++++++++------- 1 file changed, 226 insertions(+), 137 deletions(-) diff --git a/pype/ftrack/actions/action_prepare_project.py b/pype/ftrack/actions/action_prepare_project.py index 4cc6cfd8df..9b21febb81 100644 --- a/pype/ftrack/actions/action_prepare_project.py +++ b/pype/ftrack/actions/action_prepare_project.py @@ -1,9 +1,8 @@ import os import json -from ruamel import yaml from pype.ftrack import BaseAction -from pypeapp import config +from pypeapp import config, Anatomy, project_overrides_dir_path from pype.ftrack.lib.avalon_sync import get_avalon_attr @@ -24,6 +23,7 @@ class PrepareProject(BaseAction): # Key to store info about trigerring create folder structure create_project_structure_key = "create_folder_structure" + item_splitter = {'type': 'label', 'value': '---'} def discover(self, session, entities, event): ''' Validation ''' @@ -41,15 +41,190 @@ class PrepareProject(BaseAction): # Inform user that this may take a while self.show_message(event, "Preparing data... 
Please wait", True) + self.log.debug("Preparing data which will be shown") self.log.debug("Loading custom attributes") - cust_attrs, hier_cust_attrs = get_avalon_attr(session, True) - project_defaults = config.get_presets( - entities[0]["full_name"] - ).get("ftrack", {}).get("project_defaults", {}) - self.log.debug("Preparing data which will be shown") + project_name = entities[0]["full_name"] + + project_defaults = ( + config.get_presets(project_name) + .get("ftrack", {}) + .get("project_defaults", {}) + ) + + anatomy = Anatomy(project_name) + if not anatomy.roots: + return { + "success": False, + "message": ( + "Have issues with loading Roots for project \"{}\"." + ).format(anatomy.project_name) + } + + root_items = self.prepare_root_items(anatomy) + + ca_items, multiselect_enumerators = ( + self.prepare_custom_attribute_items(project_defaults) + ) + + self.log.debug("Heavy items are ready. Preparing last items group.") + + title = "Prepare Project" + items = [] + + # Add root items + items.extend(root_items) + items.append(self.item_splitter) + + # Ask if want to trigger Action Create Folder Structure + items.append({ + "type": "label", + "value": "
Want to create basic Folder Structure?
" + }) + items.append({ + "name": self.create_project_structure_key, + "type": "boolean", + "value": False, + "label": "Check if Yes" + }) + + items.append(self.item_splitter) + items.append({ + "type": "label", + "value": "
Set basic Attributes:
" + }) + + items.extend(ca_items) + + # This item will be last (before enumerators) + # - sets value of auto synchronization + auto_sync_name = "avalon_auto_sync" + auto_sync_item = { + "name": auto_sync_name, + "type": "boolean", + "value": project_defaults.get(auto_sync_name, False), + "label": "AutoSync to Avalon" + } + # Add autosync attribute + items.append(auto_sync_item) + + # Add enumerator items at the end + for item in multiselect_enumerators: + items.append(item) + + return { + "items": items, + "title": title + } + + def prepare_root_items(self, anatomy): + root_items = [] + self.log.debug("Root items preparation begins.") + + root_names = anatomy.root_names() + roots = anatomy.roots + + root_items.append({ + "type": "label", + "value": "
Check your Project root settings
" + }) + root_items.append({ + "type": "label", + "value": ( + "
NOTE: Roots are crucial for path filling" + " (and creating folder structure).
" + ) + }) + root_items.append({ + "type": "label", + "value": ( + "
WARNING: Do not change roots on a running project," + " that will cause workflow issues.
" + ) + }) + + default_roots = anatomy.roots + while isinstance(default_roots, dict): + key = tuple(default_roots.keys())[0] + default_roots = default_roots[key] + + empty_text = "Enter root path here..." + + # Root names is None when anatomy templates contain "{root}" + all_platforms = ["windows", "linux", "darwin"] + if root_names is None: + root_items.append(self.item_splitter) + # find first possible key + for platform in all_platforms: + value = default_roots.raw_data.get(platform) or "" + root_items.append({ + "label": platform, + "name": "__root__{}".format(platform), + "type": "text", + "value": value, + "empty_text": empty_text + }) + return root_items + + root_name_data = {} + missing_roots = [] + for root_name in root_names: + root_name_data[root_name] = {} + if not isinstance(roots, dict): + missing_roots.append(root_name) + continue + + root_item = roots.get(root_name) + if not root_item: + missing_roots.append(root_name) + continue + + for platform in all_platforms: + root_name_data[root_name][platform] = ( + root_item.raw_data.get(platform) or "" + ) + + if missing_roots: + default_values = {} + for platform in all_platforms: + default_values[platform] = ( + default_roots.raw_data.get(platform) or "" + ) + + for root_name in missing_roots: + root_name_data[root_name] = default_values + + root_names = list(root_name_data.keys()) + root_items.append({ + "type": "hidden", + "name": "__rootnames__", + "value": json.dumps(root_names) + }) + + for root_name, values in root_name_data.items(): + root_items.append(self.item_splitter) + root_items.append({ + "type": "label", + "value": "Root: \"{}\"".format(root_name) + }) + for platform, value in values.items(): + root_items.append({ + "label": platform, + "name": "__root__{}{}".format(root_name, platform), + "type": "text", + "value": value, + "empty_text": empty_text + }) + + self.log.debug("Root items preparation ended.") + return root_items + + def _attributes_to_set(self, project_defaults): attributes_to_set = {} + + cust_attrs, hier_cust_attrs = get_avalon_attr(self.session, True) + for attr in hier_cust_attrs: key = attr["key"] if key.startswith("avalon_"): @@ -77,45 +252,17 @@ class PrepareProject(BaseAction): attributes_to_set.items(), key=lambda x: x[1]["label"] )) + return attributes_to_set + + def prepare_custom_attribute_items(self, project_defaults): + items = [] + multiselect_enumerators = [] + attributes_to_set = self._attributes_to_set(project_defaults) + self.log.debug("Preparing interface for keys: \"{}\"".format( str([key for key in attributes_to_set]) )) - item_splitter = {'type': 'label', 'value': '---'} - title = "Prepare Project" - items = [] - - # Ask if want to trigger Action Create Folder Structure - items.append({ - "type": "label", - "value": "
Want to create basic Folder Structure?
" - }) - - items.append({ - "name": self.create_project_structure_key, - "type": "boolean", - "value": False, - "label": "Check if Yes" - }) - - items.append(item_splitter) - items.append({ - "type": "label", - "value": "
Set basic Attributes:
" - }) - - multiselect_enumerators = [] - - # This item will be last (before enumerators) - # - sets value of auto synchronization - auto_sync_name = "avalon_auto_sync" - auto_sync_item = { - "name": auto_sync_name, - "type": "boolean", - "value": project_defaults.get(auto_sync_name, False), - "label": "AutoSync to Avalon" - } - for key, in_data in attributes_to_set.items(): attr = in_data["object"] @@ -139,8 +286,7 @@ class PrepareProject(BaseAction): attr_config_data = json.loads(attr_config["data"]) if attr_config["multiSelect"] is True: - multiselect_enumerators.append(item_splitter) - + multiselect_enumerators.append(self.item_splitter) multiselect_enumerators.append({ "type": "label", "value": in_data["label"] @@ -160,10 +306,7 @@ class PrepareProject(BaseAction): "label": "- {}".format(option["menu"]) } if default: - if ( - isinstance(default, list) or - isinstance(default, tuple) - ): + if isinstance(default, (list, tuple)): if name in default: item["value"] = True else: @@ -204,17 +347,7 @@ class PrepareProject(BaseAction): items.append(item) - # Add autosync attribute - items.append(auto_sync_item) - - # Add enumerator items at the end - for item in multiselect_enumerators: - items.append(item) - - return { - 'items': items, - 'title': title - } + return items, multiselect_enumerators def launch(self, session, entities, event): if not event['data'].get('values', {}): @@ -222,6 +355,35 @@ class PrepareProject(BaseAction): in_data = event['data']['values'] + root_values = {} + root_key = "__root__" + for key, value in tuple(in_data.items()): + if key.startswith(root_key): + _key = key[len(root_key):] + root_values[_key] = in_data.pop(key) + + root_names = in_data.pop("__rootnames__", None) + root_data = {} + if root_names: + for root_name in json.loads(root_names): + root_data[root_name] = {} + for key, value in tuple(root_values.items()): + if key.startswith(root_name): + _key = key[len(root_name):] + root_data[root_name][_key] = value + + else: + for key, value in root_values.items(): + root_data[key] = value + + project_name = entities[0]["full_name"] + anatomy = Anatomy(project_name) + anatomy.templates_obj.save_project_overrides(project_name) + anatomy.roots_obj.save_project_overrides( + project_name, root_data, override=True + ) + anatomy.reset() + # pop out info about creating project structure create_proj_struct = in_data.pop(self.create_project_structure_key) @@ -269,94 +431,22 @@ class PrepareProject(BaseAction): def create_project_specific_config(self, project_name, json_data): self.log.debug("*** Creating project specifig configs ***") - - path_proj_configs = os.environ.get('PYPE_PROJECT_CONFIGS', "") - - # Skip if PYPE_PROJECT_CONFIGS is not set - # TODO show user OS message - if not path_proj_configs: - self.log.warning(( - "Environment variable \"PYPE_PROJECT_CONFIGS\" is not set." - " Project specific config can't be set." - )) - return - - path_proj_configs = os.path.normpath(path_proj_configs) - # Skip if path does not exist - # TODO create if not exist?!!! - if not os.path.exists(path_proj_configs): - self.log.warning(( - "Path set in Environment variable \"PYPE_PROJECT_CONFIGS\"" - " Does not exist." - )) - return - - project_specific_path = os.path.normpath( - os.path.join(path_proj_configs, project_name) - ) + project_specific_path = project_overrides_dir_path(project_name) if not os.path.exists(project_specific_path): os.makedirs(project_specific_path) self.log.debug(( "Project specific config folder for project \"{}\" created." 
).format(project_name)) - # Anatomy #################################### - self.log.debug("--- Processing Anatomy Begins: ---") - - anatomy_dir = os.path.normpath(os.path.join( - project_specific_path, "anatomy" - )) - anatomy_path = os.path.normpath(os.path.join( - anatomy_dir, "default.yaml" - )) - - anatomy = None - if os.path.exists(anatomy_path): - self.log.debug( - "Anatomy file already exist. Trying to read: \"{}\"".format( - anatomy_path - ) - ) - # Try to load data - with open(anatomy_path, 'r') as file_stream: - try: - anatomy = yaml.load(file_stream, Loader=yaml.loader.Loader) - self.log.debug("Reading Anatomy file was successful") - except yaml.YAMLError as exc: - self.log.warning( - "Reading Yaml file failed: \"{}\"".format(anatomy_path), - exc_info=True - ) - - if not anatomy: - self.log.debug("Anatomy is not set. Duplicating default.") - # Create Anatomy folder - if not os.path.exists(anatomy_dir): - self.log.debug( - "Creating Anatomy folder: \"{}\"".format(anatomy_dir) - ) - os.makedirs(anatomy_dir) - - source_items = [ - os.environ["PYPE_CONFIG"], "anatomy", "default.yaml" - ] - - source_path = os.path.normpath(os.path.join(*source_items)) - with open(source_path, 'r') as file_stream: - source_data = file_stream.read() - - with open(anatomy_path, 'w') as file_stream: - file_stream.write(source_data) - # Presets #################################### self.log.debug("--- Processing Presets Begins: ---") - project_defaults_dir = os.path.normpath(os.path.join(*[ + project_defaults_dir = os.path.normpath(os.path.join( project_specific_path, "presets", "ftrack" - ])) - project_defaults_path = os.path.normpath(os.path.join(*[ + )) + project_defaults_path = os.path.normpath(os.path.join( project_defaults_dir, "project_defaults.json" - ])) + )) # Create folder if not exist if not os.path.exists(project_defaults_dir): self.log.debug("Creating Ftrack Presets folder: \"{}\"".format( @@ -372,5 +462,4 @@ class PrepareProject(BaseAction): def register(session, plugins_presets={}): '''Register plugin. 
Called when used as an plugin.'''
-    PrepareProject(session, plugins_presets).register()

From 37135599150878be9091b85980570440ac4bd338 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 10 Apr 2020 18:52:00 +0200
Subject: [PATCH 042/207] small upgrade

---
 pype/ftrack/actions/action_prepare_project.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/ftrack/actions/action_prepare_project.py b/pype/ftrack/actions/action_prepare_project.py
index 9b21febb81..2693a5750b 100644
--- a/pype/ftrack/actions/action_prepare_project.py
+++ b/pype/ftrack/actions/action_prepare_project.py
@@ -357,7 +357,7 @@ class PrepareProject(BaseAction):
 
         root_values = {}
         root_key = "__root__"
-        for key, value in tuple(in_data.items()):
+        for key in tuple(in_data.keys()):
             if key.startswith(root_key):
                 _key = key[len(root_key):]
                 root_values[_key] = in_data.pop(key)

From 4a3b98c94516c9a66480f1e6ee0988dfd33a942d Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 14 Apr 2020 11:08:47 +0200
Subject: [PATCH 043/207] action_create_folders uses new anatomy

---
 pype/ftrack/actions/action_create_folders.py | 298 +++++++------------
 1 file changed, 109 insertions(+), 189 deletions(-)

diff --git a/pype/ftrack/actions/action_create_folders.py b/pype/ftrack/actions/action_create_folders.py
index 80618e67e8..8f3358cf9c 100644
--- a/pype/ftrack/actions/action_create_folders.py
+++ b/pype/ftrack/actions/action_create_folders.py
@@ -1,30 +1,16 @@
 import os
-import sys
-import logging
-import argparse
-import re
-
-import ftrack_api
 
 from pype.ftrack import BaseAction
 from avalon import lib as avalonlib
-from pype.ftrack.lib.io_nonsingleton import DbConnector
 from pypeapp import config, Anatomy
 
 
 class CreateFolders(BaseAction):
-    #: Action identifier.
-    identifier = 'create.folders'
-
-    #: Action label.
-    label = 'Create Folders'
-
-    #: Action Icon.
- icon = '{}/ftrack/action_icons/CreateFolders.svg'.format( - os.environ.get('PYPE_STATICS_SERVER', '') + identifier = "create.folders" + label = "Create Folders" + icon = "{}/ftrack/action_icons/CreateFolders.svg".format( + os.environ.get("PYPE_STATICS_SERVER", "") ) - db = DbConnector() - def discover(self, session, entities, event): if len(entities) != 1: return False @@ -90,49 +76,52 @@ class CreateFolders(BaseAction): with_childrens = event["data"]["values"]["children_included"] entity = entities[0] - if entity.entity_type.lower() == 'project': + if entity.entity_type.lower() == "project": proj = entity else: - proj = entity['project'] - project_name = proj['full_name'] - project_code = proj['name'] + proj = entity["project"] + project_name = proj["full_name"] + project_code = proj["name"] - if entity.entity_type.lower() == 'project' and with_childrens == False: + if entity.entity_type.lower() == 'project' and with_childrens is False: return { 'success': True, 'message': 'Nothing was created' } - data = { - "root": os.environ["AVALON_PROJECTS"], - "project": { - "name": project_name, - "code": project_code - } - } + all_entities = [] all_entities.append(entity) if with_childrens: all_entities = self.get_notask_children(entity) - av_project = None - try: - self.db.install() - self.db.Session['AVALON_PROJECT'] = project_name - av_project = self.db.find_one({'type': 'project'}) - template_work = av_project['config']['template']['work'] - template_publish = av_project['config']['template']['publish'] - self.db.uninstall() - except Exception: - templates = Anatomy().templates - template_work = templates["avalon"]["work"] - template_publish = templates["avalon"]["publish"] + anatomy = Anatomy(project_name) + + work_keys = ["work", "folder"] + work_template = anatomy.templates + for key in work_keys: + work_template = work_template[key] + work_has_apps = "{app" in work_template + + publish_keys = ["publish", "folder"] + publish_template = anatomy.templates + for key in publish_keys: + publish_template = publish_template[key] + publish_has_apps = "{app" in publish_template + + presets = config.get_presets() + app_presets = presets.get("tools", {}).get("sw_folders") + cached_apps = {} collected_paths = [] - presets = config.get_presets()["tools"]["sw_folders"] for entity in all_entities: if entity.entity_type.lower() == "project": continue - ent_data = data.copy() + ent_data = { + "project": { + "name": project_name, + "code": project_code + } + } ent_data["asset"] = entity["name"] @@ -144,69 +133,72 @@ class CreateFolders(BaseAction): ent_data["hierarchy"] = hierarchy tasks_created = False - if entity['children']: - for child in entity['children']: - if child['object_type']['name'].lower() != 'task': - continue - tasks_created = True - task_type_name = child['type']['name'].lower() - task_data = ent_data.copy() - task_data['task'] = child['name'] - possible_apps = presets.get(task_type_name, []) - template_work_created = False - template_publish_created = False - apps = [] + for child in entity["children"]: + if child["object_type"]["name"].lower() != "task": + continue + tasks_created = True + task_type_name = child["type"]["name"].lower() + task_data = ent_data.copy() + task_data["task"] = child["name"] + + apps = [] + if app_presets and (work_has_apps or publish_has_apps): + possible_apps = app_presets.get(task_type_name, []) for app in possible_apps: - try: - app_data = avalonlib.get_application(app) - app_dir = app_data['application_dir'] - except ValueError: - app_dir = app + if app in 
cached_apps:
+                        app_dir = cached_apps[app]
+                    else:
+                        try:
+                            app_data = avalonlib.get_application(app)
+                            app_dir = app_data["application_dir"]
+                        except ValueError:
+                            app_dir = app
+                        cached_apps[app] = app_dir
                     apps.append(app_dir)
 
-            # Template wok
-            if '{app}' in template_work:
-                for app in apps:
-                    template_work_created = True
-                    app_data = task_data.copy()
-                    app_data['app'] = app
-                    collected_paths.append(
-                        self.compute_template(
-                            template_work, app_data
-                        )
-                    )
-                if template_work_created is False:
-                    collected_paths.append(
-                        self.compute_template(template_work, task_data)
-                    )
-            # Template publish
-            if '{app}' in template_publish:
-                for app in apps:
-                    template_publish_created = True
-                    app_data = task_data.copy()
-                    app_data['app'] = app
-                    collected_paths.append(
-                        self.compute_template(
-                            template_publish, app_data, True
-                        )
-                    )
-                if template_publish_created is False:
-                    collected_paths.append(
-                        self.compute_template(
-                            template_publish, task_data, True
-                        )
-                    )
+                # Template work
+                if work_has_apps:
+                    app_data = task_data.copy()
+                    for app in apps:
+                        app_data["app"] = app
+                        collected_paths.append(self.compute_template(
+                            anatomy, app_data, work_keys
+                        ))
+                else:
+                    collected_paths.append(self.compute_template(
+                        anatomy, task_data, work_keys
+                    ))
+
+                # Template publish
+                if publish_has_apps:
+                    app_data = task_data.copy()
+                    for app in apps:
+                        app_data["app"] = app
+                        collected_paths.append(self.compute_template(
+                            anatomy, app_data, publish_keys
+                        ))
+                else:
+                    collected_paths.append(self.compute_template(
+                        anatomy, task_data, publish_keys
+                    ))
 
             if not tasks_created:
                 # create path for entity
-                collected_paths.append(
-                    self.compute_template(template_work, ent_data)
-                )
-                collected_paths.append(
-                    self.compute_template(template_publish, ent_data)
-                )
-        if len(collected_paths) > 0:
-            self.log.info('Creating folders:')
+                collected_paths.append(self.compute_template(
+                    anatomy, ent_data, work_keys
+                ))
+                collected_paths.append(self.compute_template(
+                    anatomy, ent_data, publish_keys
+                ))
+
+        if len(collected_paths) == 0:
+            return {
+                "success": True,
+                "message": "No project folders to create."
+ } + + self.log.info("Creating folders:") + for path in set(collected_paths): self.log.info(path) if not os.path.exists(path): @@ -219,100 +211,28 @@ class CreateFolders(BaseAction): def get_notask_children(self, entity): output = [] - if entity.get('object_type', {}).get( - 'name', entity.entity_type - ).lower() == 'task': + if entity.entity_type.lower() == "task": return output - else: - output.append(entity) - if entity['children']: - for child in entity['children']: - output.extend(self.get_notask_children(child)) + + output.append(entity) + for child in entity["children"]: + output.extend(self.get_notask_children(child)) return output - def template_format(self, template, data): + def compute_template(self, anatomy, data, anatomy_keys): + filled_template = anatomy.format_all(data) + for key in anatomy_keys: + filled_template = filled_template[key] - partial_data = PartialDict(data) + if filled_template.solved: + return os.path.normpath(filled_template) - # remove subdict items from string (like 'project[name]') - subdict = PartialDict() - count = 1 - store_pattern = 5*'_'+'{:0>3}' - regex_patern = "\{\w*\[[^\}]*\]\}" - matches = re.findall(regex_patern, template) - - for match in matches: - key = store_pattern.format(count) - subdict[key] = match - template = template.replace(match, '{'+key+'}') - count += 1 - # solve fillind keys with optional keys - solved = self._solve_with_optional(template, partial_data) - # try to solve subdict and replace them back to string - for k, v in subdict.items(): - try: - v = v.format_map(data) - except (KeyError, TypeError): - pass - subdict[k] = v - - return solved.format_map(subdict) - - def _solve_with_optional(self, template, data): - # Remove optional missing keys - pattern = re.compile(r"(<.*?[^{0]*>)[^0-9]*?") - invalid_optionals = [] - for group in pattern.findall(template): - try: - group.format(**data) - except KeyError: - invalid_optionals.append(group) - for group in invalid_optionals: - template = template.replace(group, "") - - solved = template.format_map(data) - - # solving after format optional in second round - for catch in re.compile(r"(<.*?[^{0]*>)[^0-9]*?").findall(solved): - if "{" in catch: - # remove all optional - solved = solved.replace(catch, "") - else: - # Remove optional symbols - solved = solved.replace(catch, catch[1:-1]) - - return solved - - def compute_template(self, str, data, task=False): - first_result = self.template_format(str, data) - if first_result == first_result.split('{')[0]: - return os.path.normpath(first_result) - if task: - return os.path.normpath(first_result.split('{')[0]) - - index = first_result.index('{') - - regex = '\{\w*[^\}]*\}' - match = re.findall(regex, first_result[index:])[0] - without_missing = str.split(match)[0].split('}') - output_items = [] - for part in without_missing: - if '{' in part: - output_items.append(part + '}') - return os.path.normpath( - self.template_format(''.join(output_items), data) + self.log.warning( + "Template \"{}\" was not fully filled \"{}\"".format( + filled_template.template, filled_template + ) ) - - -class PartialDict(dict): - def __getitem__(self, item): - out = super().__getitem__(item) - if isinstance(out, dict): - return '{'+item+'}' - return out - - def __missing__(self, key): - return '{'+key+'}' + return os.path.normpath(filled_template.split("{")[0]) def register(session, plugins_presets={}): From 0b2c1fc99f3c10e90d7b489786991a27f41af959 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 14 Apr 2020 11:19:12 +0200 Subject: [PATCH 044/207] added 
get_project_from_entity to ftrack base event handler --- pype/ftrack/lib/ftrack_base_handler.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/pype/ftrack/lib/ftrack_base_handler.py b/pype/ftrack/lib/ftrack_base_handler.py index f11cb020e9..952ac2e74f 100644 --- a/pype/ftrack/lib/ftrack_base_handler.py +++ b/pype/ftrack/lib/ftrack_base_handler.py @@ -623,3 +623,19 @@ class BaseHandler(object): self.log.debug(( "Publishing event: {}" ).format(str(event.__dict__))) + + def get_project_from_entity(self, entity): + low_entity_type = entity.entity_type.lower() + if low_entity_type == "project": + return entity + + if low_entity_type == "reviewsession": + return entity["project"] + + if low_entity_type == "filecomponent": + entity = entity["version"] + + project_data = entity["link"][0] + return self.session.query( + "Project where id is {}".format(project_data["id"]) + ).one() From 1cb3f2c8474193aa692b1c23d3e415f0ae8515f8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 14 Apr 2020 12:10:12 +0200 Subject: [PATCH 045/207] formatting preparation action create project structure --- .../action_create_project_structure.py | 127 +++++------------- 1 file changed, 35 insertions(+), 92 deletions(-) diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py index 6124ebe843..163ac4836e 100644 --- a/pype/ftrack/actions/action_create_project_structure.py +++ b/pype/ftrack/actions/action_create_project_structure.py @@ -1,36 +1,26 @@ import os -import sys import re -import argparse -import logging -import ftrack_api from pype.ftrack import BaseAction -from pypeapp import config +from pypeapp import config, Anatomy class CreateProjectFolders(BaseAction): - '''Edit meta data action.''' - #: Action identifier. - identifier = 'create.project.structure' - #: Action label. - label = 'Create Project Structure' - #: Action description. 
- description = 'Creates folder structure' - #: roles that are allowed to register this action - role_list = ['Pypeclub', 'Administrator', 'Project Manager'] - icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format( - os.environ.get('PYPE_STATICS_SERVER', '') + identifier = "create.project.structure" + label = "Create Project Structure" + description = "Creates folder structure" + role_list = ["Pypeclub", "Administrator", "Project Manager"] + icon = "{}/ftrack/action_icons/CreateProjectFolders.svg".format( + os.environ.get("PYPE_STATICS_SERVER", "") ) - pattern_array = re.compile('\[.*\]') - pattern_ftrack = '.*\[[.]*ftrack[.]*' - pattern_ent_ftrack = 'ftrack\.[^.,\],\s,]*' - project_root_key = '__project_root__' + pattern_array = re.compile(r"\[.*\]") + pattern_ftrack = re.compile(r".*\[[.]*ftrack[.]*") + pattern_ent_ftrack = re.compile(r"ftrack\.[^.,\],\s,]*") + project_root_key = "__project_root__" def discover(self, session, entities, event): - ''' Validation ''' if len(entities) != 1: return False @@ -41,22 +31,19 @@ class CreateProjectFolders(BaseAction): def launch(self, session, entities, event): entity = entities[0] - if entity.entity_type.lower() == 'project': - project = entity - else: - project = entity['project'] - - presets = config.get_presets()['tools']['project_folder_structure'] + project = self.get_project_from_entity(entity) + presets = config.get_presets()["tools"]["project_folder_structure"] try: # Get paths based on presets basic_paths = self.get_path_items(presets) self.create_folders(basic_paths, entity) self.create_ftrack_entities(basic_paths, project) - except Exception as e: + + except Exception as exc: session.rollback() return { - 'success': False, - 'message': str(e) + "success": False, + "message": str(exc) } return True @@ -113,15 +100,15 @@ class CreateProjectFolders(BaseAction): def trigger_creation(self, separation, parent): for item, subvalues in separation.items(): matches = re.findall(self.pattern_array, item) - ent_type = 'Folder' + ent_type = "Folder" if len(matches) == 0: name = item else: match = matches[0] - name = item.replace(match, '') + name = item.replace(match, "") ent_type_match = re.findall(self.pattern_ent_ftrack, match) if len(ent_type_match) > 0: - ent_type_split = ent_type_match[0].split('.') + ent_type_split = ent_type_match[0].split(".") if len(ent_type_split) == 2: ent_type = ent_type_split[1] new_parent = self.create_ftrack_entity(name, ent_type, parent) @@ -130,22 +117,22 @@ class CreateProjectFolders(BaseAction): self.trigger_creation(subvalue, new_parent) def create_ftrack_entity(self, name, ent_type, parent): - for children in parent['children']: - if children['name'] == name: + for children in parent["children"]: + if children["name"] == name: return children data = { - 'name': name, - 'parent_id': parent['id'] + "name": name, + "parent_id": parent["id"] } - if parent.entity_type.lower() == 'project': - data['project_id'] = parent['id'] + if parent.entity_type.lower() == "project": + data["project_id"] = parent["id"] else: - data['project_id'] = parent['project']['id'] + data["project_id"] = parent["project"]["id"] existing_entity = self.session.query(( "TypedContext where name is \"{}\" and " "parent_id is \"{}\" and project_id is \"{}\"" - ).format(name, data['parent_id'], data['project_id'])).first() + ).format(name, data["parent_id"], data["project_id"])).first() if existing_entity: return existing_entity @@ -161,12 +148,11 @@ class CreateProjectFolders(BaseAction): else: paths = self.get_path_items(value) for 
path in paths: - if isinstance(path, str): - output.append([key, path]) - else: - p = [key] - p.extend(path) - output.append(p) + if not isinstance(path, (list, tuple)): + path = [path] + + output.append([key, *path]) + return output def compute_paths(self, basic_paths_items, project_root): @@ -176,7 +162,7 @@ class CreateProjectFolders(BaseAction): for path_item in path_items: matches = re.findall(self.pattern_array, path_item) if len(matches) > 0: - path_item = path_item.replace(matches[0], '') + path_item = path_item.replace(matches[0], "") if path_item == self.project_root_key: path_item = project_root clean_items.append(path_item) @@ -193,55 +179,12 @@ class CreateProjectFolders(BaseAction): project_root = os.path.sep.join(project_root_items) full_paths = self.compute_paths(basic_paths, project_root) - #Create folders + # Create folders for path in full_paths: if os.path.exists(path): continue os.makedirs(path.format(project_root=project_root)) - - def register(session, plugins_presets={}): - '''Register plugin. Called when used as an plugin.''' - CreateProjectFolders(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. - loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' - ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) From 65098129dd592c69784451c0c95204299a46875e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 14 Apr 2020 12:11:56 +0200 Subject: [PATCH 046/207] delivery action should work with new anatomy --- pype/ftrack/actions/action_delivery.py | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/pype/ftrack/actions/action_delivery.py b/pype/ftrack/actions/action_delivery.py index 29fdfe39ae..9d686929de 100644 --- a/pype/ftrack/actions/action_delivery.py +++ b/pype/ftrack/actions/action_delivery.py @@ -2,7 +2,6 @@ import os import copy import shutil import collections -import string import clique from bson.objectid import ObjectId @@ -17,24 +16,18 @@ from pype.ftrack.lib.avalon_sync import CustAttrIdKey class Delivery(BaseAction): - '''Edit meta data action.''' - #: Action identifier. identifier = "delivery.action" - #: Action label. label = "Delivery" - #: Action description. 
description = "Deliver data to client" - #: roles that are allowed to register this action role_list = ["Pypeclub", "Administrator", "Project manager"] - icon = '{}/ftrack/action_icons/Delivery.svg'.format( - os.environ.get('PYPE_STATICS_SERVER', '') + icon = "{}/ftrack/action_icons/Delivery.svg".format( + os.environ.get("PYPE_STATICS_SERVER", "") ) db_con = DbConnector() def discover(self, session, entities, event): - ''' Validation ''' for entity in entities: if entity.entity_type.lower() == "assetversion": return True @@ -301,17 +294,10 @@ class Delivery(BaseAction): repre = repres_by_name.get(comp_name) repres_to_deliver.append(repre) - if not location_path: - location_path = os.environ.get("AVALON_PROJECTS") or "" - - print(location_path) - anatomy = Anatomy(project_name) for repre in repres_to_deliver: # Get destination repre path anatomy_data = copy.deepcopy(repre["context"]) - anatomy_data["root"] = location_path - anatomy_filled = anatomy.format_all(anatomy_data) test_path = anatomy_filled["delivery"][anatomy_name] @@ -341,7 +327,7 @@ class Delivery(BaseAction): self.report_items[msg].append(sub_msg) self.log.warning( "{} Representation: \"{}\" Filled: <{}>".format( - msg, str(repre["_id"]), str(result) + msg, str(repre["_id"]), str(test_path) ) ) continue @@ -352,7 +338,7 @@ class Delivery(BaseAction): if frame: repre["context"]["frame"] = len(str(frame)) * "#" - repre_path = self.path_from_represenation(repre) + repre_path = self.path_from_represenation(repre, anatomy) # TODO add backup solution where root of path from component # is repalced with AVALON_PROJECTS root if not frame: @@ -452,7 +438,7 @@ class Delivery(BaseAction): self.copy_file(src, dst) - def path_from_represenation(self, representation): + def path_from_represenation(self, representation, anatomy): try: template = representation["data"]["template"] @@ -461,7 +447,7 @@ class Delivery(BaseAction): try: context = representation["context"] - context["root"] = os.environ.get("AVALON_PROJECTS") or "" + context["root"] = anatomy.roots path = pipeline.format_template_with_optional_keys( context, template ) From 7a91ec8dce18c26bde3701539393862ae5253faf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 14 Apr 2020 12:16:42 +0200 Subject: [PATCH 047/207] action delete old versions is ready to use new anatomy roots --- .../ftrack/actions/action_delete_old_versions.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index f6a66318c9..c13845f58c 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -7,6 +7,7 @@ from pymongo import UpdateOne from pype.ftrack import BaseAction from pype.ftrack.lib.io_nonsingleton import DbConnector +from pypeapp import Anatomy import avalon.pipeline @@ -21,8 +22,8 @@ class DeleteOldVersions(BaseAction): " archived with only lates versions." 
) role_list = ["Pypeclub", "Project Manager", "Administrator"] - icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format( - os.environ.get('PYPE_STATICS_SERVER', '') + icon = "{}/ftrack/action_icons/PypeAdmin.svg".format( + os.environ.get("PYPE_STATICS_SERVER", "") ) dbcon = DbConnector() @@ -194,6 +195,7 @@ class DeleteOldVersions(BaseAction): # Set Mongo collection project_name = project["full_name"] + anatomy = Anatomy(project_name) self.dbcon.Session["AVALON_PROJECT"] = project_name self.log.debug("Project is set to {}".format(project_name)) @@ -307,7 +309,7 @@ class DeleteOldVersions(BaseAction): dir_paths = {} file_paths_by_dir = collections.defaultdict(list) for repre in repres: - file_path, seq_path = self.path_from_represenation(repre) + file_path, seq_path = self.path_from_represenation(repre, anatomy) if file_path is None: self.log.warning(( "Could not format path for represenation \"{}\"" @@ -495,21 +497,17 @@ class DeleteOldVersions(BaseAction): self.log.debug("Removed folder: {}".format(dir_path)) os.rmdir(dir_path) - def path_from_represenation(self, representation): + def path_from_represenation(self, representation, anatomy): try: template = representation["data"]["template"] except KeyError: return (None, None) - root = os.environ["AVALON_PROJECTS"] - if not root: - return (None, None) - sequence_path = None try: context = representation["context"] - context["root"] = root + context["root"] = anatomy.roots path = avalon.pipeline.format_template_with_optional_keys( context, template ) From 4822fc88bcadbeb769923119fcc4f0ec8710aabf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 14 Apr 2020 14:10:57 +0200 Subject: [PATCH 048/207] create project structure will work as before for all roots --- .../action_create_project_structure.py | 44 ++++++++++++------- 1 file changed, 28 insertions(+), 16 deletions(-) diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py index 163ac4836e..d418a2e623 100644 --- a/pype/ftrack/actions/action_create_project_structure.py +++ b/pype/ftrack/actions/action_create_project_structure.py @@ -32,11 +32,22 @@ class CreateProjectFolders(BaseAction): def launch(self, session, entities, event): entity = entities[0] project = self.get_project_from_entity(entity) - presets = config.get_presets()["tools"]["project_folder_structure"] + project_folder_presets = ( + config.get_presets() + .get("tools", {}) + .get("project_folder_structure") + ) + if not project_folder_presets: + return { + "success": False, + "message": "Project structure presets are not set." 
+            }
+
         try:
             # Get paths based on presets
-            basic_paths = self.get_path_items(presets)
-            self.create_folders(basic_paths, entity)
+            basic_paths = self.get_path_items(project_folder_presets)
+            anatomy = Anatomy(project["full_name"])
+            self.create_folders(basic_paths, entity, project, anatomy)
             self.create_ftrack_entities(basic_paths, project)
 
         except Exception as exc:
@@ -169,21 +180,22 @@ class CreateProjectFolders(BaseAction):
             output.append(os.path.normpath(os.path.sep.join(clean_items)))
         return output
 
-    def create_folders(self, basic_paths, entity):
-        # Set project root folder
-        if entity.entity_type.lower() == 'project':
-            project_name = entity['full_name']
+    def create_folders(self, basic_paths, entity, project, anatomy):
+        roots_paths = []
+        if isinstance(anatomy.roots, dict):
+            for root in anatomy.roots.values():
+                roots_paths.append(root.value)
         else:
-            project_name = entity['project']['full_name']
-        project_root_items = [os.environ['AVALON_PROJECTS'], project_name]
-        project_root = os.path.sep.join(project_root_items)
+            roots_paths.append(anatomy.roots.value)
 
-        full_paths = self.compute_paths(basic_paths, project_root)
-        # Create folders
-        for path in full_paths:
-            if os.path.exists(path):
-                continue
-            os.makedirs(path.format(project_root=project_root))
+        for root_path in roots_paths:
+            project_root = os.path.join(root_path, project["full_name"])
+            full_paths = self.compute_paths(basic_paths, project_root)
+            # Create folders
+            for path in full_paths:
+                if os.path.exists(path):
+                    continue
+                os.makedirs(path.format(project_root=project_root))
 
 
 def register(session, plugins_presets={}):

From 0b2c1fc99f3c10e90d7b489786991a27f41af959 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 14 Apr 2020 19:42:46 +0200
Subject: [PATCH 049/207] fixed few anatomy bugs

---
 pype/ftrack/lib/avalon_sync.py                    |  4 ----
 pype/plugins/global/publish/integrate_new.py      |  5 +----
 pype/plugins/global/publish/submit_publish_job.py | 12 +++---------
 3 files changed, 4 insertions(+), 17 deletions(-)

diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 6f928914bf..863f447979 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -1711,14 +1711,10 @@ class SyncEntitiesFactory:
         except InvalidId:
             new_id = ObjectId()
 
-        project_name = self.entities_dict[self.ft_project_id]["name"]
         project_item["_id"] = new_id
         project_item["parent"] = None
         project_item["schema"] = EntitySchemas["project"]
         project_item["config"]["schema"] = EntitySchemas["config"]
-        project_item["config"]["template"] = (
-            get_avalon_project_template(project_name)
-        )
 
         self.ftrack_avalon_mapper[self.ft_project_id] = new_id
         self.avalon_ftrack_mapper[new_id] = self.ft_project_id
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 6dab0846d1..bd5e9f25f4 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -645,11 +645,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         else:
             source = context.data["currentFile"]
             anatomy = instance.context.data["anatomy"]
-            root_name = anatomy.templates["work"].get("root_name")
             success, rootless_path = (
-                anatomy.roots.find_root_template_from_path(
-                    source, root_name, others_on_fail=True
-                )
+                anatomy.roots_obj.find_root_template_from_path(source)
             )
             if success:
                 source = rootless_path
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index 8525657b21..e366555088 100644
---
a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -185,9 +185,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): anatomy = instance.context.data["anatomy"] work_root_name = anatomy.templates["work"].get("root_name") success, rootless_path = ( - anatomy.roots.find_root_template_from_path( - output_dir, work_root_name - ) + anatomy.roots_obj.find_root_template_from_path(output_dir) ) if not success: # `rootless_path` is not set to `output_dir` if none of roots match @@ -568,9 +566,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): anatomy = instance.context.data["anatomy"] work_root_name = anatomy.templates["work"].get("root_name") success, rootless_path = ( - anatomy.roots.find_root_template_from_path( - source, work_root_name - ) + anatomy.roots_obj.find_root_template_from_path(source) ) if success: source = rootless_path @@ -622,9 +618,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging_dir = repre.get("stagingDir") if staging_dir: success, rootless_staging_dir = ( - anatomy.roots.find_root_template_from_path( - repre, work_root_name - ) + anatomy.roots.find_root_template_from_path(staging_dir) ) if success: repre["stagingDir"] = rootless_staging_dir From 891b14caf9d10e13aec1ec5a76f1626f3a1260c7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 14 Apr 2020 19:53:05 +0200 Subject: [PATCH 050/207] template is not required in project config --- schema/config-1.0.json | 1 - 1 file changed, 1 deletion(-) diff --git a/schema/config-1.0.json b/schema/config-1.0.json index b3c4362f41..198f51e04d 100644 --- a/schema/config-1.0.json +++ b/schema/config-1.0.json @@ -8,7 +8,6 @@ "additionalProperties": false, "required": [ - "template", "tasks", "apps" ], From 98548aabd2f3064c3e7a0923a38ebbe081471fbc Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 14 Apr 2020 20:27:59 +0200 Subject: [PATCH 051/207] it is possible to have set specific root instead of thumbnail_root in thumbnail template --- .../action_store_thumbnails_to_avalon.py | 73 ++++++++++--------- 1 file changed, 38 insertions(+), 35 deletions(-) diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py index 7adc36f4b5..a8fc8cb06f 100644 --- a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py +++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -54,41 +54,6 @@ class StoreThumbnailsToAvalon(BaseAction): }) session.commit() - thumbnail_roots = os.environ.get(self.thumbnail_key) - if not thumbnail_roots: - msg = "`{}` environment is not set".format(self.thumbnail_key) - - action_job["status"] = "failed" - session.commit() - - self.log.warning(msg) - - return { - "success": False, - "message": msg - } - - existing_thumbnail_root = None - for path in thumbnail_roots.split(os.pathsep): - if os.path.exists(path): - existing_thumbnail_root = path - break - - if existing_thumbnail_root is None: - msg = ( - "Can't access paths, set in `{}` ({})" - ).format(self.thumbnail_key, thumbnail_roots) - - action_job["status"] = "failed" - session.commit() - - self.log.warning(msg) - - return { - "success": False, - "message": msg - } - project = get_project_from_entity(entities[0]) project_name = project["full_name"] anatomy = Anatomy(project_name) @@ -122,6 +87,44 @@ class StoreThumbnailsToAvalon(BaseAction): "message": msg } + thumbnail_roots = os.environ.get(self.thumbnail_key) + if ( + "{thumbnail_root}" in anatomy.templates["publish"]["thumbnail"] + 
and not thumbnail_roots + ): + msg = "`{}` environment is not set".format(self.thumbnail_key) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + existing_thumbnail_root = None + for path in thumbnail_roots.split(os.pathsep): + if os.path.exists(path): + existing_thumbnail_root = path + break + + if existing_thumbnail_root is None: + msg = ( + "Can't access paths, set in `{}` ({})" + ).format(self.thumbnail_key, thumbnail_roots) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + example_template_data = { "_id": "ID", "thumbnail_root": "THUBMNAIL_ROOT", From 32d47db29813a3aa79753892da72193eb08e0df7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 15 Apr 2020 10:35:37 +0200 Subject: [PATCH 052/207] removed unused part of code in submit publish job --- pype/plugins/global/publish/submit_publish_job.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index adc061ee13..7b6730f983 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -183,7 +183,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): output_dir = instance.data["outputDir"] # Convert output dir to `{root}/rest/of/path/...` with Anatomy anatomy = instance.context.data["anatomy"] - work_root_name = anatomy.templates["work"].get("root_name") success, rootless_path = ( anatomy.roots_obj.find_root_template_from_path(output_dir) ) @@ -194,10 +193,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): " This may cause issues on farm." 
).format(output_dir))
             rootless_path = output_dir
-        else:
-            # If root was found then use `mount` root for `output_dir`
-            anatomy.roots._root_type = "mount"
-            output_dir = rootless_path.format(**{"root": anatomy.roots})
 
         # Generate the payload for Deadline submission
         payload = {

From 37d70cfbcecf9e42fe15c12aa36cb9f967a1556f Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 17 Apr 2020 09:29:42 +0200
Subject: [PATCH 053/207] removed unused line

---
 pype/plugins/global/publish/submit_publish_job.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index 7b6730f983..7158289e77 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -561,7 +561,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             source = context.data["currentFile"]
 
         anatomy = instance.context.data["anatomy"]
-        work_root_name = anatomy.templates["work"].get("root_name")
         success, rootless_path = (
             anatomy.roots_obj.find_root_template_from_path(source)
         )

From 5724b36407f8c16f2e1fb3c8c7832acc214167c7 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 17 Apr 2020 10:39:11 +0200
Subject: [PATCH 054/207] fix roots usage

---
 pype/plugins/global/publish/submit_publish_job.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index 7158289e77..6d04c8cb01 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -627,7 +627,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             staging_dir = repre.get("stagingDir")
             if staging_dir:
                 success, rootless_staging_dir = (
-                    anatomy.roots.find_root_template_from_path(staging_dir)
+                    anatomy.roots_obj.find_root_template_from_path(staging_dir)
                 )
                 if success:
                     repre["stagingDir"] = rootless_staging_dir

From 688201ca1ef2fc7326d7db7fb21c3627d2c48d68 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 17 Apr 2020 18:08:28 +0200
Subject: [PATCH 055/207] collect rendered files use only path remapper

---
 .../global/publish/collect_rendered_files.py | 14 +-------------
 1 file changed, 1 insertion(+), 13 deletions(-)

diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py
index 5197f71a46..82c1b5bfd0 100644
--- a/pype/plugins/global/publish/collect_rendered_files.py
+++ b/pype/plugins/global/publish/collect_rendered_files.py
@@ -85,19 +85,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin):
         representations = []
         for repre_data in instance_data.get("representations") or []:
             staging_dir = repre_data.get("stagingDir")
-            if not staging_dir:
-                pass
-
-            elif "{root" in staging_dir:
-                repre_data["stagingDir"] = staging_dir.format(
-                    **{"root": anatomy.roots}
-                )
-                self.log.debug((
-                    "stagingDir was filled with root."
- " To: \"{}\" From: \"{}\"" - ).format(repre_data["stagingDir"], staging_dir)) - - else: + if staging_dir: remapped = anatomy.roots_obj.path_remapper(staging_dir) if remapped: repre_data["stagingDir"] = remapped From 6326719623095f4d6499d98f5856b894dd93f7f0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 21 Apr 2020 19:28:32 +0200 Subject: [PATCH 056/207] initial commit for review and burnin filtering --- pype/plugins/global/publish/extract_review.py | 231 +++++++++++++++++- 1 file changed, 230 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 625c96566d..c092ee4eee 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1,10 +1,14 @@ import os +import re import pyblish.api import clique import pype.api import pype.lib +StringType = type("") + + class ExtractReview(pyblish.api.InstancePlugin): """Extracting Review mov file for Ftrack @@ -22,13 +26,238 @@ class ExtractReview(pyblish.api.InstancePlugin): families = ["review"] hosts = ["nuke", "maya", "shell"] + # Legacy attributes outputs = {} ext_filter = [] to_width = 1920 to_height = 1080 - def process(self, instance): + # New attributes + profiles = None + def process(self, instance): + if self.profiles is None: + return self.legacy_process(instance) + + profile_filter_data = { + "host": pyblish.api.registered_hosts()[-1].title(), + "family": self.main_family_from_instance(instance), + "task": os.environ["AVALON_TASK"] + } + + profile = self.filter_profiles_by_data( + self.profiles, profile_filter_data + ) + if not profile: + return + + instance_families = self.families_from_instance(instance) + outputs = self.filter_outputs_by_families(profile, instance_families) + if not outputs: + return + + # TODO repre loop + repre_tags_low = [tag.lower() for tag in repre.get("tags", [])] + # Check tag filters + tag_filters = output_filters.get("tags") + if tag_filters: + tag_filters_low = [tag.lower() for tag in tag_filters] + valid = False + for tag in repre_tags_low: + if tag in tag_filters_low: + valid = True + break + + if not valid: + continue + + def main_family_from_instance(self, instance): + family = instance.data.get("family") + if not family: + family = instance.data["families"][0] + return family + + def families_from_instance(self, instance): + families = [] + family = instance.data.get("family") + if family: + families.append(family) + + for family in (instance.data.get("families") or tuple()): + if family not in families: + families.append(family) + return families + + def compile_list_of_regexes(self, in_list): + regexes = [] + if not in_list: + return regexes + + for item in in_list: + if not item: + continue + + if not isinstance(item, StringType): + self.log.warning(( + "Invalid type \"{}\" value \"{}\"." + " Expected . Skipping." + ).format(str(type(item)), str(item))) + continue + + regexes.append(re.compile(item)) + return regexes + + def validate_value_by_regexes(self, in_list, value): + """Validates in any regexe from list match entered value. + + Args: + in_list (list): List with regexes. + value (str): String where regexes is checked. + + Returns: + int: Returns `0` when list is not set or is empty. Returns `1` when + any regex match value and returns `-1` when none of regexes + match value entered. 
+ """ + if not in_list: + return 0 + + output = -1 + regexes = self.compile_list_of_regexes(in_list) + for regex in regexes: + if re.match(regex, value): + output = 1 + break + return output + + def filter_profiles_by_data(self, profiles, filter_data): + """ Filter profiles by Host name, Task name and main Family. + + Filtering keys are "hosts" (list), "tasks" (list), "families" (list). + If key is not find or is empty than it's expected to match. + + Args: + profiles (list): Profiles definition from presets. + filter_data (dict): Dictionary with data for filtering. + Required keys are "host" - Host name, "task" - Task name + and "family" - Main . + """ + host_name = filter_data["host"] + task_name = filter_data["task"] + family = filter_data["family"] + + matching_profiles = None + highest_profile_points = -1 + # Each profile get 1 point for each matching filter. Profile with most + # points or first in row is returnd. + for profile in profiles: + profile_points = 0 + + # Host filtering + host_names = profile.get("hosts") + match = self.validate_value_by_regexes(host_names, host_name) + if match == -1: + continue + profile_points += match + + # Task filtering + task_names = profile.get("tasks") + match = self.validate_value_by_regexes(task_names, task_name) + if match == -1: + continue + profile_points += match + + # Family filtering + families = profile.get("families") + match = self.validate_value_by_regexes(families, family) + if match == -1: + continue + profile_points += match + + if profile_points == highest_profile_points: + matching_profiles.append(profile) + + elif profile_points > highest_profile_points: + highest_profile_points = profile_points + matching_profiles = [] + matching_profiles.append(profile) + + if not matching_profiles: + self.log.info(( + "None of profiles match your setup." + " Host \"{host}\" | Task: \"{task}\" | Family: \"{family}\"" + ).format(**filter_data)) + return + + if len(matching_profiles) > 1: + self.log.warning(( + "More than one profile match your setup." + " Using first found profile." + " Host \"{host}\" | Task: \"{task}\" | Family: \"{family}\"" + ).format(**filter_data)) + + return matching_profiles[0] + + def families_filter_validation(self, families, output_families_filter): + if not output_families_filter: + return True + + single_families = [] + combination_families = [] + for family_filter in output_families_filter: + if not family_filter: + continue + if isinstance(family_filter, (list, tuple)): + _family_filter = [] + for family in family_filter: + if family: + _family_filter.append(family.lower()) + combination_families.append(_family_filter) + else: + single_families.append(family_filter.lower()) + + for family in single_families: + if family in families: + return True + + for family_combination in combination_families: + valid = True + for family in family_combination: + if family not in families: + valid = False + break + + if valid: + return True + + return False + + def filter_outputs_by_families(self, profile, families): + outputs = profile.get("outputs") or [] + if not outputs: + return outputs + + # lower values + # QUESTION is this valid operation? 
+        families = [family.lower() for family in families]
+
+        filtered_outputs = {}
+        for filename_suffix, output_def in outputs.items():
+            output_filters = output_def.get("output_filter")
+            # When filters not set then skip filtering process
+            if not output_filters:
+                filtered_outputs[filename_suffix] = output_def
+                continue
+
+            families_filters = output_filters.get("families")
+            if not self.families_filter_validation(families, families_filters):
+                continue
+
+            filtered_outputs[filename_suffix] = output_def
+
+        return filtered_outputs
+
+    def legacy_process(self, instance):
         output_profiles = self.outputs or {}

From 60403273daef3e756c71c9c917428c8f6ab661bb Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Wed, 22 Apr 2020 11:18:16 +0200
Subject: [PATCH 057/207] filter_profiles_by_data renamed to
 find_matching_profile and swapped arguments in validate_value_by_regexes

---
 pype/plugins/global/publish/extract_review.py | 20 +++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index c092ee4eee..366745d8a8 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -5,7 +5,6 @@ import clique
 import pype.api
 import pype.lib
 
-
 StringType = type("")

@@ -26,16 +25,17 @@ class ExtractReview(pyblish.api.InstancePlugin):
     families = ["review"]
     hosts = ["nuke", "maya", "shell"]
 
+    # Preset attributes
+    profiles = None
+
+    # Legacy attributes
     outputs = {}
     ext_filter = []
     to_width = 1920
     to_height = 1080
 
-    # New attributes
-    profiles = None
-
     def process(self, instance):
+        # Use legacy processing when `profiles` is not set.
         if self.profiles is None:
             return self.legacy_process(instance)
 
@@ -45,7 +45,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
             "task": os.environ["AVALON_TASK"]
         }
 
-        profile = self.filter_profiles_by_data(
+        profile = self.find_matching_profile(
             self.profiles, profile_filter_data
         )
         if not profile:
@@ -107,7 +107,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
             regexes.append(re.compile(item))
         return regexes
 
-    def validate_value_by_regexes(self, in_list, value):
+    def validate_value_by_regexes(self, value, in_list):
         """Validates in any regexe from list match entered value.
 
         Args:
@@ -130,7 +130,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 break
         return output
 
-    def filter_profiles_by_data(self, profiles, filter_data):
+    def find_matching_profile(self, profiles, filter_data):
         """ Filter profiles by Host name, Task name and main Family.
 
         Filtering keys are "hosts" (list), "tasks" (list), "families" (list).
@@ -155,21 +155,21 @@ class ExtractReview(pyblish.api.InstancePlugin): # Host filtering host_names = profile.get("hosts") - match = self.validate_value_by_regexes(host_names, host_name) + match = self.validate_value_by_regexes(host_name, host_names) if match == -1: continue profile_points += match # Task filtering task_names = profile.get("tasks") - match = self.validate_value_by_regexes(task_names, task_name) + match = self.validate_value_by_regexes(task_name, task_names) if match == -1: continue profile_points += match # Family filtering families = profile.get("families") - match = self.validate_value_by_regexes(families, family) + match = self.validate_value_by_regexes(family, families) if match == -1: continue profile_points += match From 06f0312191c0017722086a0557107f418aa7ba22 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 22 Apr 2020 11:19:35 +0200 Subject: [PATCH 058/207] find_matching_profile has more complex system of getting most matching profile when more than one profile was filtered --- pype/plugins/global/publish/extract_review.py | 68 ++++++++++++++++--- 1 file changed, 60 insertions(+), 8 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 366745d8a8..3514339ae8 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -148,10 +148,13 @@ class ExtractReview(pyblish.api.InstancePlugin): matching_profiles = None highest_profile_points = -1 + profile_values = {} # Each profile get 1 point for each matching filter. Profile with most - # points or first in row is returnd. + # points is returnd. For cases when more than one profile will match + # are also stored ordered lists of matching values. for profile in profiles: profile_points = 0 + profile_value = [] # Host filtering host_names = profile.get("hosts") @@ -159,6 +162,7 @@ class ExtractReview(pyblish.api.InstancePlugin): if match == -1: continue profile_points += match + profile_value.append(bool(match)) # Task filtering task_names = profile.get("tasks") @@ -166,6 +170,7 @@ class ExtractReview(pyblish.api.InstancePlugin): if match == -1: continue profile_points += match + profile_value.append(bool(match)) # Family filtering families = profile.get("families") @@ -173,7 +178,12 @@ class ExtractReview(pyblish.api.InstancePlugin): if match == -1: continue profile_points += match + profile_value.append(bool(match)) + if profile_points < highest_profile_points: + continue + + profile["__value__"] = profile_value if profile_points == highest_profile_points: matching_profiles.append(profile) @@ -189,14 +199,56 @@ class ExtractReview(pyblish.api.InstancePlugin): ).format(**filter_data)) return - if len(matching_profiles) > 1: - self.log.warning(( - "More than one profile match your setup." - " Using first found profile." - " Host \"{host}\" | Task: \"{task}\" | Family: \"{family}\"" - ).format(**filter_data)) + if len(matching_profiles) == 1: + # Pop temporary key `__value__` + matching_profiles[0].pop("__value__") + return matching_profiles[0] - return matching_profiles[0] + self.log.warning(( + "More than one profile match your setup." + " Host \"{host}\" | Task: \"{task}\" | Family: \"{family}\"" + ).format(**filter_data)) + + # Filter all profiles with highest points value. First filter profiles + # with matching host if there are any then filter profiles by task + # name if there are any and lastly filter by family. Else use first in + # list. 
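+        # An illustrative sketch (hypothetical profiles, not taken from
+        # presets): two equal-point profiles
+        #     {"__value__": [False, True, True]}   # task and family match
+        #     {"__value__": [True, False, True]}   # host and family match
+        # resolve in favor of the host match, the same outcome as
+        #     max(profiles, key=lambda p: tuple(p["__value__"]))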
+ idx = 0 + final_profile = None + while True: + profiles_true = [] + profiles_false = [] + for profile in matching_profiles: + value = profile["__value__"] + # Just use first profile when idx is greater than values. + if not idx < len(value): + final_profile = profile + break + + if value[idx]: + profiles_true.append(profile) + else: + profiles_false.append(profile) + + if final_profile is not None: + break + + if profiles_true: + matching_profiles = profiles_true + else: + matching_profiles = profiles_false + + if len(matching_profiles) == 1: + final_profile = matching_profiles[0] + break + idx += 1 + + final_profile.pop("__value__") + self.log.info( + "Using first most matching profile in match order:" + " Host name -> Task name -> Family." + ) + return final_profile def families_filter_validation(self, families, output_families_filter): if not output_families_filter: From f84d9dc61646438d24d160c997897c0d7c51e13a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 22 Apr 2020 11:19:59 +0200 Subject: [PATCH 059/207] added filter_outputs_by_tags to filter outputs per representation --- pype/plugins/global/publish/extract_review.py | 25 +++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 3514339ae8..2e089fb7cf 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -309,6 +309,31 @@ class ExtractReview(pyblish.api.InstancePlugin): return filtered_outputs + def filter_outputs_by_tags(self, outputs, tags): + filtered_outputs = {} + repre_tags_low = [tag.lower() for tag in tags] + for filename_suffix, output_def in outputs.values(): + valid = True + output_filters = output_def.get("output_filter") + if output_filters: + # Check tag filters + tag_filters = output_filters.get("tags") + if tag_filters: + tag_filters_low = [tag.lower() for tag in tag_filters] + valid = False + for tag in repre_tags_low: + if tag in tag_filters_low: + valid = True + break + + if not valid: + continue + + if valid: + filtered_outputs[filename_suffix] = output_def + + return filtered_outputs + def legacy_process(self, instance): output_profiles = self.outputs or {} From a4a1d0bea68fbadeffc70e600328a82c2191345e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 22 Apr 2020 11:20:51 +0200 Subject: [PATCH 060/207] added first step of representation loop --- pype/plugins/global/publish/extract_review.py | 53 +++++++++++++------ 1 file changed, 38 insertions(+), 15 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 2e089fb7cf..1da9eb186e 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -52,25 +52,44 @@ class ExtractReview(pyblish.api.InstancePlugin): return instance_families = self.families_from_instance(instance) - outputs = self.filter_outputs_by_families(profile, instance_families) - if not outputs: + profile_outputs = self.filter_outputs_by_families(profile, instance_families) + if not profile_outputs: return - # TODO repre loop - repre_tags_low = [tag.lower() for tag in repre.get("tags", [])] - # Check tag filters - tag_filters = output_filters.get("tags") - if tag_filters: - tag_filters_low = [tag.lower() for tag in tag_filters] - valid = False - for tag in repre_tags_low: - if tag in tag_filters_low: - valid = True - break + context = instance.context - if not valid: + fps = float(instance.data["fps"]) + 
frame_start = instance.data.get("frameStart") + frame_end = instance.data.get("frameEnd") + handle_start = instance.data.get( + "handleStart", + context.data.get("handleStart") + ) + handle_end = instance.data.get( + "handleEnd", + context.data.get("handleEnd") + ) + pixel_aspect = instance.data.get("pixelAspect", 1) + resolution_width = instance.data.get("resolutionWidth") + resolution_height = instance.data.get("resolutionHeight") + + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + + # get representation and loop them + representations = instance.data["representations"] + + for repre in tuple(representations): + tags = repre.get("tags", []) + if ( + "review" not in tags + or "multipartExr" in tags + or "thumbnail" in tags + ): continue + outputs = self.filter_outputs_by_tags(profile_outputs, tags) + if not outputs: + continue def main_family_from_instance(self, instance): family = instance.data.get("family") if not family: @@ -140,7 +159,11 @@ class ExtractReview(pyblish.api.InstancePlugin): profiles (list): Profiles definition from presets. filter_data (dict): Dictionary with data for filtering. Required keys are "host" - Host name, "task" - Task name - and "family" - Main . + and "family" - Main instance family. + + Returns: + dict/None: Return most matching profile or None if none of profiles + match at least one criteria. """ host_name = filter_data["host"] task_name = filter_data["task"] From bf59ec9df72ebc02708cb8059648a911ddba098f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 22 Apr 2020 17:23:41 +0200 Subject: [PATCH 061/207] added image/view exts and supported exts --- pype/plugins/global/publish/extract_review.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 1da9eb186e..1b539b05a8 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1,5 +1,6 @@ import os import re +import copy import pyblish.api import clique import pype.api @@ -24,6 +25,9 @@ class ExtractReview(pyblish.api.InstancePlugin): order = pyblish.api.ExtractorOrder + 0.02 families = ["review"] hosts = ["nuke", "maya", "shell"] + image_exts = ["exr", "jpg", "jpeg", "png", "dpx"] + video_exts = ["mov", "mp4"] + supported_exts = image_exts + video_exts # Preset attributes profiles = None From dfc32490202cc3bd0a3a4791a7d1a66da01c60f4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 22 Apr 2020 17:24:13 +0200 Subject: [PATCH 062/207] first step of representations loop --- pype/plugins/global/publish/extract_review.py | 200 ++++++++++++++++-- 1 file changed, 178 insertions(+), 22 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 1b539b05a8..620de20fcb 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -27,6 +27,15 @@ class ExtractReview(pyblish.api.InstancePlugin): hosts = ["nuke", "maya", "shell"] image_exts = ["exr", "jpg", "jpeg", "png", "dpx"] video_exts = ["mov", "mp4"] + # image_exts = [".exr", ".jpg", ".jpeg", ".jpe", ".jif", ".jfif", ".jfi", ".png", ".dpx"] + # video_exts = [ + # ".webm", ".mkv", ".flv", ".flv", ".vob", ".ogv", ".ogg", ".drc", + # ".gif", ".gifv", ".mng", ".avi", ".MTS", ".M2TS", + # ".TS", ".mov", ".qt", ".wmv", ".yuv", ".rm", ".rmvb", + # ".asf", ".amv", ".mp4", ".m4p", ".mpg", ".mp2", ".mpeg", + # ".mpe", ".mpv", ".mpg", ".mpeg", ".m2v", ".m4v", ".svi", ".3gp", + # ".3g2", 
".mxf", ".roq", ".nsv", ".flv", ".f4v", ".f4p", ".f4a", ".f4b" + # ] supported_exts = image_exts + video_exts # Preset attributes @@ -56,33 +65,18 @@ class ExtractReview(pyblish.api.InstancePlugin): return instance_families = self.families_from_instance(instance) - profile_outputs = self.filter_outputs_by_families(profile, instance_families) + profile_outputs = self.filter_outputs_by_families( + profile, instance_families + ) if not profile_outputs: return - context = instance.context - - fps = float(instance.data["fps"]) - frame_start = instance.data.get("frameStart") - frame_end = instance.data.get("frameEnd") - handle_start = instance.data.get( - "handleStart", - context.data.get("handleStart") - ) - handle_end = instance.data.get( - "handleEnd", - context.data.get("handleEnd") - ) - pixel_aspect = instance.data.get("pixelAspect", 1) - resolution_width = instance.data.get("resolutionWidth") - resolution_height = instance.data.get("resolutionHeight") + instance_data = None ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - # get representation and loop them - representations = instance.data["representations"] - - for repre in tuple(representations): + # Loop through representations + for repre in tuple(instance.data["representations"]): tags = repre.get("tags", []) if ( "review" not in tags @@ -91,9 +85,171 @@ class ExtractReview(pyblish.api.InstancePlugin): ): continue + source_ext = repre["ext"] + if source_ext.startswith("."): + source_ext = source_ext[1:] + + if source_ext not in self.supported_exts: + continue + + # Filter output definition by representation tags (optional) outputs = self.filter_outputs_by_tags(profile_outputs, tags) if not outputs: continue + + staging_dir = repre["stagingDir"] + + # Prepare instance data. + # NOTE Till this point it is not required to have set most + # of keys in instance data. So publishing won't crash if plugin + # won't get here and instance miss required keys. + if instance_data is None: + instance_data = self.prepare_instance_data(instance) + + for filename_suffix, output_def in outputs.items(): + + # Create copy of representation + new_repre = copy.deepcopy(repre) + + ext = output_def.get("ext") or "mov" + if ext.startswith("."): + ext = ext[1:] + + additional_tags = output_def.get("tags") or [] + # TODO new method? 
+ # `self.new_repre_tags(new_repre, additional_tags)` + # Remove "delete" tag from new repre if there is + if "delete" in new_repre["tags"]: + new_repre["tags"].remove("delete") + + # Add additional tags from output definition to representation + for tag in additional_tags: + if tag not in new_repre["tags"]: + new_repre["tags"].append(tag) + + self.log.debug( + "New representation ext: \"{}\" | tags: `{}`".format( + ext, new_repre["tags"] + ) + ) + + # Output is image file sequence witht frames + # TODO change variable to `output_is_sequence` + # QUESTION Should we check for "sequence" only in additional + # tags or in all tags of new representation + is_sequence = ( + "sequence" in additional_tags + and (ext in self.image_exts) + ) + + # no handles switch from profile tags + no_handles = "no-handles" in additional_tags + + # TODO Find better way how to find out if input is sequence + # Theoretically issues: + # - there may be multiple files ant not be sequence + # - remainders are not checked at all + # - there can be more than one collection + if isinstance(repre["files"], (tuple, list)): + collections, remainder = clique.assemble(repre["files"]) + + full_input_path = os.path.join( + staging_dir, + collections[0].format("{head}{padding}{tail}") + ) + + filename = collections[0].format("{head}") + if filename.endswith("."): + filename = filename[:-1] + else: + full_input_path = os.path.join( + staging_dir, repre["files"] + ) + filename = os.path.splitext(repre["files"])[0] + + # QUESTION This breaks Anatomy template system is it ok? + # How do we care about multiple outputs with same extension? + if is_sequence: + filename_base = filename + "_{0}".format(filename_suffix) + repr_file = filename_base + ".%08d.{0}".format( + ext + ) + new_repre["sequence_file"] = repr_file + full_output_path = os.path.join( + staging_dir, filename_base, repr_file + ) + + else: + repr_file = filename + "_{0}.{1}".format( + filename_suffix, ext + ) + full_output_path = os.path.join(staging_dir, repr_file) + + self.log.info("Input path {}".format(full_input_path)) + self.log.info("Output path {}".format(full_output_path)) + + # QUESTION Why the hell we do this? + # add families + for tag in additional_tags: + if tag not in instance.data["families"]: + instance.data["families"].append(tag) + + # Get FFmpeg arguments from profile presets + output_ffmpeg_args = output_def.get("ffmpeg_args") or {} + output_ffmpeg_input = output_ffmpeg_args.get("input") or [] + output_ffmpeg_filters = output_ffmpeg_args.get("filters") or [] + output_ffmpeg_output = output_ffmpeg_args.get("output") or [] + + ffmpeg_input_args = [] + ffmpeg_output_args = [] + + # Override output file + ffmpeg_input_args.append("-y") + # Add input args from presets + ffmpeg_input_args.extend(output_ffmpeg_input) + + + if isinstance(repre["files"], list): + # QUESTION What is sence of this? 
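# NOTE (sketch): the isinstance() test above is exactly the weak sequence
# check that the "Find better way how to find out if input is sequence"
# TODO earlier in this hunk complains about. Since clique is already
# imported, a stricter variant could validate the file list first:
#
#     collections, remainder = clique.assemble(repre["files"])
#     if len(collections) != 1 or remainder:
#         raise ValueError(
#             "Expected a single frame collection, got: {}".format(
#                 repre["files"]
#             )
#         )
#     collection = collections[0]
#
# which covers the multi-collection and remainder cases the TODO lists.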
+ if frame_start_handle != repre.get( + "detectedStart", frame_start_handle + ): + frame_start_handle = repre.get("detectedStart") + + # exclude handle if no handles defined + if no_handles: + frame_start_handle = frame_start + frame_end_handle = frame_end + + ffmpeg_input_args.append( + "-start_number {0} -framerate {1}".format( + frame_start_handle, fps)) + else: + if no_handles: + start_sec = float(handle_start) / fps + ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec)) + frame_start_handle = frame_start + frame_end_handle = frame_end + + + def prepare_instance_data(self, instance): + return { + "fps": float(instance.data["fps"]), + "frame_start": instance.data["frameStart"], + "frame_end": instance.data["frameEnd"], + "handle_start": instance.data.get( + "handleStart", + instance.context.data["handleStart"] + ), + "handle_end": instance.data.get( + "handleEnd", + instance.context.data["handleEnd"] + ), + "pixel_aspect": instance.data.get("pixelAspect", 1), + "resolution_width": instance.data.get("resolutionWidth"), + "resolution_height": instance.data.get("resolutionHeight") + } + def main_family_from_instance(self, instance): family = instance.data.get("family") if not family: @@ -339,7 +495,7 @@ class ExtractReview(pyblish.api.InstancePlugin): def filter_outputs_by_tags(self, outputs, tags): filtered_outputs = {} repre_tags_low = [tag.lower() for tag in tags] - for filename_suffix, output_def in outputs.values(): + for filename_suffix, output_def in outputs.items(): valid = True output_filters = output_def.get("output_filter") if output_filters: From 0586fff9ab1bda66884cc7114ee36508ed3a955c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 22 Apr 2020 19:07:27 +0200 Subject: [PATCH 063/207] few more steps and added few comments --- pype/plugins/global/publish/extract_review.py | 108 +++++++++++------- 1 file changed, 66 insertions(+), 42 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 620de20fcb..87cb519485 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -111,13 +111,13 @@ class ExtractReview(pyblish.api.InstancePlugin): # Create copy of representation new_repre = copy.deepcopy(repre) - ext = output_def.get("ext") or "mov" - if ext.startswith("."): - ext = ext[1:] + output_ext = output_def.get("ext") or "mov" + if output_ext.startswith("."): + output_ext = output_ext[1:] additional_tags = output_def.get("tags") or [] # TODO new method? - # `self.new_repre_tags(new_repre, additional_tags)` + # `self.prepare_new_repre_tags(new_repre, additional_tags)` # Remove "delete" tag from new repre if there is if "delete" in new_repre["tags"]: new_repre["tags"].remove("delete") @@ -129,24 +129,28 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug( "New representation ext: \"{}\" | tags: `{}`".format( - ext, new_repre["tags"] + output_ext, new_repre["tags"] ) ) # Output is image file sequence witht frames # TODO change variable to `output_is_sequence` + # QUESTION Shall we do it in opposite? 
Expect that if output + # extension is image format and input is sequence or video + # format then do sequence and single frame only if tag is + # "single-frame" (or similar) # QUESTION Should we check for "sequence" only in additional # tags or in all tags of new representation is_sequence = ( "sequence" in additional_tags - and (ext in self.image_exts) + and (output_ext in self.image_exts) ) # no handles switch from profile tags no_handles = "no-handles" in additional_tags - # TODO Find better way how to find out if input is sequence - # Theoretically issues: + # TODO GLOBAL ISSUE - Find better way how to find out if input + # is sequence. Issues( in theory): # - there may be multiple files ant not be sequence # - remainders are not checked at all # - there can be more than one collection @@ -168,11 +172,14 @@ class ExtractReview(pyblish.api.InstancePlugin): filename = os.path.splitext(repre["files"])[0] # QUESTION This breaks Anatomy template system is it ok? - # How do we care about multiple outputs with same extension? + # QUESTION How do we care about multiple outputs with same + # extension? (Expect we don't...) + # - possible solution add "<{review_suffix}>" into templates + # but that may cause issues when clients remove that. if is_sequence: filename_base = filename + "_{0}".format(filename_suffix) repr_file = filename_base + ".%08d.{0}".format( - ext + output_ext ) new_repre["sequence_file"] = repr_file full_output_path = os.path.join( @@ -181,7 +188,7 @@ class ExtractReview(pyblish.api.InstancePlugin): else: repr_file = filename + "_{0}.{1}".format( - filename_suffix, ext + filename_suffix, output_ext ) full_output_path = os.path.join(staging_dir, repr_file) @@ -194,42 +201,59 @@ class ExtractReview(pyblish.api.InstancePlugin): if tag not in instance.data["families"]: instance.data["families"].append(tag) - # Get FFmpeg arguments from profile presets - output_ffmpeg_args = output_def.get("ffmpeg_args") or {} - output_ffmpeg_input = output_ffmpeg_args.get("input") or [] - output_ffmpeg_filters = output_ffmpeg_args.get("filters") or [] - output_ffmpeg_output = output_ffmpeg_args.get("output") or [] + ffmpeg_args = self._ffmpeg_arguments( + output_def, instance, instance_data + ) - ffmpeg_input_args = [] - ffmpeg_output_args = [] + def _ffmpeg_arguments(output_def, instance, repre, instance_data): + # TODO split into smaller methods and use these variable only there + fps = instance_data["fps"] + frame_start = instance_data["frame_start"] + frame_end = instance_data["frame_end"] + handle_start = instance_data["handle_start"] + handle_end = instance_data["handle_end"] + frame_start_handle = frame_start - handle_start, + frame_end_handle = frame_end + handle_end, + pixel_aspect = instance_data["pixel_aspect"] + resolution_width = instance_data["resolution_width"] + resolution_height = instance_data["resolution_height"] - # Override output file - ffmpeg_input_args.append("-y") - # Add input args from presets - ffmpeg_input_args.extend(output_ffmpeg_input) + # Get FFmpeg arguments from profile presets + output_ffmpeg_args = output_def.get("ffmpeg_args") or {} + output_ffmpeg_input = output_ffmpeg_args.get("input") or [] + output_ffmpeg_filters = output_ffmpeg_args.get("filters") or [] + output_ffmpeg_output = output_ffmpeg_args.get("output") or [] + ffmpeg_input_args = [] + ffmpeg_output_args = [] - if isinstance(repre["files"], list): - # QUESTION What is sence of this? 
- if frame_start_handle != repre.get( - "detectedStart", frame_start_handle - ): - frame_start_handle = repre.get("detectedStart") + # Override output file + ffmpeg_input_args.append("-y") + # Add input args from presets + ffmpeg_input_args.extend(output_ffmpeg_input) - # exclude handle if no handles defined - if no_handles: - frame_start_handle = frame_start - frame_end_handle = frame_end + if isinstance(repre["files"], list): + # QUESTION What is sence of this? + if frame_start_handle != repre.get( + "detectedStart", frame_start_handle + ): + frame_start_handle = repre.get("detectedStart") - ffmpeg_input_args.append( - "-start_number {0} -framerate {1}".format( - frame_start_handle, fps)) - else: - if no_handles: - start_sec = float(handle_start) / fps - ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec)) - frame_start_handle = frame_start - frame_end_handle = frame_end + # exclude handle if no handles defined + if no_handles: + frame_start_handle = frame_start + frame_end_handle = frame_end + + ffmpeg_input_args.append( + "-start_number {0} -framerate {1}".format( + frame_start_handle, fps)) + else: + if no_handles: + # QUESTION why we are using seconds instead of frames? + start_sec = float(handle_start) / fps + ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec)) + frame_start_handle = frame_start + frame_end_handle = frame_end def prepare_instance_data(self, instance): @@ -247,7 +271,7 @@ class ExtractReview(pyblish.api.InstancePlugin): ), "pixel_aspect": instance.data.get("pixelAspect", 1), "resolution_width": instance.data.get("resolutionWidth"), - "resolution_height": instance.data.get("resolutionHeight") + "resolution_height": instance.data.get("resolutionHeight"), } def main_family_from_instance(self, instance): From 9ed201c0cd1e8d278d38fa2602374966f43985c2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 23 Apr 2020 11:59:13 +0200 Subject: [PATCH 064/207] process is splitted more than already was --- pype/plugins/global/publish/extract_review.py | 475 +++++++++++++----- 1 file changed, 338 insertions(+), 137 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 87cb519485..887c506701 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -65,13 +65,17 @@ class ExtractReview(pyblish.api.InstancePlugin): return instance_families = self.families_from_instance(instance) - profile_outputs = self.filter_outputs_by_families( + _profile_outputs = self.filter_outputs_by_families( profile, instance_families ) - if not profile_outputs: + if not _profile_outputs: return - instance_data = None + # Store `filename_suffix` to save to save arguments + profile_outputs = [] + for filename_suffix, definition in _profile_outputs.items(): + definition["filename_suffix"] = filename_suffix + profile_outputs.append(definition) ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") @@ -97,24 +101,14 @@ class ExtractReview(pyblish.api.InstancePlugin): if not outputs: continue - staging_dir = repre["stagingDir"] - # Prepare instance data. # NOTE Till this point it is not required to have set most # of keys in instance data. So publishing won't crash if plugin # won't get here and instance miss required keys. 
- if instance_data is None: - instance_data = self.prepare_instance_data(instance) - - for filename_suffix, output_def in outputs.items(): - + for output_def in outputs: # Create copy of representation new_repre = copy.deepcopy(repre) - output_ext = output_def.get("ext") or "mov" - if output_ext.startswith("."): - output_ext = output_ext[1:] - additional_tags = output_def.get("tags") or [] # TODO new method? # `self.prepare_new_repre_tags(new_repre, additional_tags)` @@ -128,152 +122,359 @@ class ExtractReview(pyblish.api.InstancePlugin): new_repre["tags"].append(tag) self.log.debug( - "New representation ext: \"{}\" | tags: `{}`".format( - output_ext, new_repre["tags"] - ) + "New representation tags: `{}`".format(new_repre["tags"]) ) - # Output is image file sequence witht frames - # TODO change variable to `output_is_sequence` - # QUESTION Shall we do it in opposite? Expect that if output - # extension is image format and input is sequence or video - # format then do sequence and single frame only if tag is - # "single-frame" (or similar) - # QUESTION Should we check for "sequence" only in additional - # tags or in all tags of new representation - is_sequence = ( - "sequence" in additional_tags - and (output_ext in self.image_exts) - ) - - # no handles switch from profile tags - no_handles = "no-handles" in additional_tags - - # TODO GLOBAL ISSUE - Find better way how to find out if input - # is sequence. Issues( in theory): - # - there may be multiple files ant not be sequence - # - remainders are not checked at all - # - there can be more than one collection - if isinstance(repre["files"], (tuple, list)): - collections, remainder = clique.assemble(repre["files"]) - - full_input_path = os.path.join( - staging_dir, - collections[0].format("{head}{padding}{tail}") - ) - - filename = collections[0].format("{head}") - if filename.endswith("."): - filename = filename[:-1] - else: - full_input_path = os.path.join( - staging_dir, repre["files"] - ) - filename = os.path.splitext(repre["files"])[0] - - # QUESTION This breaks Anatomy template system is it ok? - # QUESTION How do we care about multiple outputs with same - # extension? (Expect we don't...) - # - possible solution add "<{review_suffix}>" into templates - # but that may cause issues when clients remove that. - if is_sequence: - filename_base = filename + "_{0}".format(filename_suffix) - repr_file = filename_base + ".%08d.{0}".format( - output_ext - ) - new_repre["sequence_file"] = repr_file - full_output_path = os.path.join( - staging_dir, filename_base, repr_file - ) - - else: - repr_file = filename + "_{0}.{1}".format( - filename_suffix, output_ext - ) - full_output_path = os.path.join(staging_dir, repr_file) - - self.log.info("Input path {}".format(full_input_path)) - self.log.info("Output path {}".format(full_output_path)) - - # QUESTION Why the hell we do this? + # QUESTION Why the hell we do this, adding tags to families? 
# add families for tag in additional_tags: if tag not in instance.data["families"]: instance.data["families"].append(tag) - ffmpeg_args = self._ffmpeg_arguments( - output_def, instance, instance_data - ) + ffmpeg_args = self._ffmpeg_arguments(output_def, instance) - def _ffmpeg_arguments(output_def, instance, repre, instance_data): - # TODO split into smaller methods and use these variable only there - fps = instance_data["fps"] - frame_start = instance_data["frame_start"] - frame_end = instance_data["frame_end"] - handle_start = instance_data["handle_start"] - handle_end = instance_data["handle_end"] - frame_start_handle = frame_start - handle_start, - frame_end_handle = frame_end + handle_end, - pixel_aspect = instance_data["pixel_aspect"] - resolution_width = instance_data["resolution_width"] - resolution_height = instance_data["resolution_height"] + def repre_has_sequence(self, repre): + # TODO GLOBAL ISSUE - Find better way how to find out if input + # is sequence. Issues( in theory): + # - there may be multiple files ant not be sequence + # - remainders are not checked at all + # - there can be more than one collection + return isinstance(repre["files"], (list, tuple)) + def _ffmpeg_arguments(self, output_def, instance, repre): + temp_data = self.prepare_temp_data(instance) + + # NOTE used different key for final frame start/end to not confuse + # those who don't know what + # - e.g. "frame_start_output" + # QUESTION should we use tags ONLY from output definition? + # - In that case `output_def.get("tags") or []` should replace + # `repre["tags"]`. + # Change output frames when output should be without handles + no_handles = "no-handles" in repre["tags"] + if no_handles: + temp_data["output_frame_start"] = temp_data["frame_start"] + temp_data["output_frame_end"] = temp_data["frame_end"] + + # TODO this may hold class which may be easier to work with # Get FFmpeg arguments from profile presets - output_ffmpeg_args = output_def.get("ffmpeg_args") or {} - output_ffmpeg_input = output_ffmpeg_args.get("input") or [] - output_ffmpeg_filters = output_ffmpeg_args.get("filters") or [] - output_ffmpeg_output = output_ffmpeg_args.get("output") or [] + out_def_ffmpeg_args = output_def.get("ffmpeg_args") or {} - ffmpeg_input_args = [] - ffmpeg_output_args = [] + ffmpeg_input_args = out_def_ffmpeg_args.get("input") or [] + ffmpeg_output_args = out_def_ffmpeg_args.get("output") or [] + ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or [] + ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or [] - # Override output file + # Add argument to override output file ffmpeg_input_args.append("-y") - # Add input args from presets - ffmpeg_input_args.extend(output_ffmpeg_input) - if isinstance(repre["files"], list): - # QUESTION What is sence of this? 
- if frame_start_handle != repre.get( - "detectedStart", frame_start_handle - ): - frame_start_handle = repre.get("detectedStart") + if no_handles: + # NOTE used `-frames:v` instead of `-t` + duration_frames = ( + temp_data["output_frame_end"] + - temp_data["output_frame_start"] + + 1 + ) + ffmpeg_output_args.append("-frames:v {}".format(duration_frames)) - # exclude handle if no handles defined - if no_handles: - frame_start_handle = frame_start - frame_end_handle = frame_end + if self.repre_has_sequence(repre): + # NOTE removed "detectedStart" key handling (NOT SET) + # Set start frame ffmpeg_input_args.append( - "-start_number {0} -framerate {1}".format( - frame_start_handle, fps)) - else: - if no_handles: - # QUESTION why we are using seconds instead of frames? - start_sec = float(handle_start) / fps - ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec)) - frame_start_handle = frame_start - frame_end_handle = frame_end + "-start_number {}".format(temp_data["output_frame_start"]) + ) + # TODO add fps mapping `{fps: fraction}` + # - e.g.: { + # "25": "25/1", + # "24": "24/1", + # "23.976": "24000/1001" + # } + # Add framerate to input when input is sequence + ffmpeg_input_args.append( + "-framerate {}".format(temp_data["fps"]) + ) + + elif no_handles: + # QUESTION Shall we change this to use filter: + # `select="gte(n\,handle_start),setpts=PTS-STARTPTS` + # Pros: + # 1.) Python is not good at float operation + # 2.) FPS on instance may not be same as input's + start_sec = float(temp_data["handle_start"]) / temp_data["fps"] + ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec)) + + full_input_path, full_output_path = self.input_output_paths( + repre, output_def + ) + ffmpeg_input_args.append("-i \"{}\"".format(full_input_path)) + + # Add audio arguments if there are any + audio_in_args, audio_filters, audio_out_args = self.audio_args( + instance, temp_data + ) + ffmpeg_input_args.extend(audio_in_args) + ffmpeg_audio_filters.extend(audio_filters) + ffmpeg_output_args.extend(audio_out_args) + + # In case audio is longer than video. + # QUESTION what if audio is shoter than video? 
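# NOTE (sketch): answering the two questions raised in this hunk -
# "-shortest" (added below) stops the encode when the shortest stream ends,
# so audio shorter than the video would truncate the picture as well;
# padding the audio first with ffmpeg's "apad" filter is the usual guard.
# And the proposed frame-based trim instead of the "-ss" seconds offset
# would look like:
#
#     ffmpeg_video_filters.append(
#         "select=gte(n\\,{0}),setpts=PTS-STARTPTS".format(
#             temp_data["handle_start"]
#         )
#     )
#
# avoiding the float rounding that the seconds variant brings for rates
# like 23.976.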
+ if "-shortest" not in ffmpeg_output_args: + ffmpeg_output_args.append("-shortest") + + ffmpeg_output_args.append("\"{}\"".format(full_output_path)) + + def prepare_temp_data(self, instance): + frame_start = instance.data["frameStart"] + handle_start = instance.data.get( + "handleStart", + instance.context.data["handleStart"] + ) + frame_end = instance.data["frameEnd"] + handle_end = instance.data.get( + "handleEnd", + instance.context.data["handleEnd"] + ) + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end - def prepare_instance_data(self, instance): return { "fps": float(instance.data["fps"]), - "frame_start": instance.data["frameStart"], - "frame_end": instance.data["frameEnd"], - "handle_start": instance.data.get( - "handleStart", - instance.context.data["handleStart"] - ), - "handle_end": instance.data.get( - "handleEnd", - instance.context.data["handleEnd"] - ), + "frame_start": frame_start, + "frame_end": frame_end, + "handle_start": handle_start, + "handle_end": handle_end, + "frame_start_handle": frame_start_handle, + "frame_end_handle": frame_end_handle, + "output_frame_start": frame_start_handle, + "output_frame_end": frame_end_handle, "pixel_aspect": instance.data.get("pixelAspect", 1), "resolution_width": instance.data.get("resolutionWidth"), "resolution_height": instance.data.get("resolutionHeight"), } + def input_output_paths(self, repre, output_def): + staging_dir = repre["stagingDir"] + + # TODO Define if extension should have dot or not + output_ext = output_def.get("ext") or "mov" + if output_ext.startswith("."): + output_ext = output_ext[1:] + + self.log.debug( + "New representation ext: `{}`".format(output_ext) + ) + + # Output is image file sequence witht frames + # QUESTION Shall we do it in opposite? Expect that if output + # extension is image format and input is sequence or video + # format then do sequence and single frame only if tag is + # "single-frame" (or similar) + # QUESTION should we use tags ONLY from output definition? + # - In that case `output_def.get("tags") or []` should replace + # `repre["tags"]`. + output_is_sequence = ( + "sequence" in repre["tags"] + and (output_ext in self.image_exts) + ) + + if self.repre_has_sequence(repre): + collections, remainder = clique.assemble(repre["files"]) + + full_input_path = os.path.join( + staging_dir, + collections[0].format("{head}{padding}{tail}") + ) + + filename = collections[0].format("{head}") + if filename.endswith("."): + filename = filename[:-1] + else: + full_input_path = os.path.join( + staging_dir, repre["files"] + ) + filename = os.path.splitext(repre["files"])[0] + + filename_suffix = output_def["filename_suffix"] + # QUESTION This breaks Anatomy template system is it ok? + # QUESTION How do we care about multiple outputs with same + # extension? (Expect we don't...) + # - possible solution add "<{review_suffix}>" into templates + # but that may cause issues when clients remove that (and it's + # ugly). 
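# NOTE (sketch): "%08d" in the sequence branch below is ffmpeg's
# printf-style image pattern - one numbered file per encoded frame.
# With a hypothetical "shot010" base name and a "h264" suffix:
#
#     repr_file = "{}.%08d.{}".format("shot010_h264", "jpg")
#     # -> "shot010_h264.%08d.jpg"
#     # ffmpeg writes shot010_h264.00000001.jpg, ...00000002.jpg, ...
#
# (names here are made up for illustration only).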
+ if output_is_sequence: + filename_base = "{}_{}".format( + filename, filename_suffix + ) + repr_file = "{}.%08d.{}".format( + filename_base, output_ext + ) + + repre["sequence_file"] = repr_file + full_output_path = os.path.join( + staging_dir, filename_base, repr_file + ) + + else: + repr_file = "{}_{}.{}".format( + filename, filename_suffix, output_ext + ) + full_output_path = os.path.join(staging_dir, repr_file) + + self.log.debug("Input path {}".format(full_input_path)) + self.log.debug("Output path {}".format(full_output_path)) + + return full_input_path, full_output_path + + def audio_args(self, instance, temp_data): + audio_in_args = [] + audio_filters = [] + audio_out_args = [] + audio_inputs = instance.data.get("audio") + if not audio_inputs: + return audio_in_args, audio_filters, audio_out_args + + for audio in audio_inputs: + # NOTE modified, always was expected "frameStartFtrack" which is + # STANGE?!!! + # TODO use different frame start! + offset_seconds = 0 + frame_start_ftrack = instance.data.get("frameStartFtrack") + if frame_start_ftrack is not None: + offset_frames = frame_start_ftrack - audio["offset"] + offset_seconds = offset_frames / temp_data["fps"] + + if offset_seconds > 0: + audio_in_args.append( + "-ss {}".format(offset_seconds) + ) + elif offset_seconds < 0: + audio_in_args.append( + "-itsoffset {}".format(abs(offset_seconds)) + ) + + audio_in_args.append("-i \"{}\"".format(audio["filename"])) + + # NOTE: These were changed from input to output arguments. + # NOTE: value in "-ac" was hardcoded to 2, changed to audio inputs len. + # Need to merge audio if there are more than 1 input. + if len(audio_inputs) > 1: + audio_out_args.append("-filter_complex amerge") + audio_out_args.append("-ac {}".format(len(audio_inputs))) + + return audio_in_args, audio_filters, audio_out_args + + def resolution_ratios(self, temp_data, output_def, repre): + output_width = output_def.get("width") + output_height = output_def.get("height") + output_pixel_aspect = output_def.get("aspect_ratio") + output_letterbox = output_def.get("letter_box") + + # defining image ratios + resolution_ratio = ( + (float(resolution_width) * pixel_aspect) / resolution_height + ) + delivery_ratio = float(self.to_width) / float(self.to_height) + self.log.debug("resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug("delivery_ratio: `{}`".format(delivery_ratio)) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float("{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float("{:0.2f}".format(delivery_ratio)) + + # get scale factor + if resolution_ratio_test < delivery_ratio_test: + scale_factor = ( + float(self.to_width) / (resolution_width * pixel_aspect) + ) + else: + scale_factor = ( + float(self.to_height) / (resolution_height * pixel_aspect) + ) + + self.log.debug("__ scale_factor: `{}`".format(scale_factor)) + + filters = [] + # letter_box + if output_letterbox: + ffmpeg_width = self.to_width + ffmpeg_height = self.to_height + if "reformat" not in repre["tags"]: + output_letterbox /= pixel_aspect + if resolution_ratio_test != delivery_ratio_test: + ffmpeg_width = resolution_width + ffmpeg_height = int( + resolution_height * pixel_aspect) + else: + if resolution_ratio_test != delivery_ratio_test: + output_letterbox /= scale_factor + else: + output_letterbox /= pixel_aspect + + filters.append( + "scale={}x{}:flags=lanczos".format(ffmpeg_width, ffmpeg_height) + ) + # QUESTION shouldn't this contain aspect ration value instead of 1? 
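# NOTE (sketch, hedged): re the setsar question above - setsar declares the
# sample (pixel) aspect ratio of the output stream. Once the scale filter
# has resampled into square-pixel delivery space, "setsar=1" matches the
# pixels actually produced; the source ratio would only be appropriate for
# an anamorphic target, e.g.:
#
#     filters.append("setsar={}".format(pixel_aspect))
#
# so keeping 1 here looks intentional rather than a bug.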
+ filters.append( + "setsar=1" + ) + filters.append(( + "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c=black" + ).format(output_letterbox)) + + filters.append(( + "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" + ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black" + ).format(output_letterbox)) + + self.log.debug("pixel_aspect: `{}`".format(pixel_aspect)) + self.log.debug("resolution_width: `{}`".format(resolution_width)) + self.log.debug("resolution_height: `{}`".format(resolution_height)) + + # scaling none square pixels and 1920 width + # QUESTION: again check only output tags or repre tags + # WARNING: Duplication of filters when letter_box is set (or not?) + if "reformat" in repre["tags"]: + if resolution_ratio_test < delivery_ratio_test: + self.log.debug("lower then delivery") + width_scale = int(self.to_width * scale_factor) + width_half_pad = int((self.to_width - width_scale) / 2) + height_scale = self.to_height + height_half_pad = 0 + else: + self.log.debug("heigher then delivery") + width_scale = self.to_width + width_half_pad = 0 + scale_factor = ( + float(self.to_width) + / (float(resolution_width) * pixel_aspect) + ) + self.log.debug( + "__ scale_factor: `{}`".format(scale_factor) + ) + height_scale = int(resolution_height * scale_factor) + height_half_pad = int((self.to_height - height_scale) / 2) + + self.log.debug("width_scale: `{}`".format(width_scale)) + self.log.debug("width_half_pad: `{}`".format(width_half_pad)) + self.log.debug("height_scale: `{}`".format(height_scale)) + self.log.debug("height_half_pad: `{}`".format(height_half_pad)) + + filters.append( + "scale={}x{}:flags=lanczos".format(width_scale, height_scale) + ) + filters.append( + "pad={}:{}:{}:{}:black".format( + self.to_width, self.to_height, + width_half_pad, + height_half_pad + ) + filters.append("setsar=1") + + return filters + def main_family_from_instance(self, instance): family = instance.data.get("family") if not family: @@ -517,9 +718,9 @@ class ExtractReview(pyblish.api.InstancePlugin): return filtered_outputs def filter_outputs_by_tags(self, outputs, tags): - filtered_outputs = {} + filtered_outputs = [] repre_tags_low = [tag.lower() for tag in tags] - for filename_suffix, output_def in outputs.items(): + for output_def in outputs: valid = True output_filters = output_def.get("output_filter") if output_filters: @@ -537,7 +738,7 @@ class ExtractReview(pyblish.api.InstancePlugin): continue if valid: - filtered_outputs[filename_suffix] = output_def + filtered_outputs.append(output_def) return filtered_outputs From cc153acb32f2771921cdc7d96c4320be65d23458 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 23 Apr 2020 15:39:25 +0200 Subject: [PATCH 065/207] all except reformatting should work --- pype/plugins/global/publish/extract_review.py | 316 +++++++++++++----- 1 file changed, 237 insertions(+), 79 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 887c506701..170193ff12 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -25,19 +25,15 @@ class ExtractReview(pyblish.api.InstancePlugin): order = pyblish.api.ExtractorOrder + 0.02 families = ["review"] hosts = ["nuke", "maya", "shell"] + + # Supported extensions image_exts = ["exr", "jpg", "jpeg", "png", "dpx"] video_exts = ["mov", "mp4"] - # image_exts = [".exr", ".jpg", ".jpeg", ".jpe", ".jif", ".jfif", ".jfi", ".png", ".dpx"] - # video_exts = [ - # ".webm", ".mkv", ".flv", ".flv", ".vob", ".ogv", ".ogg", ".drc", - # 
".gif", ".gifv", ".mng", ".avi", ".MTS", ".M2TS", - # ".TS", ".mov", ".qt", ".wmv", ".yuv", ".rm", ".rmvb", - # ".asf", ".amv", ".mp4", ".m4p", ".mpg", ".mp2", ".mpeg", - # ".mpe", ".mpv", ".mpg", ".mpeg", ".m2v", ".m4v", ".svi", ".3gp", - # ".3g2", ".mxf", ".roq", ".nsv", ".flv", ".f4v", ".f4p", ".f4a", ".f4b" - # ] supported_exts = image_exts + video_exts + # Path to ffmpeg + path_to_ffmpeg = None + # Preset attributes profiles = None @@ -52,6 +48,16 @@ class ExtractReview(pyblish.api.InstancePlugin): if self.profiles is None: return self.legacy_process(instance) + # Run processing + self.main_process(instance) + + # Make sure cleanup happens and pop representations with "delete" tag. + for repre in tuple(instance.data["representations"]): + tags = repre.get("tags") or [] + if "delete" if tags: + instance.data["representations"].remove(repre) + + def main_process(self, instance): profile_filter_data = { "host": pyblish.api.registered_hosts()[-1].title(), "family": self.main_family_from_instance(instance), @@ -71,13 +77,14 @@ class ExtractReview(pyblish.api.InstancePlugin): if not _profile_outputs: return - # Store `filename_suffix` to save to save arguments + # Store `filename_suffix` to save arguments profile_outputs = [] for filename_suffix, definition in _profile_outputs.items(): definition["filename_suffix"] = filename_suffix profile_outputs.append(definition) - ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + if self.path_to_ffmpeg is None: + self.path_to_ffmpeg = pype.lib.get_ffmpeg_tool_path("ffmpeg") # Loop through representations for repre in tuple(instance.data["representations"]): @@ -131,9 +138,39 @@ class ExtractReview(pyblish.api.InstancePlugin): if tag not in instance.data["families"]: instance.data["families"].append(tag) - ffmpeg_args = self._ffmpeg_arguments(output_def, instance) + temp_data = self.prepare_temp_data(instance, repre, new_repre) - def repre_has_sequence(self, repre): + ffmpeg_args = self._ffmpeg_arguments( + output_def, instance, temp_data + ) + subprcs_cmd = " ".join(ffmpeg_args) + + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) + output = pype.api.subprocess(subprcs_cmd) + self.log.debug("Output: {}".format(output)) + + output_name = output_def["filename_suffix"] + if temp_data["without_handles"]: + output_name += "_noHandles" + + new_repre.update({ + "name": output_def["filename_suffix"], + "outputName": output_name, + "outputDef": output_def, + "frameStartFtrack": temp_data["output_frame_start"], + "frameEndFtrack": temp_data["output_frame_end"] + }) + + # Force to pop these key if are in new repre + new_repre.pop("preview", None) + new_repre.pop("thumbnail", None) + + # adding representation + self.log.debug("Adding: {}".format(new_repre)) + instance.data["representations"].append(new_repre) + + def source_is_sequence(self, repre): # TODO GLOBAL ISSUE - Find better way how to find out if input # is sequence. Issues( in theory): # - there may be multiple files ant not be sequence @@ -141,21 +178,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # - there can be more than one collection return isinstance(repre["files"], (list, tuple)) - def _ffmpeg_arguments(self, output_def, instance, repre): - temp_data = self.prepare_temp_data(instance) - - # NOTE used different key for final frame start/end to not confuse - # those who don't know what - # - e.g. "frame_start_output" - # QUESTION should we use tags ONLY from output definition? - # - In that case `output_def.get("tags") or []` should replace - # `repre["tags"]`. 
- # Change output frames when output should be without handles - no_handles = "no-handles" in repre["tags"] - if no_handles: - temp_data["output_frame_start"] = temp_data["frame_start"] - temp_data["output_frame_end"] = temp_data["frame_end"] - + def _ffmpeg_arguments(self, output_def, instance, new_repre, temp_data): # TODO this may hold class which may be easier to work with # Get FFmpeg arguments from profile presets out_def_ffmpeg_args = output_def.get("ffmpeg_args") or {} @@ -168,8 +191,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # Add argument to override output file ffmpeg_input_args.append("-y") - if no_handles: - # NOTE used `-frames:v` instead of `-t` + if temp_data["without_handles"]: + # NOTE used `-frames:v` instead of `-t` - should work the same way duration_frames = ( temp_data["output_frame_end"] - temp_data["output_frame_start"] @@ -177,7 +200,7 @@ class ExtractReview(pyblish.api.InstancePlugin): ) ffmpeg_output_args.append("-frames:v {}".format(duration_frames)) - if self.repre_has_sequence(repre): + if temp_data["source_is_sequence"]: # NOTE removed "detectedStart" key handling (NOT SET) # Set start frame @@ -196,7 +219,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "-framerate {}".format(temp_data["fps"]) ) - elif no_handles: + elif temp_data["without_handles"]: # QUESTION Shall we change this to use filter: # `select="gte(n\,handle_start),setpts=PTS-STARTPTS` # Pros: @@ -206,7 +229,7 @@ class ExtractReview(pyblish.api.InstancePlugin): ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec)) full_input_path, full_output_path = self.input_output_paths( - repre, output_def + new_repre, output_def, temp_data ) ffmpeg_input_args.append("-i \"{}\"".format(full_input_path)) @@ -218,14 +241,80 @@ class ExtractReview(pyblish.api.InstancePlugin): ffmpeg_audio_filters.extend(audio_filters) ffmpeg_output_args.extend(audio_out_args) - # In case audio is longer than video. + # In case audio is longer than video`. # QUESTION what if audio is shoter than video? if "-shortest" not in ffmpeg_output_args: ffmpeg_output_args.append("-shortest") + res_filters = self.resolution_ratios(temp_data, output_def, new_repre) + ffmpeg_video_filters.extend(res_filters) + + ffmpeg_input_args = self.split_ffmpeg_args(ffmpeg_input_args) + + lut_filters = self.lut_filters(new_repre, instance, ffmpeg_input_args) + ffmpeg_video_filters.extend(lut_filters) + + # WARNING This must be latest added item to output arguments. 
ffmpeg_output_args.append("\"{}\"".format(full_output_path)) - def prepare_temp_data(self, instance): + return self.ffmpeg_full_args( + ffmpeg_input_args, + ffmpeg_video_filters, + ffmpeg_audio_filters, + ffmpeg_output_args + ) + + def split_ffmpeg_args(self, in_args): + splitted_args = [] + for arg in in_args: + sub_args = arg.split(" -") + if len(sub_args) == 1: + if arg and arg not in splitted_args: + splitted_args.append(arg) + continue + + for idx, arg in enumerate(sub_args): + if idx != 0: + arg = "-" + arg + + if arg and arg not in splitted_args: + splitted_args.append(arg) + return splitted_args + + def ffmpeg_full_args( + self, input_args, video_filters, audio_filters, output_args + ): + output_args = self.split_ffmpeg_args(output_args) + + video_args_dentifiers = ["-vf", "-filter:v"] + audio_args_dentifiers = ["-af", "-filter:a"] + for arg in tuple(output_args): + for identifier in video_args_dentifiers: + if identifier in arg: + output_args.remove(arg) + arg = arg.replace(identifier, "").strip() + video_filters.append(arg) + + for identifier in audio_args_dentifiers: + if identifier in arg: + output_args.remove(arg) + arg = arg.replace(identifier, "").strip() + audio_filters.append(arg) + + all_args = [] + all_args.append(self.path_to_ffmpeg) + all_args.extend(input_args) + if video_filters: + all_args.append("-filter:v {}".format(",".join(video_filters))) + + if audio_filters: + all_args.append("-filter:a {}".format(",".join(audio_filters))) + + all_args.extend(output_args) + + return all_args + + def prepare_temp_data(self, instance, repre, new_repre): frame_start = instance.data["frameStart"] handle_start = instance.data.get( "handleStart", @@ -240,6 +329,21 @@ class ExtractReview(pyblish.api.InstancePlugin): frame_start_handle = frame_start - handle_start frame_end_handle = frame_end + handle_end + # NOTE used different key for final frame start/end to not confuse + # those who don't know what + # - e.g. "frame_start_output" + # QUESTION should we use tags ONLY from output definition? + # - In that case `output_def.get("tags") or []` should replace + # `repre["tags"]`. 
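# NOTE (sketch): split_ffmpeg_args() above splits combined preset entries
# on " -" so every token can be deduplicated and inspected separately, e.g.:
#
#     split_ffmpeg_args(["-codec:v libx264 -crf 18", "-preset slow"])
#     # -> ["-codec:v libx264", "-crf 18", "-preset slow"]
#
# One caveat: a quoted argument value that itself contains " -" (a filter
# string, for instance) would be split apart as well.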
+ # Change output frames when output should be without handles + without_handles = "no-handles" in new_repre["tags"] + if without_handles: + output_frame_start = frame_start + output_frame_end = frame_end + else: + output_frame_start = frame_start_handle + output_frame_end = frame_end_handle + return { "fps": float(instance.data["fps"]), "frame_start": frame_start, @@ -248,39 +352,21 @@ class ExtractReview(pyblish.api.InstancePlugin): "handle_end": handle_end, "frame_start_handle": frame_start_handle, "frame_end_handle": frame_end_handle, - "output_frame_start": frame_start_handle, - "output_frame_end": frame_end_handle, + "output_frame_start": output_frame_start, + "output_frame_end": output_frame_end, "pixel_aspect": instance.data.get("pixelAspect", 1), "resolution_width": instance.data.get("resolutionWidth"), "resolution_height": instance.data.get("resolutionHeight"), + "origin_repre": repre, + "source_is_sequence": self.source_is_sequence(repre), + "without_handles": without_handles } - def input_output_paths(self, repre, output_def): - staging_dir = repre["stagingDir"] + def input_output_paths(self, new_repre, output_def, temp_data): + staging_dir = new_repre["stagingDir"] + repre = temp_data["origin_repre"] - # TODO Define if extension should have dot or not - output_ext = output_def.get("ext") or "mov" - if output_ext.startswith("."): - output_ext = output_ext[1:] - - self.log.debug( - "New representation ext: `{}`".format(output_ext) - ) - - # Output is image file sequence witht frames - # QUESTION Shall we do it in opposite? Expect that if output - # extension is image format and input is sequence or video - # format then do sequence and single frame only if tag is - # "single-frame" (or similar) - # QUESTION should we use tags ONLY from output definition? - # - In that case `output_def.get("tags") or []` should replace - # `repre["tags"]`. - output_is_sequence = ( - "sequence" in repre["tags"] - and (output_ext in self.image_exts) - ) - - if self.repre_has_sequence(repre): + if temp_data["source_is_sequence"]: collections, remainder = clique.assemble(repre["files"]) full_input_path = os.path.join( @@ -298,21 +384,52 @@ class ExtractReview(pyblish.api.InstancePlugin): filename = os.path.splitext(repre["files"])[0] filename_suffix = output_def["filename_suffix"] + + output_ext = output_def.get("ext") + # Use source extension if definition do not specify it + if output_ext is None: + output_ext = os.path.splitext(full_input_path)[1] + + # TODO Define if extension should have dot or not + if output_ext.startswith("."): + output_ext = output_ext[1:] + + new_repre["ext"] = output_ext + + self.log.debug( + "New representation ext: `{}`".format(output_ext) + ) + + # Output is image file sequence witht frames + # QUESTION Shall we do it in opposite? Expect that if output + # extension is image format and input is sequence or video + # format then do sequence and single frame only if tag is + # "single-frame" (or similar) + # QUESTION should we use tags ONLY from output definition? + # - In that case `output_def.get("tags") or []` should replace + # `repre["tags"]`. + output_is_sequence = ( + "sequence" in new_repre["tags"] + and (output_ext in self.image_exts) + ) + # QUESTION This breaks Anatomy template system is it ok? # QUESTION How do we care about multiple outputs with same - # extension? (Expect we don't...) + # extension? (Expectings are: We don't...) 
# - possible solution add "<{review_suffix}>" into templates # but that may cause issues when clients remove that (and it's # ugly). if output_is_sequence: - filename_base = "{}_{}".format( - filename, filename_suffix - ) - repr_file = "{}.%08d.{}".format( - filename_base, output_ext - ) + filename_base = "{}_{}".format(filename, filename_suffix) + repr_file = "{}.%08d.{}".format(filename_base, output_ext) - repre["sequence_file"] = repr_file + new_repre_files = [] + frame_start = temp_data["output_frame_start"] + frame_end = temp_data["output_frame_end"] + for frame in range(frame_start, frame_end + 1): + new_repre_files.append(repr_file % frame) + + new_repre["sequence_file"] = repr_file full_output_path = os.path.join( staging_dir, filename_base, repr_file ) @@ -322,6 +439,17 @@ class ExtractReview(pyblish.api.InstancePlugin): filename, filename_suffix, output_ext ) full_output_path = os.path.join(staging_dir, repr_file) + new_repre_files = repr_file + + new_repre["files"] = new_repre_files + + staging_dir = os.path.normpath(os.path.dirname(full_output_path)) + if not os.path.exists(staging_dir): + self.log.debug("Creating dir: {}".format(staging_dir)) + os.makedirs(staging_dir) + + # Set stagingDir + new_repre["stagingDir"] = staging_dir self.log.debug("Input path {}".format(full_input_path)) self.log.debug("Output path {}".format(full_output_path)) @@ -338,7 +466,7 @@ class ExtractReview(pyblish.api.InstancePlugin): for audio in audio_inputs: # NOTE modified, always was expected "frameStartFtrack" which is - # STANGE?!!! + # STRANGE?!!! There should be different key, right? # TODO use different frame start! offset_seconds = 0 frame_start_ftrack = instance.data.get("frameStartFtrack") @@ -366,7 +494,11 @@ class ExtractReview(pyblish.api.InstancePlugin): return audio_in_args, audio_filters, audio_out_args - def resolution_ratios(self, temp_data, output_def, repre): + def resolution_ratios(self, temp_data, output_def, new_repre): + # TODO This is not implemented and requires reimplementation since + # self.to_width and self.to_height are not set. 
+ + # TODO get width, height from source output_width = output_def.get("width") output_height = output_def.get("height") output_pixel_aspect = output_def.get("aspect_ratio") @@ -394,19 +526,18 @@ class ExtractReview(pyblish.api.InstancePlugin): float(self.to_height) / (resolution_height * pixel_aspect) ) - self.log.debug("__ scale_factor: `{}`".format(scale_factor)) + self.log.debug("scale_factor: `{}`".format(scale_factor)) filters = [] # letter_box if output_letterbox: ffmpeg_width = self.to_width ffmpeg_height = self.to_height - if "reformat" not in repre["tags"]: + if "reformat" not in new_repre["tags"]: output_letterbox /= pixel_aspect if resolution_ratio_test != delivery_ratio_test: ffmpeg_width = resolution_width - ffmpeg_height = int( - resolution_height * pixel_aspect) + ffmpeg_height = int(resolution_height * pixel_aspect) else: if resolution_ratio_test != delivery_ratio_test: output_letterbox /= scale_factor @@ -452,7 +583,7 @@ class ExtractReview(pyblish.api.InstancePlugin): / (float(resolution_width) * pixel_aspect) ) self.log.debug( - "__ scale_factor: `{}`".format(scale_factor) + "scale_factor: `{}`".format(scale_factor) ) height_scale = int(resolution_height * scale_factor) height_half_pad = int((self.to_height - height_scale) / 2) @@ -467,12 +598,40 @@ class ExtractReview(pyblish.api.InstancePlugin): ) filters.append( "pad={}:{}:{}:{}:black".format( - self.to_width, self.to_height, + self.to_width, + self.to_height, width_half_pad, height_half_pad + ) ) filters.append("setsar=1") + new_repre["resolutionHeight"] = resolution_height + new_repre["resolutionWidth"] = resolution_width + + return filters + + def lut_filters(self, new_repre, instance, input_args): + filters = [] + # baking lut file application + lut_path = instance.data.get("lutPath") + if not lut_path or "bake-lut" not in new_repre["tags"]: + return filters + + # Prepare path for ffmpeg argument + lut_path = lut_path.replace("\\", "/").replace(":", "\\:") + + # Remove gamma from input arguments + if "-gamma" in input_args: + input_args.remove("-gamme") + + # Prepare filters + filters.append("lut3d=file='{}'".format(lut_path)) + # QUESTION hardcoded colormatrix? + filters.append("colormatrix=bt601:bt709") + + self.log.info("Added Lut to ffmpeg command") + return filters def main_family_from_instance(self, instance): @@ -556,7 +715,6 @@ class ExtractReview(pyblish.api.InstancePlugin): matching_profiles = None highest_profile_points = -1 - profile_values = {} # Each profile get 1 point for each matching filter. Profile with most # points is returnd. For cases when more than one profile will match # are also stored ordered lists of matching values. From ed84cf293f9fc4fb8587672da707a44d92889e46 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 23 Apr 2020 15:41:11 +0200 Subject: [PATCH 066/207] fixed typo --- pype/plugins/global/publish/extract_review.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 170193ff12..e2814d8eaf 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -54,7 +54,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # Make sure cleanup happens and pop representations with "delete" tag. 
for repre in tuple(instance.data["representations"]): tags = repre.get("tags") or [] - if "delete" if tags: + if "delete" in tags: instance.data["representations"].remove(repre) def main_process(self, instance): From 2e85ef792a470d8bb29538d1a2b706ab197b6806 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Apr 2020 11:28:03 +0200 Subject: [PATCH 067/207] added ffrpobe streams getting --- pype/plugins/global/publish/extract_review.py | 24 +++++++++++++++---- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index e2814d8eaf..242da397b2 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1,6 +1,8 @@ import os import re import copy +import json +import subprocess import pyblish.api import clique import pype.api @@ -32,7 +34,8 @@ class ExtractReview(pyblish.api.InstancePlugin): supported_exts = image_exts + video_exts # Path to ffmpeg - path_to_ffmpeg = None + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") # Preset attributes profiles = None @@ -83,9 +86,6 @@ class ExtractReview(pyblish.api.InstancePlugin): definition["filename_suffix"] = filename_suffix profile_outputs.append(definition) - if self.path_to_ffmpeg is None: - self.path_to_ffmpeg = pype.lib.get_ffmpeg_tool_path("ffmpeg") - # Loop through representations for repre in tuple(instance.data["representations"]): tags = repre.get("tags", []) @@ -302,7 +302,7 @@ class ExtractReview(pyblish.api.InstancePlugin): audio_filters.append(arg) all_args = [] - all_args.append(self.path_to_ffmpeg) + all_args.append(self.ffmpeg_path) all_args.extend(input_args) if video_filters: all_args.append("-filter:v {}".format(",".join(video_filters))) @@ -634,6 +634,20 @@ class ExtractReview(pyblish.api.InstancePlugin): return filters + def ffprobe_streams(self, path_to_file): + args = [ + self.ffprobe_path, + "-v quiet", + "-print_format json", + "-show_format", + "-show_streams" + "\"{}\"".format(path_to_file) + ] + command = " ".join(args) + popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + + return json.loads(popen.communicate()[0])["streams"][0] + def main_family_from_instance(self, instance): family = instance.data.get("family") if not family: From a8a2efa975218da0a0d90e434e0aeab1016399da Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Apr 2020 15:28:33 +0200 Subject: [PATCH 068/207] extract should work now --- pype/plugins/global/publish/extract_review.py | 212 ++++++++++-------- 1 file changed, 116 insertions(+), 96 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 242da397b2..04c987f717 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -451,6 +451,10 @@ class ExtractReview(pyblish.api.InstancePlugin): # Set stagingDir new_repre["stagingDir"] = staging_dir + # Store paths to temp data + temp_data["full_input_path"] = full_input_path + temp_data["full_output_path"] = full_output_path + self.log.debug("Input path {}".format(full_input_path)) self.log.debug("Output path {}".format(full_output_path)) @@ -495,119 +499,135 @@ class ExtractReview(pyblish.api.InstancePlugin): return audio_in_args, audio_filters, audio_out_args def resolution_ratios(self, temp_data, output_def, new_repre): - # TODO This is not implemented and requires reimplementation since - 
# self.to_width and self.to_height are not set. - - # TODO get width, height from source - output_width = output_def.get("width") - output_height = output_def.get("height") - output_pixel_aspect = output_def.get("aspect_ratio") - output_letterbox = output_def.get("letter_box") - - # defining image ratios - resolution_ratio = ( - (float(resolution_width) * pixel_aspect) / resolution_height - ) - delivery_ratio = float(self.to_width) / float(self.to_height) - self.log.debug("resolution_ratio: `{}`".format(resolution_ratio)) - self.log.debug("delivery_ratio: `{}`".format(delivery_ratio)) - - # shorten two decimals long float number for testing conditions - resolution_ratio_test = float("{:0.2f}".format(resolution_ratio)) - delivery_ratio_test = float("{:0.2f}".format(delivery_ratio)) - - # get scale factor - if resolution_ratio_test < delivery_ratio_test: - scale_factor = ( - float(self.to_width) / (resolution_width * pixel_aspect) - ) - else: - scale_factor = ( - float(self.to_height) / (resolution_height * pixel_aspect) - ) - - self.log.debug("scale_factor: `{}`".format(scale_factor)) - filters = [] - # letter_box - if output_letterbox: - ffmpeg_width = self.to_width - ffmpeg_height = self.to_height - if "reformat" not in new_repre["tags"]: - output_letterbox /= pixel_aspect - if resolution_ratio_test != delivery_ratio_test: - ffmpeg_width = resolution_width - ffmpeg_height = int(resolution_height * pixel_aspect) - else: - if resolution_ratio_test != delivery_ratio_test: - output_letterbox /= scale_factor - else: - output_letterbox /= pixel_aspect - filters.append( - "scale={}x{}:flags=lanczos".format(ffmpeg_width, ffmpeg_height) - ) - # QUESTION shouldn't this contain aspect ration value instead of 1? - filters.append( - "setsar=1" - ) - filters.append(( - "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c=black" - ).format(output_letterbox)) + letter_box = output_def.get("letter_box") + # Skip processing if both conditions are not met + if "reformat" not in new_repre["tags"] and not letter_box: + return filters - filters.append(( - "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" - ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black" - ).format(output_letterbox)) + # Get instance data + pixel_aspect = temp_data["pixel_aspect"] + input_width = temp_data["resolution_width"] + input_height = temp_data["resolution_height"] + + # If instance miss resolution settings. + if input_width is None or input_height is None: + # Use input resolution + # QUESTION Should we skip instance data and use these values + # by default? + input_data = self.ffprobe_streams(temp_data["full_input_path"]) + input_width = input_data["width"] + input_height = input_data["height"] self.log.debug("pixel_aspect: `{}`".format(pixel_aspect)) - self.log.debug("resolution_width: `{}`".format(resolution_width)) - self.log.debug("resolution_height: `{}`".format(resolution_height)) + self.log.debug("input_width: `{}`".format(input_width)) + self.log.debug("resolution_height: `{}`".format(input_height)) + + # NOTE Setting only one of `width` or `heigth` is not allowed + output_width = output_def.get("width") + output_height = output_def.get("height") + # Use instance resolution if output definition has not set it. 
+ if output_width is None or output_height is None: + output_width = input_width + output_height = input_height + + # defining image ratios + input_res_ratio = ( + (float(input_width) * pixel_aspect) / input_height + ) + output_res_ratio = float(output_width) / float(output_height) + self.log.debug("resolution_ratio: `{}`".format(input_res_ratio)) + self.log.debug("output_res_ratio: `{}`".format(output_res_ratio)) + + # Round ratios to 2 decimal places for comparing + input_res_ratio = round(input_res_ratio, 2) + output_res_ratio = round(output_res_ratio, 2) + + # get scale factor + scale_factor_by_width = ( + float(output_width) / (input_width * pixel_aspect) + ) + scale_factor_by_height = ( + float(output_height) / (input_height * pixel_aspect) + ) + + self.log.debug( + "scale_factor_by_with: `{}`".format(scale_factor_by_width) + ) + self.log.debug( + "scale_factor_by_height: `{}`".format(scale_factor_by_height) + ) + + # letter_box + letter_box = output_def.get("letter_box") + if letter_box: + ffmpeg_width = output_width + ffmpeg_height = output_height + if "reformat" in new_repre["tags"]: + if input_res_ratio == output_res_ratio: + letter_box /= pixel_aspect + elif input_res_ratio < output_res_ratio: + letter_box /= scale_factor_by_width + else: + letter_box /= scale_factor_by_height + else: + letter_box /= pixel_aspect + if input_res_ratio != output_res_ratio: + ffmpeg_width = input_width + ffmpeg_height = int(input_height * pixel_aspect) + + # QUESTION Is scale required when ffmpeg_width is same as + # output_width and ffmpeg_height as output_height + scale_filter = "scale={0}x{1}:flags=lanczos".format( + ffmpeg_width, ffmpeg_height + ) + + top_box = ( + "drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black" + ).format(letter_box) + + bottom_box = ( + "drawbox=0:ih-round((ih-(iw*(1/{0})))/2)" + ":iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black" + ).format(letter_box) + + # Add letter box filters + filters.extend([scale_filter, "setsar=1", top_box, bottom_box]) # scaling none square pixels and 1920 width - # QUESTION: again check only output tags or repre tags - # WARNING: Duplication of filters when letter_box is set (or not?) 
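# NOTE (worked example, sketched): for the reformat branch below, a
# hypothetical 2048x858 scope frame with pixel_aspect 1.0 into a 1920x1080
# output gives input_res_ratio 2.39 > output_res_ratio 1.78, so the "else"
# path applies:
#
#     scale_factor_by_width = 1920 / 2048.0      # 0.9375
#     width_scale, width_half_pad = 1920, 0
#     height_scale = int(858 * 0.9375)           # 804
#     height_half_pad = int((1080 - 804) / 2)    # 138
#
# producing "scale=1920x804:flags=lanczos", "pad=1920:1080:0:138:black"
# and "setsar=1".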
- if "reformat" in repre["tags"]: - if resolution_ratio_test < delivery_ratio_test: - self.log.debug("lower then delivery") - width_scale = int(self.to_width * scale_factor) - width_half_pad = int((self.to_width - width_scale) / 2) - height_scale = self.to_height + if "reformat" in new_repre["tags"]: + if input_res_ratio < output_res_ratio: + self.log.debug("lower then output") + width_scale = int(output_width * scale_factor_by_width) + width_half_pad = int((output_width - width_scale) / 2) + height_scale = output_height height_half_pad = 0 else: - self.log.debug("heigher then delivery") - width_scale = self.to_width + self.log.debug("heigher then output") + width_scale = output_width width_half_pad = 0 - scale_factor = ( - float(self.to_width) - / (float(resolution_width) * pixel_aspect) - ) - self.log.debug( - "scale_factor: `{}`".format(scale_factor) - ) - height_scale = int(resolution_height * scale_factor) - height_half_pad = int((self.to_height - height_scale) / 2) + height_scale = int(input_height * scale_factor_by_width) + height_half_pad = int((output_height - height_scale) / 2) self.log.debug("width_scale: `{}`".format(width_scale)) self.log.debug("width_half_pad: `{}`".format(width_half_pad)) self.log.debug("height_scale: `{}`".format(height_scale)) self.log.debug("height_half_pad: `{}`".format(height_half_pad)) - filters.append( - "scale={}x{}:flags=lanczos".format(width_scale, height_scale) - ) - filters.append( - "pad={}:{}:{}:{}:black".format( - self.to_width, - self.to_height, - width_half_pad, - height_half_pad - ) - ) - filters.append("setsar=1") + filters.extend([ + "scale={0}x{1}:flags=lanczos".format( + width_scale, height_scale + ), + "pad={0}:{1}:{2}:{3}:black".format( + output_width, output_height, + width_half_pad, height_half_pad + ), + "setsar=1" + ]) - new_repre["resolutionHeight"] = resolution_height - new_repre["resolutionWidth"] = resolution_width + new_repre["resolutionWidth"] = output_width + new_repre["resolutionHeight"] = output_height return filters From b011caa6086f865ca8e136b451e01934b473c107 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Apr 2020 18:29:07 +0200 Subject: [PATCH 069/207] few final touches --- pype/plugins/global/publish/extract_review.py | 340 ++++++++++++------ 1 file changed, 225 insertions(+), 115 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 04c987f717..c77368bccf 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -19,8 +19,7 @@ class ExtractReview(pyblish.api.InstancePlugin): All new represetnations are created and encoded by ffmpeg following presets found in `pype-config/presets/plugins/global/ - publish.json:ExtractReview:outputs`. To change the file extension - filter values use preset's attributes `ext_filter` + publish.json:ExtractReview:outputs`. """ label = "Extract Review" @@ -33,7 +32,7 @@ class ExtractReview(pyblish.api.InstancePlugin): video_exts = ["mov", "mp4"] supported_exts = image_exts + video_exts - # Path to ffmpeg + # FFmpeg tools paths ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") @@ -71,13 +70,23 @@ class ExtractReview(pyblish.api.InstancePlugin): self.profiles, profile_filter_data ) if not profile: + self.log.info(( + "Skipped instance. 
None of profiles in presets are for" + " Host: \"{host}\" | Family: \"{family}\" | Task \"{task}\"" + ).format(**profile_filter_data)) return + self.log.debug("Matching profile: \"{}\"".format(json.dumps(profile))) + instance_families = self.families_from_instance(instance) _profile_outputs = self.filter_outputs_by_families( profile, instance_families ) if not _profile_outputs: + self.log.info(( + "Skipped instance. All output definitions from selected" + " profile does not match to instance families. \"{}\"" + ).format(str(instance_families))) return # Store `filename_suffix` to save arguments @@ -88,7 +97,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # Loop through representations for repre in tuple(instance.data["representations"]): - tags = repre.get("tags", []) + tags = repre.get("tags") or [] if ( "review" not in tags or "multipartExr" in tags @@ -96,35 +105,42 @@ class ExtractReview(pyblish.api.InstancePlugin): ): continue - source_ext = repre["ext"] - if source_ext.startswith("."): - source_ext = source_ext[1:] + input_ext = repre["ext"] + if input_ext.startswith("."): + input_ext = input_ext[1:] - if source_ext not in self.supported_exts: + if input_ext not in self.supported_exts: + self.log.info( + "Representation has unsupported extension \"{}\"".format( + input_ext + ) + ) continue # Filter output definition by representation tags (optional) outputs = self.filter_outputs_by_tags(profile_outputs, tags) if not outputs: + self.log.info(( + "Skipped representation. All output definitions from" + " selected profile does not match to representation's" + " tags. \"{}\"" + ).format(str(tags))) continue - # Prepare instance data. - # NOTE Till this point it is not required to have set most - # of keys in instance data. So publishing won't crash if plugin - # won't get here and instance miss required keys. for output_def in outputs: + # Make sure output definition has "tags" key + if "tags" not in output_def: + output_def["tags"] = [] + # Create copy of representation new_repre = copy.deepcopy(repre) - additional_tags = output_def.get("tags") or [] - # TODO new method? - # `self.prepare_new_repre_tags(new_repre, additional_tags)` # Remove "delete" tag from new repre if there is if "delete" in new_repre["tags"]: new_repre["tags"].remove("delete") # Add additional tags from output definition to representation - for tag in additional_tags: + for tag in output_def["tags"]: if tag not in new_repre["tags"]: new_repre["tags"].append(tag) @@ -132,13 +148,13 @@ class ExtractReview(pyblish.api.InstancePlugin): "New representation tags: `{}`".format(new_repre["tags"]) ) - # QUESTION Why the hell we do this, adding tags to families? - # add families - for tag in additional_tags: - if tag not in instance.data["families"]: - instance.data["families"].append(tag) + # # QUESTION Why the hell we were adding tags to families? 
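# [editor's note] The tag handling above in short: every output definition
# gets its own deep copy of the source representation, with the "delete" tag
# dropped and the definition's tags merged in. A standalone sketch:
import copy

def build_new_repre(repre, output_def):
    new_repre = copy.deepcopy(repre)
    # Never propagate the "delete" tag to the encoded output
    if "delete" in new_repre["tags"]:
        new_repre["tags"].remove("delete")
    # Merge tags from the output definition without creating duplicates
    for tag in output_def.get("tags") or []:
        if tag not in new_repre["tags"]:
            new_repre["tags"].append(tag)
    return new_repre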
+ # # add families + # for tag in output_def["tags"]: + # if tag not in instance.data["families"]: + # instance.data["families"].append(tag) - temp_data = self.prepare_temp_data(instance, repre, new_repre) + temp_data = self.prepare_temp_data(instance, repre, output_def) ffmpeg_args = self._ffmpeg_arguments( output_def, instance, temp_data @@ -170,7 +186,8 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("Adding: {}".format(new_repre)) instance.data["representations"].append(new_repre) - def source_is_sequence(self, repre): + def input_is_sequence(self, repre): + """Deduce from representation data if input is sequence.""" # TODO GLOBAL ISSUE - Find better way how to find out if input # is sequence. Issues( in theory): # - there may be multiple files ant not be sequence @@ -178,8 +195,82 @@ class ExtractReview(pyblish.api.InstancePlugin): # - there can be more than one collection return isinstance(repre["files"], (list, tuple)) + def prepare_temp_data(self, instance, repre, output_def): + """Prepare dictionary with values used across extractor's process. + + All data are collected from instance, context, origin representation + and output definition. + + There are few required keys in Instance data: "frameStart", "frameEnd" + and "fps". + + Args: + instance (Instance): Currently processed instance. + repre (dict): Representation from which new representation was + copied. + output_def (dict): Definition of output of this plugin. + + Returns: + dict: All data which are used across methods during process. + Their values should not change during process but new keys + with values may be added. + """ + + frame_start = instance.data["frameStart"] + handle_start = instance.data.get( + "handleStart", + instance.context.data["handleStart"] + ) + frame_end = instance.data["frameEnd"] + handle_end = instance.data.get( + "handleEnd", + instance.context.data["handleEnd"] + ) + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + + # Change output frames when output should be without handles + without_handles = "no-handles" in output_def["tags"] + if without_handles: + output_frame_start = frame_start + output_frame_end = frame_end + else: + output_frame_start = frame_start_handle + output_frame_end = frame_end_handle + + return { + "fps": float(instance.data["fps"]), + "frame_start": frame_start, + "frame_end": frame_end, + "handle_start": handle_start, + "handle_end": handle_end, + "frame_start_handle": frame_start_handle, + "frame_end_handle": frame_end_handle, + "output_frame_start": output_frame_start, + "output_frame_end": output_frame_end, + "pixel_aspect": instance.data.get("pixelAspect", 1), + "resolution_width": instance.data.get("resolutionWidth"), + "resolution_height": instance.data.get("resolutionHeight"), + "origin_repre": repre, + "input_is_sequence": self.input_is_sequence(repre), + "without_handles": without_handles + } + def _ffmpeg_arguments(self, output_def, instance, new_repre, temp_data): - # TODO this may hold class which may be easier to work with + """Prepares ffmpeg arguments for expected extraction. + + Prepares input and output arguments based on output definition and + input files. + + Args: + output_def (dict): Currently processed output definition. + instance (Instance): Currently processed instance. + new_repre (dict): Reprensetation representing output of this + process. + temp_data (dict): Base data for successfull process. 
+ """ + # Get FFmpeg arguments from profile presets out_def_ffmpeg_args = output_def.get("ffmpeg_args") or {} @@ -200,15 +291,13 @@ class ExtractReview(pyblish.api.InstancePlugin): ) ffmpeg_output_args.append("-frames:v {}".format(duration_frames)) - if temp_data["source_is_sequence"]: - # NOTE removed "detectedStart" key handling (NOT SET) - + if temp_data["input_is_sequence"]: # Set start frame ffmpeg_input_args.append( "-start_number {}".format(temp_data["output_frame_start"]) ) - # TODO add fps mapping `{fps: fraction}` + # TODO add fps mapping `{fps: fraction}` ? # - e.g.: { # "25": "25/1", # "24": "24/1", @@ -221,7 +310,7 @@ class ExtractReview(pyblish.api.InstancePlugin): elif temp_data["without_handles"]: # QUESTION Shall we change this to use filter: - # `select="gte(n\,handle_start),setpts=PTS-STARTPTS` + # `select="gte(n\,{handle_start}),setpts=PTS-STARTPTS` # Pros: # 1.) Python is not good at float operation # 2.) FPS on instance may not be same as input's @@ -241,12 +330,12 @@ class ExtractReview(pyblish.api.InstancePlugin): ffmpeg_audio_filters.extend(audio_filters) ffmpeg_output_args.extend(audio_out_args) - # In case audio is longer than video`. # QUESTION what if audio is shoter than video? + # In case audio is longer than video`. if "-shortest" not in ffmpeg_output_args: ffmpeg_output_args.append("-shortest") - res_filters = self.resolution_ratios(temp_data, output_def, new_repre) + res_filters = self.rescaling_filters(temp_data, output_def, new_repre) ffmpeg_video_filters.extend(res_filters) ffmpeg_input_args = self.split_ffmpeg_args(ffmpeg_input_args) @@ -254,7 +343,7 @@ class ExtractReview(pyblish.api.InstancePlugin): lut_filters = self.lut_filters(new_repre, instance, ffmpeg_input_args) ffmpeg_video_filters.extend(lut_filters) - # WARNING This must be latest added item to output arguments. + # NOTE This must be latest added item to output arguments. ffmpeg_output_args.append("\"{}\"".format(full_output_path)) return self.ffmpeg_full_args( @@ -265,6 +354,11 @@ class ExtractReview(pyblish.api.InstancePlugin): ) def split_ffmpeg_args(self, in_args): + """Makes sure all entered arguments are separated in individual items. + + Split each argument string with " -" to identify if string contains + one or more arguments. + """ splitted_args = [] for arg in in_args: sub_args = arg.split(" -") @@ -284,6 +378,22 @@ class ExtractReview(pyblish.api.InstancePlugin): def ffmpeg_full_args( self, input_args, video_filters, audio_filters, output_args ): + """Post processing of collected FFmpeg arguments. + + Just verify that output arguments does not contain video or audio + filters which may cause issues because of duplicated argument entry. + Filters found in output arguments are moved to list they belong to. + + Args: + input_args (list): All collected ffmpeg arguments with inputs. + video_filters (list): All collected video filters. + audio_filters (list): All collected audio filters. + output_args (list): All collected ffmpeg output arguments with + output filepath. + + Returns: + list: Containing all arguments ready to run in subprocess. 
+ """ output_args = self.split_ffmpeg_args(output_args) video_args_dentifiers = ["-vf", "-filter:v"] @@ -314,59 +424,23 @@ class ExtractReview(pyblish.api.InstancePlugin): return all_args - def prepare_temp_data(self, instance, repre, new_repre): - frame_start = instance.data["frameStart"] - handle_start = instance.data.get( - "handleStart", - instance.context.data["handleStart"] - ) - frame_end = instance.data["frameEnd"] - handle_end = instance.data.get( - "handleEnd", - instance.context.data["handleEnd"] - ) - - frame_start_handle = frame_start - handle_start - frame_end_handle = frame_end + handle_end - - # NOTE used different key for final frame start/end to not confuse - # those who don't know what - # - e.g. "frame_start_output" - # QUESTION should we use tags ONLY from output definition? - # - In that case `output_def.get("tags") or []` should replace - # `repre["tags"]`. - # Change output frames when output should be without handles - without_handles = "no-handles" in new_repre["tags"] - if without_handles: - output_frame_start = frame_start - output_frame_end = frame_end - else: - output_frame_start = frame_start_handle - output_frame_end = frame_end_handle - - return { - "fps": float(instance.data["fps"]), - "frame_start": frame_start, - "frame_end": frame_end, - "handle_start": handle_start, - "handle_end": handle_end, - "frame_start_handle": frame_start_handle, - "frame_end_handle": frame_end_handle, - "output_frame_start": output_frame_start, - "output_frame_end": output_frame_end, - "pixel_aspect": instance.data.get("pixelAspect", 1), - "resolution_width": instance.data.get("resolutionWidth"), - "resolution_height": instance.data.get("resolutionHeight"), - "origin_repre": repre, - "source_is_sequence": self.source_is_sequence(repre), - "without_handles": without_handles - } - def input_output_paths(self, new_repre, output_def, temp_data): + """Deduce input nad output file paths based on entered data. + + Input may be sequence of images, video file or single image file and + same can be said about output, this method helps to find out what + their paths are. + + It is validated that output directory exist and creates if not. + + During process are set "files", "stagingDir", "ext" and + "sequence_file" (if output is sequence) keys to new representation. + """ + staging_dir = new_repre["stagingDir"] repre = temp_data["origin_repre"] - if temp_data["source_is_sequence"]: + if temp_data["input_is_sequence"]: collections, remainder = clique.assemble(repre["files"]) full_input_path = os.path.join( @@ -386,7 +460,7 @@ class ExtractReview(pyblish.api.InstancePlugin): filename_suffix = output_def["filename_suffix"] output_ext = output_def.get("ext") - # Use source extension if definition do not specify it + # Use input extension if output definition do not specify it if output_ext is None: output_ext = os.path.splitext(full_input_path)[1] @@ -394,38 +468,29 @@ class ExtractReview(pyblish.api.InstancePlugin): if output_ext.startswith("."): output_ext = output_ext[1:] + # Store extension to representation new_repre["ext"] = output_ext - self.log.debug( - "New representation ext: `{}`".format(output_ext) - ) + self.log.debug("New representation ext: `{}`".format(output_ext)) # Output is image file sequence witht frames - # QUESTION Shall we do it in opposite? 
Expect that if output - # extension is image format and input is sequence or video - # format then do sequence and single frame only if tag is - # "single-frame" (or similar) - # QUESTION should we use tags ONLY from output definition? - # - In that case `output_def.get("tags") or []` should replace - # `repre["tags"]`. output_is_sequence = ( - "sequence" in new_repre["tags"] - and (output_ext in self.image_exts) + (output_ext in self.image_exts) + and "sequence" in output_def["tags"] ) - # QUESTION This breaks Anatomy template system is it ok? - # QUESTION How do we care about multiple outputs with same - # extension? (Expectings are: We don't...) - # - possible solution add "<{review_suffix}>" into templates - # but that may cause issues when clients remove that (and it's - # ugly). if output_is_sequence: - filename_base = "{}_{}".format(filename, filename_suffix) - repr_file = "{}.%08d.{}".format(filename_base, output_ext) - new_repre_files = [] frame_start = temp_data["output_frame_start"] frame_end = temp_data["output_frame_end"] + + filename_base = "{}_{}".format(filename, filename_suffix) + # Temporary tempalte for frame filling. Example output: + # "basename.%04d.mov" when `frame_end` == 1001 + repr_file = "{}.%{:0>2}d.{}".format( + filename_base, len(str(frame_end)), output_ext + ) + for frame in range(frame_start, frame_end + 1): new_repre_files.append(repr_file % frame) @@ -441,14 +506,16 @@ class ExtractReview(pyblish.api.InstancePlugin): full_output_path = os.path.join(staging_dir, repr_file) new_repre_files = repr_file + # Store files to representation new_repre["files"] = new_repre_files + # Make sure stagingDire exists staging_dir = os.path.normpath(os.path.dirname(full_output_path)) if not os.path.exists(staging_dir): self.log.debug("Creating dir: {}".format(staging_dir)) os.makedirs(staging_dir) - # Set stagingDir + # Store stagingDir to representaion new_repre["stagingDir"] = staging_dir # Store paths to temp data @@ -461,6 +528,7 @@ class ExtractReview(pyblish.api.InstancePlugin): return full_input_path, full_output_path def audio_args(self, instance, temp_data): + """Prepares FFMpeg arguments for audio inputs.""" audio_in_args = [] audio_filters = [] audio_out_args = [] @@ -498,13 +566,18 @@ class ExtractReview(pyblish.api.InstancePlugin): return audio_in_args, audio_filters, audio_out_args - def resolution_ratios(self, temp_data, output_def, new_repre): + def rescaling_filters(self, temp_data, output_def, new_repre): + """Prepare vieo filters based on tags in new representation. + + It is possible to add letterboxes to output video or rescale to + different resolution. + + During this preparation "resolutionWidth" and "resolutionHeight" are + set to new representation. + """ filters = [] letter_box = output_def.get("letter_box") - # Skip processing if both conditions are not met - if "reformat" not in new_repre["tags"] and not letter_box: - return filters # Get instance data pixel_aspect = temp_data["pixel_aspect"] @@ -513,9 +586,9 @@ class ExtractReview(pyblish.api.InstancePlugin): # If instance miss resolution settings. if input_width is None or input_height is None: - # Use input resolution - # QUESTION Should we skip instance data and use these values + # QUESTION Shall we skip instance data and use these values # by default? 
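# [editor's note] Illustration (not part of the patch) of the frame-number
# template built above; the printf-style padding width is derived from the
# last frame number:
frame_end = 1001
repr_file = "{}.%{:0>2}d.{}".format("shot010_h264", len(str(frame_end)), "png")
# repr_file == "shot010_h264.%04d.png"
print(repr_file % 997)   # -> "shot010_h264.0997.png"
print(repr_file % 1001)  # -> "shot010_h264.1001.png"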
+ # Use input resolution input_data = self.ffprobe_streams(temp_data["full_input_path"]) input_width = input_data["width"] input_height = input_data["height"] @@ -524,6 +597,12 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("input_width: `{}`".format(input_width)) self.log.debug("resolution_height: `{}`".format(input_height)) + # Skip processing if both conditions are not met + if "reformat" not in new_repre["tags"] and not letter_box: + new_repre["resolutionWidth"] = input_width + new_repre["resolutionHeight"] = input_height + return filters + # NOTE Setting only one of `width` or `heigth` is not allowed output_width = output_def.get("width") output_height = output_def.get("height") @@ -579,12 +658,12 @@ class ExtractReview(pyblish.api.InstancePlugin): # QUESTION Is scale required when ffmpeg_width is same as # output_width and ffmpeg_height as output_height - scale_filter = "scale={0}x{1}:flags=lanczos".format( + scale_filter = "scale={}x{}:flags=lanczos".format( ffmpeg_width, ffmpeg_height ) top_box = ( - "drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black" + "drawbox=0:0:iw:round((ih-(iw*(1/{})))/2):t=fill:c=black" ).format(letter_box) bottom_box = ( @@ -616,10 +695,10 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("height_half_pad: `{}`".format(height_half_pad)) filters.extend([ - "scale={0}x{1}:flags=lanczos".format( + "scale={}x{}:flags=lanczos".format( width_scale, height_scale ), - "pad={0}:{1}:{2}:{3}:black".format( + "pad={}:{}:{}:{}:black".format( output_width, output_height, width_half_pad, height_half_pad ), @@ -632,6 +711,7 @@ class ExtractReview(pyblish.api.InstancePlugin): return filters def lut_filters(self, new_repre, instance, input_args): + """Add lut file to output ffmpeg filters.""" filters = [] # baking lut file application lut_path = instance.data.get("lutPath") @@ -650,11 +730,12 @@ class ExtractReview(pyblish.api.InstancePlugin): # QUESTION hardcoded colormatrix? filters.append("colormatrix=bt601:bt709") - self.log.info("Added Lut to ffmpeg command") + self.log.info("Added Lut to ffmpeg command.") return filters def ffprobe_streams(self, path_to_file): + """Load streams from entered filepath.""" args = [ self.ffprobe_path, "-v quiet", @@ -669,12 +750,14 @@ class ExtractReview(pyblish.api.InstancePlugin): return json.loads(popen.communicate()[0])["streams"][0] def main_family_from_instance(self, instance): + """Returns main family of entered instance.""" family = instance.data.get("family") if not family: family = instance.data["families"][0] return family def families_from_instance(self, instance): + """Returns all families of entered instance.""" families = [] family = instance.data.get("family") if family: @@ -686,6 +769,7 @@ class ExtractReview(pyblish.api.InstancePlugin): return families def compile_list_of_regexes(self, in_list): + """Convert strings in entered list to compiled regex objects.""" regexes = [] if not in_list: return regexes @@ -851,6 +935,10 @@ class ExtractReview(pyblish.api.InstancePlugin): return final_profile def families_filter_validation(self, families, output_families_filter): + """Determines if entered families intersect with families filters. + + All family values are lowered to avoid unexpected results. + """ if not output_families_filter: return True @@ -885,6 +973,17 @@ class ExtractReview(pyblish.api.InstancePlugin): return False def filter_outputs_by_families(self, profile, families): + """Filter outputs that are not supported for instance families. 
+
+        Output definitions without families filter are marked as valid.
+
+        Args:
+            profile (dict): Profile from presets matching current context.
+            families (list): All families of current instance.
+
+        Returns:
+            list: Containing all output definitions matching entered families.
+        """
         outputs = profile.get("outputs") or []
         if not outputs:
             return outputs
@@ -910,6 +1009,17 @@ class ExtractReview(pyblish.api.InstancePlugin):
         return filtered_outputs
 
     def filter_outputs_by_tags(self, outputs, tags):
+        """Filter output definitions by entered representation tags.
+
+        Output definitions without tags filter are marked as valid.
+
+        Args:
+            outputs (list): List of output definitions from presets.
+            tags (list): Tags of processed representation.
+
+        Returns:
+            list: Containing all output definitions matching entered tags.
+        """
         filtered_outputs = []
         repre_tags_low = [tag.lower() for tag in tags]
         for output_def in outputs:

From 673e531689ad32b1377c11740d038f6ac457d20a Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 24 Apr 2020 19:03:01 +0200
Subject: [PATCH 070/207] added profile_exclusion

---
 pype/plugins/global/publish/extract_review.py | 102 +++++++++++-------
 1 file changed, 62 insertions(+), 40 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index c77368bccf..fe8946114d 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -811,6 +811,67 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 break
         return output
 
+    def profile_exclusion(self, matching_profiles):
+        """Find the most matching profile by host, task and family match.
+
+        Profiles are selectively filtered. Each profile should have
+        "__value__" key with list of booleans. Each boolean represents
+        existence of filter for specific key (host, task, family).
+        Profiles are looped over in sequence. On each pass they are split
+        into true_list and false_list; the next pass uses the profiles in
+        true_list if there are any, otherwise false_list is used.
+
+        Filtering ends when only one profile is left in true_list, or when
+        all existence booleans were checked; in that case the first of the
+        remaining profiles is returned.
+
+        Args:
+            matching_profiles (list): Profiles with same values.
+
+        Returns:
+            dict: Most matching profile.
+        """
+        self.log.info(
+            "Search for first most matching profile in match order:"
+            " Host name -> Task name -> Family."
+        )
+        # Filter all profiles with highest points value. First filter profiles
+        # with matching host if there are any then filter profiles by task
+        # name if there are any and lastly filter by family. Else use first in
+        # list.
+        idx = 0
+        final_profile = None
+        while True:
+            profiles_true = []
+            profiles_false = []
+            for profile in matching_profiles:
+                value = profile["__value__"]
+                # Just use first profile when idx is greater than values.
+                if not idx < len(value):
+                    final_profile = profile
+                    break
+
+                if value[idx]:
+                    profiles_true.append(profile)
+                else:
+                    profiles_false.append(profile)
+
+            if final_profile is not None:
+                break
+
+            if profiles_true:
+                matching_profiles = profiles_true
+            else:
+                matching_profiles = profiles_false
+
+            if len(matching_profiles) == 1:
+                final_profile = matching_profiles[0]
+                break
+            idx += 1
+
+        final_profile.pop("__value__")
+        return final_profile
+
     def find_matching_profile(self, profiles, filter_data):
         """ Filter profiles by Host name, Task name and main Family.
@@ -893,46 +954,7 @@ class ExtractReview(pyblish.api.InstancePlugin): " Host \"{host}\" | Task: \"{task}\" | Family: \"{family}\"" ).format(**filter_data)) - # Filter all profiles with highest points value. First filter profiles - # with matching host if there are any then filter profiles by task - # name if there are any and lastly filter by family. Else use first in - # list. - idx = 0 - final_profile = None - while True: - profiles_true = [] - profiles_false = [] - for profile in matching_profiles: - value = profile["__value__"] - # Just use first profile when idx is greater than values. - if not idx < len(value): - final_profile = profile - break - - if value[idx]: - profiles_true.append(profile) - else: - profiles_false.append(profile) - - if final_profile is not None: - break - - if profiles_true: - matching_profiles = profiles_true - else: - matching_profiles = profiles_false - - if len(matching_profiles) == 1: - final_profile = matching_profiles[0] - break - idx += 1 - - final_profile.pop("__value__") - self.log.info( - "Using first most matching profile in match order:" - " Host name -> Task name -> Family." - ) - return final_profile + return self.profile_exclusion(matching_profiles) def families_filter_validation(self, families, output_families_filter): """Determines if entered families intersect with families filters. From c55373238bb5da99b9cfdb61b8f0e083af871830 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Apr 2020 19:22:53 +0200 Subject: [PATCH 071/207] make sure always is accessible path to one input file --- pype/plugins/global/publish/extract_review.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index fe8946114d..deb4eb9ab6 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -441,7 +441,7 @@ class ExtractReview(pyblish.api.InstancePlugin): repre = temp_data["origin_repre"] if temp_data["input_is_sequence"]: - collections, remainder = clique.assemble(repre["files"]) + collections = clique.assemble(repre["files"])[0] full_input_path = os.path.join( staging_dir, @@ -451,12 +451,21 @@ class ExtractReview(pyblish.api.InstancePlugin): filename = collections[0].format("{head}") if filename.endswith("."): filename = filename[:-1] + + # Make sure to have full path to one input file + full_input_path_single_file = os.path.join( + staging_dir, repre["files"][0] + ) + else: full_input_path = os.path.join( staging_dir, repre["files"] ) filename = os.path.splitext(repre["files"])[0] + # Make sure to have full path to one input file + full_input_path_single_file = full_input_path + filename_suffix = output_def["filename_suffix"] output_ext = output_def.get("ext") @@ -520,6 +529,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # Store paths to temp data temp_data["full_input_path"] = full_input_path + temp_data["full_input_path_single_file"] = full_input_path_single_file temp_data["full_output_path"] = full_output_path self.log.debug("Input path {}".format(full_input_path)) From f267476d33dc7aef32f1e39cf3063e017d227bf6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Apr 2020 19:23:45 +0200 Subject: [PATCH 072/207] input resolution is not taken from instance data but from input source --- pype/plugins/global/publish/extract_review.py | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py 
b/pype/plugins/global/publish/extract_review.py index deb4eb9ab6..32c8a10039 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -591,17 +591,12 @@ class ExtractReview(pyblish.api.InstancePlugin): # Get instance data pixel_aspect = temp_data["pixel_aspect"] - input_width = temp_data["resolution_width"] - input_height = temp_data["resolution_height"] - # If instance miss resolution settings. - if input_width is None or input_height is None: - # QUESTION Shall we skip instance data and use these values - # by default? - # Use input resolution - input_data = self.ffprobe_streams(temp_data["full_input_path"]) - input_width = input_data["width"] - input_height = input_data["height"] + # NOTE Skipped using instance's resolution + full_input_path_single_file = temp_data["full_input_path_single_file"] + input_data = self.ffprobe_streams(full_input_path_single_file) + input_width = input_data["width"] + input_height = input_data["height"] self.log.debug("pixel_aspect: `{}`".format(pixel_aspect)) self.log.debug("input_width: `{}`".format(input_width)) From fcc664f31a39422a82ba42d68741a6825b17824a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Apr 2020 19:24:16 +0200 Subject: [PATCH 073/207] use instance resolution for output before using input's resolution --- pype/plugins/global/publish/extract_review.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 32c8a10039..55e97c17d5 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -612,6 +612,11 @@ class ExtractReview(pyblish.api.InstancePlugin): output_width = output_def.get("width") output_height = output_def.get("height") # Use instance resolution if output definition has not set it. + if output_width is None or output_height is None: + output_width = temp_data["resolution_width"] + output_height = temp_data["resolution_height"] + + # Use source's input resolution instance does not have set it. 
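# [editor's note] Standalone sketch of what `ffprobe_streams` boils down to:
# read the first stream's metadata as JSON. It assumes an `ffprobe` binary on
# PATH; the plugin itself resolves the executable through pype.lib instead.
import json
import subprocess

def probe_resolution(path, ffprobe="ffprobe"):
    cmd = [
        ffprobe, "-v", "quiet",
        "-print_format", "json",
        "-show_format", "-show_streams",
        path
    ]
    streams = json.loads(subprocess.check_output(cmd))["streams"]
    return streams[0]["width"], streams[0]["height"]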
if output_width is None or output_height is None: output_width = input_width output_height = input_height From 446b91e56de79ba9451900785a0fd6bfa08cb724 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Apr 2020 19:25:14 +0200 Subject: [PATCH 074/207] extract review slate match new extract review and count values per each representation not per instance --- .../global/publish/extract_review_slate.py | 87 ++++++++++--------- 1 file changed, 48 insertions(+), 39 deletions(-) diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index aaa67bde68..e94701a312 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -26,47 +26,55 @@ class ExtractReviewSlate(pype.api.Extractor): slate_path = inst_data.get("slateFrame") ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - # values are set in ExtractReview - to_width = inst_data["reviewToWidth"] - to_height = inst_data["reviewToHeight"] - - resolution_width = inst_data.get("resolutionWidth", to_width) - resolution_height = inst_data.get("resolutionHeight", to_height) pixel_aspect = inst_data.get("pixelAspect", 1) fps = inst_data.get("fps") - # defining image ratios - resolution_ratio = ((float(resolution_width) * pixel_aspect) / - resolution_height) - delivery_ratio = float(to_width) / float(to_height) - self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio)) - self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio)) - - # get scale factor - scale_factor = float(to_height) / ( - resolution_height * pixel_aspect) - - # shorten two decimals long float number for testing conditions - resolution_ratio_test = float( - "{:0.2f}".format(resolution_ratio)) - delivery_ratio_test = float( - "{:0.2f}".format(delivery_ratio)) - - if resolution_ratio_test < delivery_ratio_test: - scale_factor = float(to_width) / ( - resolution_width * pixel_aspect) - - self.log.debug("__ scale_factor: `{}`".format(scale_factor)) - - for i, repre in enumerate(inst_data["representations"]): - _remove_at_end = [] - self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre)) + for idx, repre in enumerate(inst_data["representations"]): + self.log.debug("__ i: `{}`, repre: `{}`".format(idx, repre)) p_tags = repre.get("tags", []) - if "slate-frame" not in p_tags: continue + # values are set in ExtractReview + to_width = repre["resolutionWidth"] + to_height = repre["resolutionHeight"] + + # QUESTION Should we use resolution from instance and not source's? 
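# [editor's note] The resolution fallback order established above, condensed
# into one helper (illustrative, not the plugin's actual API):
def resolve_output_size(output_def, temp_data, input_width, input_height):
    # 1) explicit width/height from the output definition
    width = output_def.get("width")
    height = output_def.get("height")
    # 2) instance data when the definition does not set both values
    if width is None or height is None:
        width = temp_data.get("resolution_width")
        height = temp_data.get("resolution_height")
    # 3) probed input resolution as the last resort
    if width is None or height is None:
        width, height = input_width, input_height
    return width, height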
+ resolution_width = inst_data.get("resolutionWidth") + if resolution_width is None: + resolution_width = to_width + + resolution_height = inst_data.get("resolutionHeight") + if resolution_height is None: + resolution_height = to_height + + # defining image ratios + resolution_ratio = ( + (float(resolution_width) * pixel_aspect) / resolution_height + ) + delivery_ratio = float(to_width) / float(to_height) + self.log.debug("resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug("delivery_ratio: `{}`".format(delivery_ratio)) + + # get scale factor + scale_factor = float(to_height) / ( + resolution_height * pixel_aspect) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float( + "{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float( + "{:0.2f}".format(delivery_ratio)) + + if resolution_ratio_test < delivery_ratio_test: + scale_factor = float(to_width) / ( + resolution_width * pixel_aspect) + + self.log.debug("__ scale_factor: `{}`".format(scale_factor)) + + _remove_at_end = [] + stagingdir = repre["stagingDir"] input_file = "{0}".format(repre["files"]) @@ -87,7 +95,7 @@ class ExtractReviewSlate(pype.api.Extractor): # overrides output file input_args.append("-y") # preset's input data - input_args.extend(repre["_profile"].get('input', [])) + input_args.extend(repre["outputDef"].get('input', [])) input_args.append("-loop 1 -i {}".format(slate_path)) input_args.extend([ "-r {}".format(fps), @@ -95,10 +103,11 @@ class ExtractReviewSlate(pype.api.Extractor): ) # output args - codec_args = repre["_profile"].get('codec', []) - output_args.extend(codec_args) # preset's output data - output_args.extend(repre["_profile"].get('output', [])) + output_args.extend(repre["outputDef"].get('output', [])) + + # Codecs are copied from source for whole input + output_args.append("-codec copy") # make sure colors are correct output_args.extend([ @@ -206,10 +215,10 @@ class ExtractReviewSlate(pype.api.Extractor): "name": repre["name"], "tags": [x for x in repre["tags"] if x != "delete"] } - inst_data["representations"][i].update(repre_update) + inst_data["representations"][idx].update(repre_update) self.log.debug( "_ representation {}: `{}`".format( - i, inst_data["representations"][i])) + idx, inst_data["representations"][idx])) # removing temp files for f in _remove_at_end: From 1c933741eaaa26710c5c6aa455d3d6146e5e9e18 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 13:47:59 +0200 Subject: [PATCH 075/207] burnin script can handle both new and old ExtractBurnin plugin processes --- pype/scripts/otio_burnin.py | 56 ++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 7c94006466..4c9e0fc4d7 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -335,22 +335,23 @@ def example(input_path, output_path): def burnins_from_data( - input_path, output_path, data, codec_data=None, overwrite=True + input_path, output_path, data, + codec_data=None, options=None, burnin_values=None, overwrite=True ): - ''' - This method adds burnins to video/image file based on presets setting. + """This method adds burnins to video/image file based on presets setting. + Extension of output MUST be same as input. (mov -> mov, avi -> avi,...) 
-    :param input_path: full path to input file where burnins should be add
-    :type input_path: str
-    :param codec_data: all codec related arguments in list
-    :param codec_data: list
-    :param output_path: full path to output file where output will be rendered
-    :type output_path: str
-    :param data: data required for burnin settings (more info below)
-    :type data: dict
-    :param overwrite: output will be overriden if already exists, defaults to True
-    :type overwrite: bool
+    Args:
+        input_path (str): Full path to input file where burnins should be added.
+        output_path (str): Full path to output file where output will be
+            rendered.
+        data (dict): Data required for burnin settings (more info below).
+        codec_data (list): All codec related arguments in list.
+        options (dict): Options for burnins.
+        burnin_values (dict): Contains positioned values.
+        overwrite (bool): Output will be overridden if already exists,
+            True by default.
 
     Presets must be set separately. Should be dict with 2 keys:
     - "options" - sets look of burnins - colors, opacity,...(more info: ModifiedBurnins doc)
@@ -391,11 +392,18 @@ def burnins_from_data(
         "frame_start_tc": 1,
         "shot": "sh0010"
     }
-    '''
-    presets = config.get_presets().get('tools', {}).get('burnins', {})
-    options_init = presets.get('options')
+    """
+    # Make sure `codec_data` is a list
+    if not codec_data:
+        codec_data = []
 
-    burnin = ModifiedBurnins(input_path, options_init=options_init)
+    # Use legacy processing when options are not set
+    if options is None or burnin_values is None:
+        presets = config.get_presets().get("tools", {}).get("burnins", {})
+        options = presets.get("options")
+        burnin_values = presets.get("burnins") or {}
+
+    burnin = ModifiedBurnins(input_path, options_init=options)
 
     frame_start = data.get("frame_start")
     frame_end = data.get("frame_end")
@@ -425,7 +433,7 @@ def burnins_from_data(
     if source_timecode is not None:
         data[SOURCE_TIMECODE_KEY[1:-1]] = SOURCE_TIMECODE_KEY
 
-    for align_text, value in presets.get('burnins', {}).items():
+    for align_text, value in burnin_values.items():
         if not value:
             continue
 
@@ -511,11 +519,13 @@ def burnins_from_data(
     burnin.render(output_path, args=codec_args, overwrite=overwrite, **data)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
    in_data = json.loads(sys.argv[-1])
    burnins_from_data(
-        in_data['input'],
-        in_data['output'],
-        in_data['burnin_data'],
-        in_data['codec']
+        in_data["input"],
+        in_data["output"],
+        in_data["burnin_data"],
+        codec_data=in_data.get("codec"),
+        options=in_data.get("options"),
+        burnin_values=in_data.get("values")
    )

From 74f278d507fc54623a8108eb3db043a415b963ce Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 28 Apr 2020 13:49:05 +0200
Subject: [PATCH 076/207] profile filter values are not prestored to dictionary

---
 pype/plugins/global/publish/extract_review.py | 36 +++++++++----------
 1 file changed, 17 insertions(+), 19 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index 55e97c17d5..8d7aec1d2e 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -60,20 +60,18 @@ class ExtractReview(pyblish.api.InstancePlugin):
                 instance.data["representations"].remove(repre)
 
     def main_process(self, instance):
-        profile_filter_data = {
-            "host": pyblish.api.registered_hosts()[-1].title(),
-            "family": self.main_family_from_instance(instance),
-            "task": os.environ["AVALON_TASK"]
-        }
+        host_name = pyblish.api.registered_hosts()[-1].title()
+        task_name = os.environ["AVALON_TASK"]
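# [editor's note] A hypothetical call of the reworked `burnins_from_data`
# signature (paths and values are made up); when `options`/`burnin_values`
# are omitted the function falls back to the legacy preset lookup above.
burnins_from_data(
    "/tmp/input.mov",
    "/tmp/output.mov",
    data={"frame_start": 1001, "frame_end": 1100, "version": 3, "shot": "sh0010"},
    codec_data=["-codec:v", "libx264"],
    options={"opacity": 1, "font_size": 42},
    burnin_values={"top_left": "{shot}"},
)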
+ family = self.main_family_from_instance(instance) profile = self.find_matching_profile( - self.profiles, profile_filter_data + host_name, task_name, family ) if not profile: self.log.info(( "Skipped instance. None of profiles in presets are for" " Host: \"{host}\" | Family: \"{family}\" | Task \"{task}\"" - ).format(**profile_filter_data)) + ).format(host_name, family, task_name)) return self.log.debug("Matching profile: \"{}\"".format(json.dumps(profile))) @@ -882,7 +880,7 @@ class ExtractReview(pyblish.api.InstancePlugin): final_profile.pop("__value__") return final_profile - def find_matching_profile(self, profiles, filter_data): + def find_matching_profile(self, host_name, task_name, family): """ Filter profiles by Host name, Task name and main Family. Filtering keys are "hosts" (list), "tasks" (list), "families" (list). @@ -890,24 +888,24 @@ class ExtractReview(pyblish.api.InstancePlugin): Args: profiles (list): Profiles definition from presets. - filter_data (dict): Dictionary with data for filtering. - Required keys are "host" - Host name, "task" - Task name - and "family" - Main instance family. + host_name (str): Current running host name. + task_name (str): Current context task name. + family (str): Main family of current Instance. Returns: dict/None: Return most matching profile or None if none of profiles match at least one criteria. """ - host_name = filter_data["host"] - task_name = filter_data["task"] - family = filter_data["family"] matching_profiles = None + if not self.profiles: + return matching_profiles + highest_profile_points = -1 # Each profile get 1 point for each matching filter. Profile with most # points is returnd. For cases when more than one profile will match # are also stored ordered lists of matching values. - for profile in profiles: + for profile in self.profiles: profile_points = 0 profile_value = [] @@ -950,8 +948,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if not matching_profiles: self.log.info(( "None of profiles match your setup." - " Host \"{host}\" | Task: \"{task}\" | Family: \"{family}\"" - ).format(**filter_data)) + " Host \"{}\" | Task: \"{}\" | Family: \"{}\"" + ).format(host_name, task_name, family)) return if len(matching_profiles) == 1: @@ -961,8 +959,8 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.warning(( "More than one profile match your setup." 
- " Host \"{host}\" | Task: \"{task}\" | Family: \"{family}\"" - ).format(**filter_data)) + " Host \"{}\" | Task: \"{}\" | Family: \"{}\"" + ).format(host_name, task_name, family)) return self.profile_exclusion(matching_profiles) From fc694cd516f0326b7c978f52211bb74d029caf9b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 13:49:42 +0200 Subject: [PATCH 077/207] "output_filter" for output definition in presets was renamed to "filter" --- pype/plugins/global/publish/extract_review.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 8d7aec1d2e..e7eb210b2d 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -999,7 +999,6 @@ class ExtractReview(pyblish.api.InstancePlugin): if valid: return True - return False def filter_outputs_by_families(self, profile, families): @@ -1024,7 +1023,7 @@ class ExtractReview(pyblish.api.InstancePlugin): filtered_outputs = {} for filename_suffix, output_def in outputs.items(): - output_filters = output_def.get("output_filter") + output_filters = output_def.get("filter") # When filters not set then skip filtering process if not output_filters: filtered_outputs[filename_suffix] = output_def @@ -1054,7 +1053,7 @@ class ExtractReview(pyblish.api.InstancePlugin): repre_tags_low = [tag.lower() for tag in tags] for output_def in outputs: valid = True - output_filters = output_def.get("output_filter") + output_filters = output_def.get("filter") if output_filters: # Check tag filters tag_filters = output_filters.get("tags") From 59ce9a5a6dc576826888b3dccce3eb4250499903 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 13:52:26 +0200 Subject: [PATCH 078/207] extract burnin updated to care about new burnin presets with backwards compatibility --- pype/plugins/global/publish/extract_burnin.py | 736 ++++++++++++++++++ 1 file changed, 736 insertions(+) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 7668eafd2a..693940accf 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -1,10 +1,13 @@ import os +import re import json import copy import pype.api import pyblish +StringType = type("") + class ExtractBurnin(pype.api.Extractor): """ @@ -21,10 +24,743 @@ class ExtractBurnin(pype.api.Extractor): hosts = ["nuke", "maya", "shell"] optional = True + positions = [ + "top_left", "top_centered", "top_right", + "bottom_right", "bottom_centered", "bottom_left" + ] + # Default options for burnins for cases that are not set in presets. + default_options = { + "opacity": 1, + "x_offset": 5, + "y_offset": 5, + "bg_padding": 5, + "bg_opacity": 0.5, + "font_size": 42 + } + + # Preset attributes + profiles = None + options = None + fields = None + def process(self, instance): + # QUESTION what is this for and should we raise an exception? if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") + if self.profiles is None: + return self.legacy_process(instance) + self.main_process(instance) + + # Remove any representations tagged for deletion. + # QUESTION Is possible to have representation with "delete" tag? 
+        for repre in tuple(instance.data["representations"]):
+            if "delete" in repre.get("tags", []):
+                self.log.debug("Removing representation: {}".format(repre))
+                instance.data["representations"].remove(repre)
+
+        self.log.debug(instance.data["representations"])
+
+    def main_process(self, instance):
+        host_name = pyblish.api.registered_hosts()[-1].title()
+        task_name = os.environ["AVALON_TASK"]
+        family = self.main_family_from_instance(instance)
+
+        # Find profile most matching current host, task and instance family
+        profile = self.find_matching_profile(host_name, task_name, family)
+        if not profile:
+            self.log.info((
+                "Skipped instance. None of profiles in presets are for"
+                " Host: \"{}\" | Family: \"{}\" | Task \"{}\""
+            ).format(host_name, family, task_name))
+            return
+
+        # Pre-filter burnin definitions by instance families
+        burnin_defs = self.filter_burnins_by_families(profile, instance)
+        if not burnin_defs:
+            self.log.info((
+                "Skipped instance. Burnin definitions are not set for profile"
+                " Host: \"{}\" | Family: \"{}\" | Task \"{}\" | Profile \"{}\""
+            ).format(host_name, family, task_name, profile))
+            return
+
+        # Prepare burnin options
+        profile_options = copy.deepcopy(self.default_options)
+        for key, value in (self.options or {}).items():
+            if value is not None:
+                profile_options[key] = value
+
+        # Prepare global burnin values from presets
+        profile_burnins = {}
+        for key, value in (self.fields or {}).items():
+            key_low = key.lower()
+            if key_low in self.positions:
+                if value is not None:
+                    profile_burnins[key_low] = value
+
+        # Prepare basic data for processing
+        _burnin_data, _temp_data = self.prepare_basic_data(instance)
+
+        anatomy = instance.context.data["anatomy"]
+        scriptpath = self.burnin_script_path()
+        executable = self.python_executable_path()
+
+        for idx, repre in enumerate(tuple(instance.data["representations"])):
+            self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
+            if not self.repres_is_valid(repre):
+                continue
+
+            # Filter output definition by representation tags (optional)
+            repre_burnin_defs = self.filter_burnins_by_tags(
+                burnin_defs, repre["tags"]
+            )
+            if not repre_burnin_defs:
+                self.log.info((
+                    "Skipped representation. None of the burnin definitions"
+                    " from selected profile match the representation's"
+                    " tags. \"{}\""
+                ).format(str(repre["tags"])))
+                continue
+
+            # Create copy of `_burnin_data` and `_temp_data` for repre.
+            burnin_data = copy.deepcopy(_burnin_data)
+            temp_data = copy.deepcopy(_temp_data)
+
+            # Prepare representation based data.
+            self.prepare_repre_data(instance, repre, burnin_data, temp_data)
+
+            # Add anatomy keys to burnin_data.
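# [editor's note] The option merging above, reduced to its core: preset values
# override the defaults, while explicit None entries (null in json) leave the
# default untouched. Values are illustrative:
import copy

defaults = {"opacity": 1, "x_offset": 5, "y_offset": 5, "font_size": 42}
overrides = {"font_size": 24, "x_offset": None}
merged = copy.deepcopy(defaults)
for key, value in overrides.items():
    if value is not None:
        merged[key] = value
# merged == {"opacity": 1, "x_offset": 5, "y_offset": 5, "font_size": 24}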
+ filled_anatomy = anatomy.format_all(burnin_data) + burnin_data["anatomy"] = filled_anatomy.get_solved() + + files_to_delete = [] + for filename_suffix, burnin_def in repre_burnin_defs.items(): + new_repre = copy.deepcopy(repre) + + burnin_options = copy.deepcopy(profile_options) + burnin_values = copy.deepcopy(profile_burnins) + + # Options overrides + for key, value in (burnin_def.get("options") or {}).items(): + # Set or override value if is valid + if value is not None: + burnin_options[key] = value + + # Burnin values overrides + for key, value in burnin_def.items(): + key_low = key.lower() + if key_low in self.positions: + if value is not None: + # Set or override value if is valid + burnin_values[key_low] = value + + elif key_low in burnin_values: + # Pop key if value is set to None (null in json) + burnin_values.pop(key_low) + + # Remove "delete" tag from new representation + if "delete" in new_repre["tags"]: + new_repre["tags"].remove("delete") + + # Update outputName to be able have multiple outputs + # Join previous "outputName" with filename suffix + new_repre["outputName"] = "_".join( + [new_repre["outputName"], filename_suffix] + ) + + # Prepare paths and files for process. + self.input_output_paths(new_repre, temp_data, filename_suffix) + + # Data for burnin script + script_data = { + "input": temp_data["full_input_path"], + "output": temp_data["full_output_path"], + "burnin_data": burnin_data, + "options": burnin_options, + "values": burnin_values + } + + self.log.debug("script_data: {}".format(script_data)) + + # Dump data to string + dumped_script_data = json.dumps(script_data) + + # Prepare subprocess arguments + args = [executable, scriptpath, dumped_script_data] + self.log.debug("Executing: {}".format(args)) + + # Run burnin script + output = pype.api.subprocess(args) + self.log.debug("Output: {}".format(output)) + + for filepath in temp_data["full_input_paths"]: + filepath = filepath.replace("\\", "/") + if filepath not in files_to_delete: + files_to_delete.append(filepath) + + # Add new representation to instance + instance.data["representations"].append(new_repre) + + # Remove source representation + # NOTE we maybe can keep source representation if necessary + instance.data["representations"].remove(repre) + + # Delete input files + for filepath in files_to_delete: + if os.path.exists(filepath): + os.remove(filepath) + self.log.debug("Removed: \"{}\"".format(filepath)) + + def prepare_basic_data(self, instance): + """Pick data from instance for processing and for burnin strings. + + Args: + instance (Instance): Currently processed instance. + + Returns: + tuple: `(burnin_data, temp_data)` - `burnin_data` contain data for + filling burnin strings. `temp_data` are for repre pre-process + preparation. + """ + self.log.debug("Prepring basic data for burnins") + context = instance.context + + version = instance.data.get("version") + if version is None: + version = context.data.get("version") + + frame_start = instance.data.get("frameStart") + if frame_start is None: + self.log.warning( + "Key \"frameStart\" is not set. Setting to \"0\"." + ) + frame_start = 0 + frame_start = int(frame_start) + + frame_end = instance.data.get("frameEnd") + if frame_end is None: + self.log.warning( + "Key \"frameEnd\" is not set. Setting to \"1\"." 
+            )
+            frame_end = 1
+        frame_end = int(frame_end)
+
+        handles = instance.data.get("handles")
+        if handles is None:
+            handles = context.data.get("handles")
+            if handles is None:
+                handles = 0
+
+        handle_start = instance.data.get("handleStart")
+        if handle_start is None:
+            handle_start = context.data.get("handleStart")
+            if handle_start is None:
+                handle_start = handles
+
+        handle_end = instance.data.get("handleEnd")
+        if handle_end is None:
+            handle_end = context.data.get("handleEnd")
+            if handle_end is None:
+                handle_end = handles
+
+        frame_start_handle = frame_start - handle_start
+        frame_end_handle = frame_end + handle_end
+
+        burnin_data = copy.deepcopy(instance.data["anatomyData"])
+
+        if "slate.farm" in instance.data["families"]:
+            frame_start_handle += 1
+
+        burnin_data.update({
+            "version": int(version),
+            "comment": context.data.get("comment") or ""
+        })
+
+        intent_label = context.data.get("intent")
+        if intent_label and isinstance(intent_label, dict):
+            intent_label = intent_label.get("label")
+
+        if intent_label:
+            burnin_data["intent"] = intent_label
+
+        temp_data = {
+            "frame_start": frame_start,
+            "frame_end": frame_end,
+            "frame_start_handle": frame_start_handle,
+            "frame_end_handle": frame_end_handle
+        }
+
+        self.log.debug("Basic burnin_data: {}".format(burnin_data))
+
+        return burnin_data, temp_data
+
+    def repres_is_valid(self, repre):
+        """Validation of whether the representation should be processed.
+
+        Args:
+            repre (dict): Representation which should be checked.
+
+        Returns:
+            bool: False if can't be processed else True.
+        """
+
+        if "burnin" not in (repre.get("tags") or []):
+            self.log.info("Representation doesn't have \"burnin\" tag.")
+            return False
+
+        # ffmpeg doesn't support multipart exrs
+        if "multipartExr" in repre["tags"]:
+            self.log.info("Representation contains \"multipartExr\" tag.")
+            return False
+        return True
+
+    def filter_burnins_by_tags(self, burnin_defs, tags):
+        """Filter burnin definitions by entered representation tags.
+
+        Burnin definitions without tags filter are marked as valid.
+
+        Args:
+            burnin_defs (dict): Burnin definitions from presets, stored by
+                filename suffix.
+            tags (list): Tags of processed representation.
+
+        Returns:
+            dict: Containing all burnin definitions matching entered tags.
+        """
+        filtered_burnin_defs = {}
+        repre_tags_low = [tag.lower() for tag in tags]
+        for filename_suffix, burnin_def in burnin_defs.items():
+            valid = True
+            output_filters = burnin_def.get("filter")
+            if output_filters:
+                # Check tag filters
+                tag_filters = output_filters.get("tags")
+                if tag_filters:
+                    tag_filters_low = [tag.lower() for tag in tag_filters]
+                    valid = False
+                    for tag in repre_tags_low:
+                        if tag in tag_filters_low:
+                            valid = True
+                            break
+
+                    if not valid:
+                        continue
+
+            if valid:
+                filtered_burnin_defs[filename_suffix] = burnin_def
+
+        return filtered_burnin_defs
+
+    def input_output_paths(self, new_repre, temp_data, filename_suffix):
+        """Prepare input and output paths for representation.
+
+        Stores these keys to `temp_data`: "full_input_path" (full path to
+        source files, optionally with sequence formatting),
+        "full_output_path" (full path to output, optionally with sequence
+        formatting) and "full_input_paths" (list of all source files which
+        will be deleted when the burnin script ends). The list of output
+        filenames is stored to the new representation as "repre_files".
+
+        Args:
+            new_repre (dict): Currently processed new representation.
+            temp_data (dict): Temp data of representation process.
+            filename_suffix (str): Filename suffix added to input filename.
+
+        Returns:
+            None: This is processing method.
+ """ + is_sequence = "sequence" in new_repre["tags"] + if is_sequence: + input_filename = new_repre["sequence_file"] + else: + input_filename = new_repre["files"] + + filepart_start, ext = os.path.splitext(input_filename) + dir_path, basename = os.path.split(filepart_start) + + if is_sequence: + # NOTE modified to keep name when multiple dots are in name + basename_parts = basename.split(".") + frame_part = basename_parts.pop(-1) + + basename_start = ".".join(basename_parts) + filename_suffix + new_basename = ".".join(basename_start, frame_part) + output_filename = new_basename + ext + + else: + output_filename = basename + filename_suffix + ext + + if dir_path: + output_filename = os.path.join(dir_path, output_filename) + + stagingdir = new_repre["stagingDir"] + full_input_path = os.path.join( + os.path.normpath(stagingdir), input_filename + ).replace("\\", "/") + full_output_path = os.path.join( + os.path.normpath(stagingdir), output_filename + ).replace("\\", "/") + + temp_data["full_input_path"] = full_input_path + temp_data["full_output_path"] = full_output_path + + self.log.debug("full_input_path: {}".format(full_input_path)) + self.log.debug("full_output_path: {}".format(full_output_path)) + + # Prepare full paths to input files and filenames for reprensetation + full_input_paths = [] + if is_sequence: + repre_files = [] + for frame_index in range(1, temp_data["duration"] + 1): + repre_files.append(output_filename % frame_index) + full_input_paths.append(full_input_path % frame_index) + + else: + full_input_paths.append(full_input_path) + repre_files = output_filename + + temp_data["full_input_paths"] = full_input_paths + new_repre["repre_files"] = repre_files + + def prepare_repre_data(self, instance, repre, burnin_data, temp_data): + """Prepare data for representation. + + Args: + instance (Instance): Currently processed Instance. + repre (dict): Currently processed representation. + burnin_data (dict): Copy of basic burnin data based on instance + data. + temp_data (dict): Copy of basic temp data + """ + # Add representation name to burnin data + burnin_data["representation"] = repre["name"] + + # no handles switch from profile tags + if "no-handles" in repre["tags"]: + burnin_frame_start = temp_data["frame_start"] + burnin_frame_end = temp_data["frame_end"] + + else: + burnin_frame_start = temp_data["frame_start_handle"] + burnin_frame_end = temp_data["frame_end_handle"] + + burnin_duration = burnin_frame_end - burnin_frame_start + 1 + + burnin_data.update({ + "frame_start": burnin_frame_start, + "frame_end": burnin_frame_end, + "duration": burnin_duration, + }) + temp_data["duration"] = burnin_duration + + # Add values for slate frames + burnin_slate_frame_start = burnin_frame_start + + # Move frame start by 1 frame when slate is used. + if ( + "slate" in instance.data["families"] + and "slate-frame" in repre["tags"] + ): + burnin_slate_frame_start -= 1 + + self.log.debug("burnin_slate_frame_start: {}".format( + burnin_slate_frame_start + )) + + burnin_data.update({ + "slate_frame_start": burnin_slate_frame_start, + "slate_frame_end": burnin_frame_end, + "slate_duration": ( + burnin_frame_end - burnin_slate_frame_start + 1 + ) + }) + + def find_matching_profile(self, host_name, task_name, family): + """ Filter profiles by Host name, Task name and main Family. + + Filtering keys are "hosts" (list), "tasks" (list), "families" (list). + If key is not find or is empty than it's expected to match. + + Args: + profiles (list): Profiles definition from presets. 
+            host_name (str): Current running host name.
+            task_name (str): Current context task name.
+            family (str): Main family of current Instance.
+
+        Returns:
+            dict/None: The most matching profile, or None when no profile
+                matches all of its set filters.
+        """
+
+        matching_profiles = None
+        highest_points = -1
+        for profile in self.profiles or tuple():
+            profile_points = 0
+            profile_value = []
+
+            # Host filtering
+            host_names = profile.get("hosts")
+            match = self.validate_value_by_regexes(host_name, host_names)
+            if match == -1:
+                continue
+            profile_points += match
+            profile_value.append(bool(match))
+
+            # Task filtering
+            task_names = profile.get("tasks")
+            match = self.validate_value_by_regexes(task_name, task_names)
+            if match == -1:
+                continue
+            profile_points += match
+            profile_value.append(bool(match))
+
+            # Family filtering
+            families = profile.get("families")
+            match = self.validate_value_by_regexes(family, families)
+            if match == -1:
+                continue
+            profile_points += match
+            profile_value.append(bool(match))
+
+            if profile_points > highest_points:
+                matching_profiles = []
+                highest_points = profile_points
+
+            if profile_points == highest_points:
+                profile["__value__"] = profile_value
+                matching_profiles.append(profile)
+
+        if not matching_profiles:
+            return
+
+        if len(matching_profiles) == 1:
+            return matching_profiles[0]
+
+        return self.profile_exclusion(matching_profiles)
+
+    def profile_exclusion(self, matching_profiles):
+        """Find out most matching profile by host, task and family match.
+
+        Profiles are selectively filtered. Each profile should have
+        "__value__" key with list of booleans. Each boolean represents
+        existence of filter for specific key (host, task, family).
+        Profiles are looped over in rounds. In each round they are split into
+        true_list and false_list; the next round uses profiles from true_list
+        if there are any, otherwise false_list is used.
+
+        Filtering ends when only one profile is left in true_list, or when
+        all existence booleans were checked, in which case the first of the
+        remaining profiles is returned.
+
+        Args:
+            matching_profiles (list): Profiles with same values.
+
+        Returns:
+            dict: Most matching profile.
+        """
+        self.log.info(
+            "Search for first most matching profile in match order:"
+            " Host name -> Task name -> Family."
+        )
+        # Filter all profiles with highest points value. First filter profiles
+        # with matching host if there are any, then filter profiles by task
+        # name if there are any, and lastly filter by family. Else use first
+        # in list.
+        idx = 0
+        final_profile = None
+        while True:
+            profiles_true = []
+            profiles_false = []
+            for profile in matching_profiles:
+                value = profile["__value__"]
+                # Just use first profile when idx is greater than values.
+                if not idx < len(value):
+                    final_profile = profile
+                    break
+
+                if value[idx]:
+                    profiles_true.append(profile)
+                else:
+                    profiles_false.append(profile)
+
+            if final_profile is not None:
+                break
+
+            if profiles_true:
+                matching_profiles = profiles_true
+            else:
+                matching_profiles = profiles_false
+
+            if len(matching_profiles) == 1:
+                final_profile = matching_profiles[0]
+                break
+            idx += 1
+
+        final_profile.pop("__value__")
+        return final_profile
+
+    def filter_burnins_by_families(self, profile, instance):
+        """Filter outputs that are not supported for instance families.
+
+        Output definitions without families filter are marked as valid.
+
+        Args:
+            profile (dict): Profile from presets matching current context.
+            instance (Instance): Publish instance whose families are used for
+                filtering.
+
+        Returns:
+            list: Containing all output definitions matching entered families.
+        """
+        filtered_burnin_defs = {}
+
+        burnin_defs = profile.get("burnins")
+        if not burnin_defs:
+            return filtered_burnin_defs
+
+        # Prepare families
+        families = self.families_from_instance(instance)
+        families = [family.lower() for family in families]
+
+        for filename_suffix, burnin_def in burnin_defs.items():
+            burnin_filter = burnin_def.get("filter")
+            # When filters not set then skip filtering process
+            if burnin_filter:
+                families_filters = burnin_filter.get("families")
+                if not self.families_filter_validation(
+                    families, families_filters
+                ):
+                    continue
+
+            filtered_burnin_defs[filename_suffix] = burnin_def
+        return filtered_burnin_defs
+
+    def families_filter_validation(self, families, output_families_filter):
+        """Determines if entered families intersect with families filters.
+
+        All family values are lowered to avoid unexpected results.
+        """
+        if not output_families_filter:
+            return True
+
+        for family_filter in output_families_filter:
+            if not family_filter:
+                continue
+
+            if not isinstance(family_filter, (list, tuple)):
+                if family_filter.lower() not in families:
+                    continue
+                return True
+
+            valid = True
+            for family in family_filter:
+                if family.lower() not in families:
+                    valid = False
+                    break
+
+            if valid:
+                return True
+        return False
+
+    def compile_list_of_regexes(self, in_list):
+        """Convert strings in entered list to compiled regex objects."""
+        regexes = []
+        if not in_list:
+            return regexes
+
+        for item in in_list:
+            if not item:
+                continue
+
+            if not isinstance(item, StringType):
+                self.log.warning((
+                    "Invalid type \"{}\" value \"{}\"."
+                    " Expected . Skipping."
+                ).format(str(type(item)), str(item)))
+                continue
+
+            regexes.append(re.compile(item))
+        return regexes
+
+    def validate_value_by_regexes(self, value, in_list):
+        """Validates if any regex from the list matches the entered value.
+
+        Args:
+            in_list (list): List with regexes.
+            value (str): String that is checked against the regexes.
+
+        Returns:
+            int: Returns `0` when the list is not set or is empty. Returns `1`
+                when any regex matches the value and returns `-1` when none of
+                the regexes match the entered value.
+        """
+        if not in_list:
+            return 0
+
+        output = -1
+        regexes = self.compile_list_of_regexes(in_list)
+        for regex in regexes:
+            if re.match(regex, value):
+                output = 1
+                break
+        return output
+
+    def main_family_from_instance(self, instance):
+        """Returns main family of entered instance."""
+        family = instance.data.get("family")
+        if not family:
+            family = instance.data["families"][0]
+        return family
+
+    def families_from_instance(self, instance):
+        """Returns all families of entered instance."""
+        families = []
+        family = instance.data.get("family")
+        if family:
+            families.append(family)
+
+        for family in (instance.data.get("families") or tuple()):
+            if family not in families:
+                families.append(family)
+        return families
+
+    def burnin_script_path(self):
+        """Returns path to python script for burnin processing."""
+        # TODO maybe convert to Plugin's attribute
+        # Get script path.
+        module_path = os.environ["PYPE_MODULE_ROOT"]
+
+        # There can be multiple paths in PYPE_MODULE_ROOT, in which case
+        # we just take first one.
+ if os.pathsep in module_path: + module_path = module_path.split(os.pathsep)[0] + + scriptpath = os.path.normpath( + os.path.join( + module_path, + "pype", + "scripts", + "otio_burnin.py" + ) + ) + + self.log.debug("scriptpath: {}".format(scriptpath)) + + return scriptpath + + def python_executable_path(self): + """Returns path to Python 3 executable.""" + # TODO maybe convert to Plugin's attribute + # Get executable. + executable = os.getenv("PYPE_PYTHON_EXE") + + # There can be multiple paths in PYPE_PYTHON_EXE, in which case + # we just take first one. + if os.pathsep in executable: + executable = executable.split(os.pathsep)[0] + + self.log.debug("EXE: {}".format(executable)) + return executable + + def legacy_process(self, instance): context_data = instance.context.data version = instance.data.get( From fa97e76e329361dfc2ea41a9bc29c3ed961d2ccc Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 13:53:41 +0200 Subject: [PATCH 079/207] added logs to legacy processing methods --- pype/plugins/global/publish/extract_burnin.py | 2 ++ pype/plugins/global/publish/extract_review.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 693940accf..2f5d23a676 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -761,6 +761,8 @@ class ExtractBurnin(pype.api.Extractor): return executable def legacy_process(self, instance): + self.log.warning("Legacy burnin presets are used.") + context_data = instance.context.data version = instance.data.get( diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index e7eb210b2d..c5f86e7706 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1074,6 +1074,8 @@ class ExtractReview(pyblish.api.InstancePlugin): return filtered_outputs def legacy_process(self, instance): + self.log.warning("Legacy review presets are used.") + output_profiles = self.outputs or {} inst_data = instance.data From 4c7bacdb72cab7dfa3995b890a8518387cc1ec3d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 15:11:12 +0200 Subject: [PATCH 080/207] Fixed few bugs in extract review --- pype/plugins/global/publish/extract_review.py | 58 ++++++++++++++----- 1 file changed, 42 insertions(+), 16 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index c5f86e7706..063a3cb25e 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -60,17 +60,21 @@ class ExtractReview(pyblish.api.InstancePlugin): instance.data["representations"].remove(repre) def main_process(self, instance): - host_name = pyblish.api.registered_hosts()[-1].title() + host_name = pyblish.api.registered_hosts()[-1] task_name = os.environ["AVALON_TASK"] family = self.main_family_from_instance(instance) + self.log.info("Host: \"{}\"".format(host_name)) + self.log.info("Task: \"{}\"".format(task_name)) + self.log.info("Family: \"{}\"".format(family)) + profile = self.find_matching_profile( host_name, task_name, family ) if not profile: self.log.info(( "Skipped instance. 
None of profiles in presets are for" - " Host: \"{host}\" | Family: \"{family}\" | Task \"{task}\"" + " Host: \"{}\" | Family: \"{}\" | Task \"{}\"" ).format(host_name, family, task_name)) return @@ -155,7 +159,7 @@ class ExtractReview(pyblish.api.InstancePlugin): temp_data = self.prepare_temp_data(instance, repre, output_def) ffmpeg_args = self._ffmpeg_arguments( - output_def, instance, temp_data + output_def, instance, new_repre, temp_data ) subprcs_cmd = " ".join(ffmpeg_args) @@ -181,7 +185,9 @@ class ExtractReview(pyblish.api.InstancePlugin): new_repre.pop("thumbnail", None) # adding representation - self.log.debug("Adding: {}".format(new_repre)) + self.log.debug( + "Adding new representation: {}".format(new_repre) + ) instance.data["representations"].append(new_repre) def input_is_sequence(self, repre): @@ -602,6 +608,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # Skip processing if both conditions are not met if "reformat" not in new_repre["tags"] and not letter_box: + self.log.debug('Tag "reformat" and "letter_box" key are not set.') new_repre["resolutionWidth"] = input_width new_repre["resolutionHeight"] = input_height return filters @@ -616,9 +623,14 @@ class ExtractReview(pyblish.api.InstancePlugin): # Use source's input resolution instance does not have set it. if output_width is None or output_height is None: + self.log.debug("Using resolution from input.") output_width = input_width output_height = input_height + self.log.debug( + "Output resolution is {}x{}".format(output_width, output_height) + ) + # defining image ratios input_res_ratio = ( (float(input_width) * pixel_aspect) / input_height @@ -744,18 +756,24 @@ class ExtractReview(pyblish.api.InstancePlugin): def ffprobe_streams(self, path_to_file): """Load streams from entered filepath.""" + self.log.info( + "Getting information about input \"{}\".".format(path_to_file) + ) args = [ self.ffprobe_path, "-v quiet", "-print_format json", "-show_format", - "-show_streams" + "-show_streams", "\"{}\"".format(path_to_file) ] command = " ".join(args) + self.log.debug("FFprobe command: \"{}\"".format(command)) popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) - return json.loads(popen.communicate()[0])["streams"][0] + output = popen.communicate()[0] + self.log.debug("FFprobe output: {}".format(output)) + return json.loads(output)["streams"][0] def main_family_from_instance(self, instance): """Returns main family of entered instance.""" @@ -786,14 +804,14 @@ class ExtractReview(pyblish.api.InstancePlugin): if not item: continue - if not isinstance(item, StringType): + try: + regexes.append(re.compile(item)) + except TypeError: self.log.warning(( "Invalid type \"{}\" value \"{}\"." - " Expected . Skipping." + " Expected string based object. Skipping." 
).format(str(type(item)), str(item))) - continue - regexes.append(re.compile(item)) return regexes def validate_value_by_regexes(self, value, in_list): @@ -913,6 +931,9 @@ class ExtractReview(pyblish.api.InstancePlugin): host_names = profile.get("hosts") match = self.validate_value_by_regexes(host_name, host_names) if match == -1: + self.log.debug( + "\"{}\" not found in {}".format(host_name, host_names) + ) continue profile_points += match profile_value.append(bool(match)) @@ -921,6 +942,9 @@ class ExtractReview(pyblish.api.InstancePlugin): task_names = profile.get("tasks") match = self.validate_value_by_regexes(task_name, task_names) if match == -1: + self.log.debug( + "\"{}\" not found in {}".format(task_name, task_names) + ) continue profile_points += match profile_value.append(bool(match)) @@ -929,6 +953,9 @@ class ExtractReview(pyblish.api.InstancePlugin): families = profile.get("families") match = self.validate_value_by_regexes(family, families) if match == -1: + self.log.debug( + "\"{}\" not found in {}".format(family, families) + ) continue profile_points += match profile_value.append(bool(match)) @@ -936,13 +963,12 @@ class ExtractReview(pyblish.api.InstancePlugin): if profile_points < highest_profile_points: continue - profile["__value__"] = profile_value - if profile_points == highest_profile_points: - matching_profiles.append(profile) - - elif profile_points > highest_profile_points: - highest_profile_points = profile_points + if profile_points > highest_profile_points: matching_profiles = [] + highest_profile_points = profile_points + + if profile_points == highest_profile_points: + profile["__value__"] = profile_value matching_profiles.append(profile) if not matching_profiles: From 89d0bc7a4761ea158ad51fdfe394b51c5b3fee76 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 15:18:01 +0200 Subject: [PATCH 081/207] removed unused StringType --- pype/plugins/global/publish/extract_burnin.py | 10 ++++------ pype/plugins/global/publish/extract_review.py | 2 -- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 2f5d23a676..ae53a16917 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -6,8 +6,6 @@ import copy import pype.api import pyblish -StringType = type("") - class ExtractBurnin(pype.api.Extractor): """ @@ -670,14 +668,14 @@ class ExtractBurnin(pype.api.Extractor): if not item: continue - if not isinstance(item, StringType): + try: + regexes.append(re.compile(item)) + except TypeError: self.log.warning(( "Invalid type \"{}\" value \"{}\"." - " Expected . Skipping." + " Expected string based object. Skipping." 
).format(str(type(item)), str(item)))
-                continue
-            regexes.append(re.compile(item))
         return regexes
 
     def validate_value_by_regexes(self, value, in_list):
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index 063a3cb25e..4eeb526a19 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -8,8 +8,6 @@ import clique
 import pype.api
 import pype.lib
 
-StringType = type("")
-
 
 class ExtractReview(pyblish.api.InstancePlugin):
     """Extracting Review mov file for Ftrack
 
From 0c42f3d6a29efe7c1d6e6a8ef434839c08670f65 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 28 Apr 2020 15:18:20 +0200
Subject: [PATCH 082/207] few fixes in ExtractBurnin

---
 pype/plugins/global/publish/extract_burnin.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index ae53a16917..c729c24908 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -103,7 +103,7 @@ class ExtractBurnin(pype.api.Extractor):
         scriptpath = self.burnin_script_path()
         executable = self.python_executable_path()
 
-        for idx, repre in tuple(instance.data["representations"].items()):
+        for idx, repre in enumerate(tuple(instance.data["representations"])):
             self.log.debug("repre ({}): `{}`".format(idx + 1, repre["name"]))
             if not self.repres_is_valid(repre):
                 continue
@@ -324,9 +324,9 @@ class ExtractBurnin(pype.api.Extractor):
         Returns:
             list: Containing all burnin definitions matching entered tags.
         """
-        filtered_outputs = []
+        filtered_burnins = {}
         repre_tags_low = [tag.lower() for tag in tags]
-        for burnin_def in burnin_defs:
+        for filename_suffix, burnin_def in burnin_defs.items():
             valid = True
             output_filters = burnin_def.get("filter")
             if output_filters:
@@ -344,9 +344,9 @@ class ExtractBurnin(pype.api.Extractor):
             if valid:
-                filtered_outputs.append(burnin_def)
+                filtered_burnins[filename_suffix] = burnin_def
 
-        return filtered_outputs
+        return filtered_burnins
 
     def input_output_paths(self, new_repre, temp_data, filename_suffix):
         """Prepare input and output paths for representation.
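
The profile matching that patches 080 through 083 keep adjusting boils down to one idea: presets define profiles with optional "hosts", "tasks" and "families" regex filters, every filter that matches scores a point, an explicit mismatch disqualifies the profile, and the profile with the most points wins (ties are then broken by profile_exclusion). A minimal standalone sketch of that scoring, with tie-breaking left out and a made-up profiles list purely for illustration (this is not the plugin code itself):

    import re


    def validate_value_by_regexes(value, in_list):
        # 0 when no filter is set, 1 on a regex match, -1 otherwise.
        if not in_list:
            return 0
        for pattern in in_list:
            if re.match(pattern, value):
                return 1
        return -1


    def find_matching_profile(profiles, host_name, task_name, family):
        # Keep the profile with the highest number of matched filters.
        best_profile = None
        best_points = -1
        for profile in profiles:
            points = 0
            for value, key in (
                (host_name, "hosts"), (task_name, "tasks"), (family, "families")
            ):
                match = validate_value_by_regexes(value, profile.get(key))
                if match == -1:
                    # One filter explicitly mismatched - profile is out.
                    points = -1
                    break
                points += match

            if points > best_points:
                best_profile = profile
                best_points = points
        return best_profile


    profiles = [
        {"label": "fallback"},
        {"label": "nuke reviews", "hosts": ["nuke"], "families": ["review"]}
    ]
    # Two matched filters beat zero, so this prints "nuke reviews".
    print(find_matching_profile(profiles, "nuke", "compositing", "review")["label"])
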
From 5ec1e510d4cce51a56819e8361384b4344ea102c Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 28 Apr 2020 15:59:21 +0200
Subject: [PATCH 083/207] fixed few bugs in extract burnin

---
 pype/plugins/global/publish/extract_burnin.py | 27 ++++++++++++-------
 1 file changed, 17 insertions(+), 10 deletions(-)

diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index c729c24908..83b00bc574 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -60,7 +60,8 @@ class ExtractBurnin(pype.api.Extractor):
         self.log.debug(instance.data["representations"])
 
     def main_process(self, instance):
-        host_name = pyblish.api.registered_hosts()[-1].title()
+        # TODO get these data from context
+        host_name = pyblish.api.registered_hosts()[-1]
         task_name = os.environ["AVALON_TASK"]
         family = self.main_family_from_instance(instance)
 
@@ -160,11 +161,11 @@ class ExtractBurnin(pype.api.Extractor):
             if "delete" in new_repre["tags"]:
                 new_repre["tags"].remove("delete")
 
-            # Update outputName to be able have multiple outputs
+            # Update name and outputName to be able to have multiple outputs
             # Join previous "outputName" with filename suffix
-            new_repre["outputName"] = "_".join(
-                [new_repre["outputName"], filename_suffix]
-            )
+            new_name = "_".join([new_repre["outputName"], filename_suffix])
+            new_repre["name"] = new_name
+            new_repre["outputName"] = new_name
 
             # Prepare paths and files for process.
            self.input_output_paths(new_repre, temp_data, filename_suffix)
@@ -288,7 +289,9 @@ class ExtractBurnin(pype.api.Extractor):
             "frame_end_handle": frame_end_handle
         }
 
-        self.log.debug("Basic burnin_data: {}".format(burnin_data))
+        self.log.debug(
+            "Basic burnin_data: {}".format(json.dumps(burnin_data, indent=4))
+        )
 
         return burnin_data, temp_data
 
@@ -303,12 +306,16 @@ class ExtractBurnin(pype.api.Extractor):
         """
 
         if "burnin" not in (repre.get("tags") or []):
-            self.log.info("Representation doesn't have \"burnin\" tag.")
+            self.log.info((
+                "Representation \"{}\" doesn't have \"burnin\" tag. Skipped."
+            ).format(repre["name"]))
             return False
 
         # ffmpeg doesn't support multipart exrs
         if "multipartExr" in repre["tags"]:
-            self.log.info("Representation contains \"multipartExr\" tag.")
+            self.log.info((
+                "Representation \"{}\" contains \"multipartExr\" tag. Skipped."
+            ).format(repre["name"]))
             return False
         return True
 
@@ -417,7 +424,7 @@ class ExtractBurnin(pype.api.Extractor):
             repre_files = output_filename
 
         temp_data["full_input_paths"] = full_input_paths
-        new_repre["repre_files"] = repre_files
+        new_repre["files"] = repre_files
 
     def prepare_repre_data(self, instance, repre, burnin_data, temp_data):
         """Prepare data for representation.
@@ -755,7 +762,7 @@ class ExtractBurnin(pype.api.Extractor): if os.pathsep in executable: executable = executable.split(os.pathsep)[0] - self.log.debug("EXE: {}".format(executable)) + self.log.debug("executable: {}".format(executable)) return executable def legacy_process(self, instance): From 0fb7032e73f9cf3c45fd7cb6c0f30b6757166612 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 16:05:41 +0200 Subject: [PATCH 084/207] better script data log --- pype/plugins/global/publish/extract_burnin.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 83b00bc574..9db8023cbd 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -179,7 +179,9 @@ class ExtractBurnin(pype.api.Extractor): "values": burnin_values } - self.log.debug("script_data: {}".format(script_data)) + self.log.debug( + "script_data: {}".format(json.dumps(script_data, indent=4)) + ) # Dump data to string dumped_script_data = json.dumps(script_data) From 498e9ab6e5dea13764b3ea2e2200b281c54d7988 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 16:10:38 +0200 Subject: [PATCH 085/207] fixed arguments in brunin script --- pype/scripts/otio_burnin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 4c9e0fc4d7..dc0e5fcbf6 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -526,6 +526,6 @@ if __name__ == "__main__": in_data["output"], in_data["burnin_data"], codec_data=in_data.get("codec"), - options=in_data.get("optios"), - values=in_data.get("values") + options=in_data.get("options"), + burnin_values=in_data.get("values") ) From a5efeb616ccf087e44d8980919d5b9812150b835 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 16:36:36 +0200 Subject: [PATCH 086/207] fixed codec copy in burnin script --- pype/scripts/otio_burnin.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index dc0e5fcbf6..d1291184ae 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -393,9 +393,6 @@ def burnins_from_data( "shot": "sh0010" } """ - # Make sure `codec_data` is list - if not codec_data: - codec_data = [] # Use legacy processing when options are not set if options is None or burnin_values is None: @@ -512,11 +509,14 @@ def burnins_from_data( text = value.format(**data) burnin.add_text(text, align, frame_start, frame_end) - codec_args = "" if codec_data: - codec_args = " ".join(codec_data) + # Use codec definition from method arguments + burnin_args = " ".join(codec_data) + else: + # Else use copy of source codecs for both audio and video + burnin_args = "-codec copy" - burnin.render(output_path, args=codec_args, overwrite=overwrite, **data) + burnin.render(output_path, args=burnin_args, overwrite=overwrite, **data) if __name__ == "__main__": From 69d0b8dd767b079da3de0e558e88d681779190e5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 18:47:20 +0200 Subject: [PATCH 087/207] added quotation marks where paths are used --- pype/scripts/otio_burnin.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index d1291184ae..a75df25255 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -20,7 +20,7 @@ FFMPEG = ( 
).format(ffmpeg_path) FFPROBE = ( - '{} -v quiet -print_format json -show_format -show_streams %(source)s' + '{} -v quiet -print_format json -show_format -show_streams "%(source)s"' ).format(ffprobe_path) DRAWTEXT = ( @@ -55,7 +55,7 @@ def _streams(source): def get_fps(str_value): if str_value == "0/0": - print("Source has \"r_frame_rate\" value set to \"0/0\".") + log.warning("Source has \"r_frame_rate\" value set to \"0/0\".") return "Unknown" items = str_value.split("/") @@ -266,7 +266,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): :returns: completed command :rtype: str """ - output = output or '' + output = '"{}"'.format(output or '') if overwrite: output = '-y {}'.format(output) From e1e3326dca2b2424f2e8990839abff2a819a7202 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 18:49:01 +0200 Subject: [PATCH 088/207] using codec from source since -codec copy can't be used --- pype/scripts/otio_burnin.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index a75df25255..1a9f3e3605 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -509,14 +509,25 @@ def burnins_from_data( text = value.format(**data) burnin.add_text(text, align, frame_start, frame_end) + ffmpeg_args = [] if codec_data: # Use codec definition from method arguments - burnin_args = " ".join(codec_data) - else: - # Else use copy of source codecs for both audio and video - burnin_args = "-codec copy" + ffmpeg_args = codec_data - burnin.render(output_path, args=burnin_args, overwrite=overwrite, **data) + else: + codec_name = burnin._streams[0].get("codec_name") + log.info("codec_name: {}".format(codec_name)) + if codec_name: + ffmpeg_args.append("-codec:v {}".format(codec_name)) + + pix_fmt = burnin._streams[0].get("pix_fmt") + if pix_fmt: + ffmpeg_args.append("-pix_fmt {}".format(pix_fmt)) + + ffmpeg_args_str = " ".join(ffmpeg_args) + burnin.render( + output_path, args=ffmpeg_args_str, overwrite=overwrite, **data + ) if __name__ == "__main__": From 4ed64d9c8a1dbca0064cca0a70ff982d0815e5df Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 18:51:36 +0200 Subject: [PATCH 089/207] fixed logs in burnin script --- pype/scripts/otio_burnin.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 1a9f3e3605..47e1811283 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -300,7 +300,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): args=args, overwrite=overwrite ) - # print(command) + # log.info(command) proc = subprocess.Popen(command, shell=True) proc.communicate() @@ -516,7 +516,6 @@ def burnins_from_data( else: codec_name = burnin._streams[0].get("codec_name") - log.info("codec_name: {}".format(codec_name)) if codec_name: ffmpeg_args.append("-codec:v {}".format(codec_name)) From 29b2196825fdc7a6ee5cd005842d3ee37263e8af Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 19:04:12 +0200 Subject: [PATCH 090/207] better handling of images and sequences --- pype/plugins/global/publish/extract_review.py | 68 +++++++++++-------- 1 file changed, 40 insertions(+), 28 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 4eeb526a19..47ab1c482b 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -284,14 +284,8 @@ class 
ExtractReview(pyblish.api.InstancePlugin):
         # Add argument to override output file
         ffmpeg_input_args.append("-y")
 
-        if temp_data["without_handles"]:
-            # NOTE used `-frames:v` instead of `-t` - should work the same way
-            duration_frames = (
-                temp_data["output_frame_end"]
-                - temp_data["output_frame_start"]
-                + 1
-            )
-            ffmpeg_output_args.append("-frames:v {}".format(duration_frames))
+        # Prepare input and output filepaths
+        self.input_output_paths(new_repre, output_def, temp_data)
 
         if temp_data["input_is_sequence"]:
             # Set start frame
@@ -319,23 +313,37 @@ class ExtractReview(pyblish.api.InstancePlugin):
             start_sec = float(temp_data["handle_start"]) / temp_data["fps"]
             ffmpeg_input_args.append("-ss {:0.2f}".format(start_sec))
 
-        full_input_path, full_output_path = self.input_output_paths(
-            new_repre, output_def, temp_data
-        )
-        ffmpeg_input_args.append("-i \"{}\"".format(full_input_path))
+        # Set output frames len to 1 when output is single image
+        if (
+            temp_data["output_ext_is_image"]
+            and not temp_data["output_is_sequence"]
+        ):
+            output_frames_len = 1
 
-        # Add audio arguments if there are any
-        audio_in_args, audio_filters, audio_out_args = self.audio_args(
-            instance, temp_data
-        )
-        ffmpeg_input_args.extend(audio_in_args)
-        ffmpeg_audio_filters.extend(audio_filters)
-        ffmpeg_output_args.extend(audio_out_args)
+        else:
+            output_frames_len = (
+                temp_data["output_frame_end"]
+                - temp_data["output_frame_start"]
+                + 1
+            )
 
-        # QUESTION what if audio is shoter than video?
-        # In case audio is longer than video`.
-        if "-shortest" not in ffmpeg_output_args:
-            ffmpeg_output_args.append("-shortest")
+        # NOTE used `-frames` instead of `-t` - should work the same way
+        # NOTE this also replaced `-shortest` argument
+        ffmpeg_output_args.append("-frames {}".format(output_frames_len))
+
+        # Add video/image input path
+        ffmpeg_input_args.append(
+            "-i \"{}\"".format(temp_data["full_input_path"])
+        )
+
+        # Add audio arguments if there are any. Skipped when output is an image.
+        if not temp_data["output_ext_is_image"]:
+            audio_in_args, audio_filters, audio_out_args = self.audio_args(
+                instance, temp_data
+            )
+            ffmpeg_input_args.extend(audio_in_args)
+            ffmpeg_audio_filters.extend(audio_filters)
+            ffmpeg_output_args.extend(audio_out_args)
 
         res_filters = self.rescaling_filters(temp_data, output_def, new_repre)
         ffmpeg_video_filters.extend(res_filters)
@@ -346,7 +354,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
         ffmpeg_video_filters.extend(lut_filters)
 
         # NOTE This must be latest added item to output arguments.
- ffmpeg_output_args.append("\"{}\"".format(full_output_path)) + ffmpeg_output_args.append( + "\"{}\"".format(temp_data["full_output_path"]) + ) return self.ffmpeg_full_args( ffmpeg_input_args, @@ -485,11 +495,11 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("New representation ext: `{}`".format(output_ext)) # Output is image file sequence witht frames + output_ext_is_image = output_ext in self.image_exts output_is_sequence = ( - (output_ext in self.image_exts) + output_ext_is_image and "sequence" in output_def["tags"] ) - if output_is_sequence: new_repre_files = [] frame_start = temp_data["output_frame_start"] @@ -534,11 +544,13 @@ class ExtractReview(pyblish.api.InstancePlugin): temp_data["full_input_path_single_file"] = full_input_path_single_file temp_data["full_output_path"] = full_output_path + # Store information about output + temp_data["output_ext_is_image"] = output_ext_is_image + temp_data["output_is_sequence"] = output_is_sequence + self.log.debug("Input path {}".format(full_input_path)) self.log.debug("Output path {}".format(full_output_path)) - return full_input_path, full_output_path - def audio_args(self, instance, temp_data): """Prepares FFMpeg arguments for audio inputs.""" audio_in_args = [] From bd854fc7e177419036e5924039e43bd5ceed36db Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 19:04:43 +0200 Subject: [PATCH 091/207] extract review has better check for sequence input --- pype/plugins/global/publish/extract_burnin.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 9db8023cbd..b79cb15a69 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -375,7 +375,11 @@ class ExtractBurnin(pype.api.Extractor): Returns: None: This is processing method. 
""" - is_sequence = "sequence" in new_repre["tags"] + # TODO we should find better way to know if input is sequence + is_sequence = ( + "sequence" in new_repre["tags"] + and isinstance(new_repre["files"], (tuple, list)) + ) if is_sequence: input_filename = new_repre["sequence_file"] else: From 7b9f29f5bf18fdbf1e7c20a09903b228ebb05cec Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 19:17:03 +0200 Subject: [PATCH 092/207] make sure output frames are integers --- pype/plugins/global/publish/extract_review.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 47ab1c482b..7549b6818a 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -249,8 +249,8 @@ class ExtractReview(pyblish.api.InstancePlugin): "handle_end": handle_end, "frame_start_handle": frame_start_handle, "frame_end_handle": frame_end_handle, - "output_frame_start": output_frame_start, - "output_frame_end": output_frame_end, + "output_frame_start": int(output_frame_start), + "output_frame_end": int(output_frame_end), "pixel_aspect": instance.data.get("pixelAspect", 1), "resolution_width": instance.data.get("resolutionWidth"), "resolution_height": instance.data.get("resolutionHeight"), From 54c3374977db7798d8d9813577e6a17e5b538e13 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Apr 2020 19:17:16 +0200 Subject: [PATCH 093/207] fix join --- pype/plugins/global/publish/extract_burnin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index b79cb15a69..b2f858ee2f 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -394,7 +394,7 @@ class ExtractBurnin(pype.api.Extractor): frame_part = basename_parts.pop(-1) basename_start = ".".join(basename_parts) + filename_suffix - new_basename = ".".join(basename_start, frame_part) + new_basename = ".".join((basename_start, frame_part)) output_filename = new_basename + ext else: From 0ec66b337b8392efcfaf3edd30796549b9dbd839 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 29 Apr 2020 17:27:53 +0200 Subject: [PATCH 094/207] fix codec usage in extract slate review --- .../global/publish/extract_review_slate.py | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index e94701a312..928e3fdc40 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -107,7 +107,9 @@ class ExtractReviewSlate(pype.api.Extractor): output_args.extend(repre["outputDef"].get('output', [])) # Codecs are copied from source for whole input - output_args.append("-codec copy") + codec_args = self.codec_args(repre) + self.log.debug("Codec arguments: {}".format(codec_args)) + output_args.extend(codec_args) # make sure colors are correct output_args.extend([ @@ -269,3 +271,34 @@ class ExtractReviewSlate(pype.api.Extractor): vf_back = "-vf " + ",".join(vf_fixed) return vf_back + + def codec_args(self, repre): + """Detect possible codec arguments from representation.""" + codec_args = [] + + # Get one filename of representation files + filename = repre["files"] + # If files is list then pick first filename in list + if isinstance(filename, (tuple, list)): + 
filename = filename[0] + # Get full path to the file + full_input_path = os.path.join(repre["stagingDir"], filename) + + try: + # Get information about input file via ffprobe tool + streams = pype.lib.ffprobe_streams(full_input_path) + except Exception: + self.log.warning( + "Could not get codec data from input.", + exc_info=True + ) + return codec_args + + codec_name = streams[0].get("codec_name") + if codec_name: + codec_args.append("-codec:v {}".format(codec_name)) + + pix_fmt = streams[0].get("pix_fmt") + if pix_fmt: + codec_args.append("-pix_fmt {}".format(pix_fmt)) + return codec_args From 19c436f8f2ef9552c3928c4f5ff5fbb5f097ea5e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 29 Apr 2020 17:28:50 +0200 Subject: [PATCH 095/207] ffprobe_streams moved to pype.lib --- pype/lib.py | 22 ++++++++++++++ pype/plugins/global/publish/extract_review.py | 29 ++----------------- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/pype/lib.py b/pype/lib.py index d3ccbc8589..7f88a130d3 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -1327,3 +1327,25 @@ class BuildWorkfile: ) return output + + +def ffprobe_streams(path_to_file): + """Load streams from entered filepath via ffprobe.""" + log.info( + "Getting information about input \"{}\".".format(path_to_file) + ) + args = [ + get_ffmpeg_tool_path("ffprobe"), + "-v quiet", + "-print_format json", + "-show_format", + "-show_streams", + "\"{}\"".format(path_to_file) + ] + command = " ".join(args) + log.debug("FFprobe command: \"{}\"".format(command)) + popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) + + popen_output = popen.communicate()[0] + log.debug("FFprobe output: {}".format(popen_output)) + return json.loads(popen_output)["streams"] diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 7549b6818a..56a9c870b1 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -2,7 +2,6 @@ import os import re import copy import json -import subprocess import pyblish.api import clique import pype.api @@ -32,7 +31,6 @@ class ExtractReview(pyblish.api.InstancePlugin): # FFmpeg tools paths ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") # Preset attributes profiles = None @@ -83,7 +81,7 @@ class ExtractReview(pyblish.api.InstancePlugin): profile, instance_families ) if not _profile_outputs: - self.log.info(( + self.log.warning(( "Skipped instance. All output definitions from selected" " profile does not match to instance families. 
\"{}\"" ).format(str(instance_families))) @@ -608,7 +606,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # NOTE Skipped using instance's resolution full_input_path_single_file = temp_data["full_input_path_single_file"] - input_data = self.ffprobe_streams(full_input_path_single_file) + input_data = pype.lib.ffprobe_streams(full_input_path_single_file)[0] input_width = input_data["width"] input_height = input_data["height"] @@ -764,27 +762,6 @@ class ExtractReview(pyblish.api.InstancePlugin): return filters - def ffprobe_streams(self, path_to_file): - """Load streams from entered filepath.""" - self.log.info( - "Getting information about input \"{}\".".format(path_to_file) - ) - args = [ - self.ffprobe_path, - "-v quiet", - "-print_format json", - "-show_format", - "-show_streams", - "\"{}\"".format(path_to_file) - ] - command = " ".join(args) - self.log.debug("FFprobe command: \"{}\"".format(command)) - popen = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE) - - output = popen.communicate()[0] - self.log.debug("FFprobe output: {}".format(output)) - return json.loads(output)["streams"][0] - def main_family_from_instance(self, instance): """Returns main family of entered instance.""" family = instance.data.get("family") @@ -982,7 +959,7 @@ class ExtractReview(pyblish.api.InstancePlugin): matching_profiles.append(profile) if not matching_profiles: - self.log.info(( + self.log.warning(( "None of profiles match your setup." " Host \"{}\" | Task: \"{}\" | Family: \"{}\"" ).format(host_name, task_name, family)) From c9dd40de22c94e80413b69aea9b6b7451f93ff29 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 29 Apr 2020 17:30:34 +0200 Subject: [PATCH 096/207] better repre debug log --- pype/plugins/global/publish/extract_review_slate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 928e3fdc40..1825035aef 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -30,7 +30,7 @@ class ExtractReviewSlate(pype.api.Extractor): fps = inst_data.get("fps") for idx, repre in enumerate(inst_data["representations"]): - self.log.debug("__ i: `{}`, repre: `{}`".format(idx, repre)) + self.log.debug("repre ({}): `{}`".format(idx + 1, repre)) p_tags = repre.get("tags", []) if "slate-frame" not in p_tags: From 623b4ac1e4a3c7f937b17d071c4c675ed0b3e719 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 29 Apr 2020 17:47:40 +0200 Subject: [PATCH 097/207] added logs to burnin script --- pype/scripts/otio_burnin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 47e1811283..3bd022943c 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -300,10 +300,10 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): args=args, overwrite=overwrite ) - # log.info(command) + log.info("Launching command: {}".format(command)) proc = subprocess.Popen(command, shell=True) - proc.communicate() + log.info(proc.communicate()[0]) if proc.returncode != 0: raise RuntimeError("Failed to render '%s': %s'" % (output, command)) From 9291fda2c2c83d928b5f782657d92edbc48a529e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 29 Apr 2020 20:05:16 +0200 Subject: [PATCH 098/207] removed setting tags to families --- pype/plugins/global/publish/extract_review.py | 6 ------ 1 file changed, 6 deletions(-) diff --git 
a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 56a9c870b1..ddd56124f1 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -146,12 +146,6 @@ class ExtractReview(pyblish.api.InstancePlugin): "New representation tags: `{}`".format(new_repre["tags"]) ) - # # QUESTION Why the hell we were adding tags to families? - # # add families - # for tag in output_def["tags"]: - # if tag not in instance.data["families"]: - # instance.data["families"].append(tag) - temp_data = self.prepare_temp_data(instance, repre, output_def) ffmpeg_args = self._ffmpeg_arguments( From b68f049a7cae8fbd08510185ffcbbad74688819e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Apr 2020 17:55:46 +0200 Subject: [PATCH 099/207] removed usage and setting of "anatomy_template" key in representations --- pype/nuke/lib.py | 1 - pype/plugins/global/publish/extract_burnin.py | 1 - pype/plugins/global/publish/submit_publish_job.py | 6 +----- pype/plugins/maya/publish/extract_yeti_cache.py | 4 +--- pype/plugins/maya/publish/extract_yeti_rig.py | 6 ++---- pype/plugins/nuke/publish/collect_writes.py | 3 +-- pype/plugins/nuke/publish/extract_render_local.py | 3 +-- pype/plugins/nuke/publish/extract_thumbnail.py | 1 - .../publish/collect_context.py | 15 +-------------- 9 files changed, 7 insertions(+), 33 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 423738dd7f..249a3f8f5b 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1412,7 +1412,6 @@ class ExporterReview: 'ext': self.ext, 'files': self.file, "stagingDir": self.staging_dir, - "anatomy_template": "render", "tags": [self.name.replace("_", "-")] + add_tags } diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index c151752c8f..7668eafd2a 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -193,7 +193,6 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug("Output: {}".format(output)) repre_update = { - "anatomy_template": "render", "files": movieFileBurnin, "name": repre["name"], "tags": [x for x in repre["tags"] if x != "delete"] diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 843760f9ec..b9c14d4fe4 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -380,7 +380,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "frameEnd": int(instance_data.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, - "anatomy_template": "render", "fps": new_instance.get("fps"), "tags": ["review"] if preview else [] } @@ -443,7 +442,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": os.path.dirname(list(c)[0]), - "anatomy_template": "render", "fps": instance.get("fps"), "tags": ["review", "preview"] if preview else [], } @@ -462,13 +460,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "name": ext, "ext": ext, "files": os.path.basename(r), - "stagingDir": os.path.dirname(r), - "anatomy_template": "publish" + "stagingDir": os.path.dirname(r) } if r in bake_render_path: rep.update({ "fps": instance.get("fps"), - "anatomy_template": "render", "tags": ["review", "delete"] }) # solve families 
with `preview` attributes diff --git a/pype/plugins/maya/publish/extract_yeti_cache.py b/pype/plugins/maya/publish/extract_yeti_cache.py index 7d85f396ae..5a67a6ab7e 100644 --- a/pype/plugins/maya/publish/extract_yeti_cache.py +++ b/pype/plugins/maya/publish/extract_yeti_cache.py @@ -73,7 +73,6 @@ class ExtractYetiCache(pype.api.Extractor): 'ext': 'fur', 'files': cache_files[0] if len(cache_files) == 1 else cache_files, 'stagingDir': dirname, - 'anatomy_template': 'publish', 'frameStart': int(start_frame), 'frameEnd': int(end_frame) } @@ -84,8 +83,7 @@ class ExtractYetiCache(pype.api.Extractor): 'name': 'fursettings', 'ext': 'fursettings', 'files': os.path.basename(data_file), - 'stagingDir': dirname, - 'anatomy_template': 'publish' + 'stagingDir': dirname } ) diff --git a/pype/plugins/maya/publish/extract_yeti_rig.py b/pype/plugins/maya/publish/extract_yeti_rig.py index 98e7271d1a..f82cd75c30 100644 --- a/pype/plugins/maya/publish/extract_yeti_rig.py +++ b/pype/plugins/maya/publish/extract_yeti_rig.py @@ -169,8 +169,7 @@ class ExtractYetiRig(pype.api.Extractor): 'name': "ma", 'ext': 'ma', 'files': "yeti_rig.ma", - 'stagingDir': dirname, - 'anatomy_template': 'publish' + 'stagingDir': dirname } ) self.log.info("settings file: {}".format("yeti.rigsettings")) @@ -179,8 +178,7 @@ class ExtractYetiRig(pype.api.Extractor): 'name': 'rigsettings', 'ext': 'rigsettings', 'files': 'yeti.rigsettings', - 'stagingDir': dirname, - 'anatomy_template': 'publish' + 'stagingDir': dirname } ) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 6379a1db87..1850df2d00 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -79,8 +79,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): representation = { 'name': ext, 'ext': ext, - "stagingDir": output_dir, - "anatomy_template": "render" + "stagingDir": output_dir } try: diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index b7aa59a457..37a6701380 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -71,8 +71,7 @@ class NukeRenderLocal(pype.api.Extractor): 'ext': ext, 'frameStart': "%0{}d".format(len(str(last_frame))) % first_frame, 'files': collected_frames, - "stagingDir": out_dir, - "anatomy_template": "render" + "stagingDir": out_dir } instance.data["representations"].append(repre) diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py index 362625c2f5..5e9302a01a 100644 --- a/pype/plugins/nuke/publish/extract_thumbnail.py +++ b/pype/plugins/nuke/publish/extract_thumbnail.py @@ -130,7 +130,6 @@ class ExtractThumbnail(pype.api.Extractor): "stagingDir": staging_dir, "frameStart": first_frame, "frameEnd": last_frame, - "anatomy_template": "render", "tags": tags } instance.data["representations"].append(repre) diff --git a/pype/plugins/standalonepublisher/publish/collect_context.py b/pype/plugins/standalonepublisher/publish/collect_context.py index 327b99f432..0567f82755 100644 --- a/pype/plugins/standalonepublisher/publish/collect_context.py +++ b/pype/plugins/standalonepublisher/publish/collect_context.py @@ -46,7 +46,6 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): in_data = json.load(f) asset_name = in_data["asset"] - family_preset_key = in_data.get("family_preset_key", "") family = in_data["family"] subset = in_data["subset"] @@ -57,15 
+56,6 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): presets = config.get_presets() - # Get from presets anatomy key that will be used for getting template - # - default integrate new is used if not set - anatomy_key = ( - presets.get("standalone_publish", {}) - .get("families", {}) - .get(family_preset_key, {}) - .get("anatomy_template") - ) - project = io.find_one({"type": "project"}) asset = io.find_one({"type": "asset", "name": asset_name}) context.data["project"] = project @@ -98,12 +88,9 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): instance.data["source"] = "standalone publisher" for component in in_data["representations"]: - component["destination"] = component["files"] component["stagingDir"] = component["stagingDir"] - # Do not set anatomy_template if not specified - if anatomy_key: - component["anatomy_template"] = anatomy_key + if isinstance(component["files"], list): collections, remainder = clique.assemble(component["files"]) self.log.debug("collecting sequence: {}".format(collections)) From ebdfaf8baaf6caed014aa9f7867b61364eb17d70 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Apr 2020 19:35:03 +0200 Subject: [PATCH 100/207] implemented method for template name determination based on plugin presets --- pype/plugins/global/publish/integrate_new.py | 76 ++++++++++++++++++-- 1 file changed, 69 insertions(+), 7 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 9eab1a15b1..d26cc4e856 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -40,10 +40,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): 'name': representation name (usually the same as extension) 'ext': file extension optional data - 'anatomy_template': 'publish' or 'render', etc. - template from anatomy that should be used for - integrating this file. Only the first level can - be specified right now. "frameStart" "frameEnd" 'fps' @@ -92,6 +88,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "family", "hierarchy", "task", "username" ] default_template_name = "publish" + template_name_profiles = None def process(self, instance): @@ -268,6 +265,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if 'transfers' not in instance.data: instance.data['transfers'] = [] + template_name = self.template_name_from_instance(instance) + published_representations = {} for idx, repre in enumerate(instance.data["representations"]): published_files = [] @@ -292,9 +291,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if repre.get('stagingDir'): stagingdir = repre['stagingDir'] - template_name = ( - repre.get('anatomy_template') or self.default_template_name - ) if repre.get("outputName"): template_data["output"] = repre['outputName'] @@ -701,3 +697,69 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_data[key] = instance.data[key] return version_data + + def main_family_from_instance(self, instance): + """Returns main family of entered instance.""" + family = instance.data.get("family") + if not family: + family = instance.data["families"][0] + return family + + def template_name_from_instance(self, instance): + template_name = self.default_template_name + if not self.template_name_profiles: + self.log.debug(( + "Template name profiles are not set." + " Using default \"{}\"" + ).format(template_name)) + return template_name + + # Task name from session? 
+ task_name = io.Session.get("AVALON_TASK") + family = self.main_family_from_instance(instance) + + matching_profiles = None + highest_value = -1 + for name, filters in self.template_name_profiles: + value = 0 + families = filters.get("families") + if families: + if family not in families: + continue + value += 1 + + tasks = filters.get("tasks") + if tasks: + if task_name not in tasks: + continue + value += 1 + + if value > highest_value: + matching_profiles = {} + highest_value = value + + if value == highest_value: + matching_profiles[name] = filters + + if len(matching_profiles) == 1: + template_name = matching_profiles.keys()[0] + self.log.debug( + "Using template name \"{}\".".format(template_name) + ) + + elif len(matching_profiles) > 1: + template_name = matching_profiles.keys()[0] + self.log.warning(( + "More than one template profiles matched" + " Family \"{}\" and Task: \"{}\"." + " Using first template name in row \"{}\"." + ).format(family, task_name, template_name)) + + else: + self.log.debug(( + "None of template profiles matched" + " Family \"{}\" and Task: \"{}\"." + " Using default template name \"{}\"" + ).format(family, task_name, template_name)) + + return template_name From 87a000227af72854798e19427536b45669f93226 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 4 May 2020 19:23:01 +0200 Subject: [PATCH 101/207] sync to avalon ignores MongoID attribute in hierarchical attributes --- pype/ftrack/events/event_sync_to_avalon.py | 2 ++ pype/ftrack/lib/avalon_sync.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index faf7539540..71e52c68da 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -1244,6 +1244,8 @@ class SyncToAvalonEvent(BaseEvent): self.process_session, entity, hier_keys, defaults ) for key, val in hier_values.items(): + if key == CustAttrIdKey: + continue output[key] = val return output diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index 474c70bd26..179977d403 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -877,6 +877,8 @@ class SyncEntitiesFactory: project_values[key] = value for key in avalon_hier: + if key == CustAttrIdKey: + continue value = self.entities_dict[top_id]["avalon_attrs"][key] if value is not None: project_values[key] = value From 25b2c66ebee62d0e7de33e34ed997c23514b3d5c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 4 May 2020 19:26:42 +0200 Subject: [PATCH 102/207] security roles are queried effectivelly --- .../actions/action_create_cust_attrs.py | 78 +++++++++---------- 1 file changed, 35 insertions(+), 43 deletions(-) diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py index 5279a95a20..ff3a30b534 100644 --- a/pype/ftrack/actions/action_create_cust_attrs.py +++ b/pype/ftrack/actions/action_create_cust_attrs.py @@ -138,7 +138,7 @@ class CustomAttributes(BaseAction): self.types = {} self.object_type_ids = {} self.groups = {} - self.security_roles = {} + self.security_roles = None # JOB SETTINGS userId = event['source']['user']['id'] @@ -199,8 +199,8 @@ class CustomAttributes(BaseAction): filtered_types_id.add(obj_type['id']) # Set security roles for attribute - role_list = ['API', 'Administrator'] - roles = self.get_security_role(role_list) + role_list = ("API", "Administrator", "Pypeclub") + roles = self.get_security_roles(role_list) # Set Text type of 
Attribute
         custom_attribute_type = self.get_type('text')
         # Set group to 'avalon'
@@ -416,48 +416,40 @@ class CustomAttributes(BaseAction):
                 'Found more than one group "{}"'.format(group_name)
             )
 
-    def get_role_ALL(self):
-        role_name = 'ALL'
-        if role_name in self.security_roles:
-            all_roles = self.security_roles[role_name]
-        else:
-            all_roles = self.session.query('SecurityRole').all()
-            self.security_roles[role_name] = all_roles
-        for role in all_roles:
-            if role['name'] not in self.security_roles:
-                self.security_roles[role['name']] = role
-        return all_roles
+    def query_roles(self):
+        if self.security_roles is None:
+            self.security_roles = {}
+            for role in self.session.query("SecurityRole").all():
+                key = role["name"].lower()
+                self.security_roles[key] = role
+        return self.security_roles
 
-    def get_security_role(self, security_roles):
-        roles = []
-        security_roles_lowered = [role.lower() for role in security_roles]
-        if len(security_roles) == 0 or 'all' in security_roles_lowered:
-            roles = self.get_role_ALL()
-        elif security_roles_lowered[0] == 'except':
-            excepts = security_roles[1:]
-            all = self.get_role_ALL()
-            for role in all:
-                if role['name'] not in excepts:
-                    roles.append(role)
-                    if role['name'] not in self.security_roles:
-                        self.security_roles[role['name']] = role
-        else:
-            for role_name in security_roles:
-                if role_name in self.security_roles:
-                    roles.append(self.security_roles[role_name])
-                    continue
+    def get_security_roles(self, security_roles):
+        all_roles = self.query_roles()
 
+        security_roles_lowered = tuple(name.lower() for name in security_roles)
+        if (
+            len(security_roles_lowered) == 0
+            or "all" in security_roles_lowered
+        ):
+            return tuple(all_roles.values())
+
+        output = []
+        if security_roles_lowered[0] == "except":
+            excepts = security_roles_lowered[1:]
+            for role_name, role in all_roles.items():
+                if role_name not in excepts:
+                    output.append(role)
+
+        else:
+            for role_name in security_roles_lowered:
+                if role_name in all_roles:
+                    output.append(all_roles[role_name])
+                else:
-                try:
-                    query = 'SecurityRole where name is "{}"'.format(role_name)
-                    role = self.session.query(query).one()
-                    self.security_roles[role_name] = role
-                    roles.append(role)
-                except NoResultFoundError:
+                    raise CustAttrException((
-                        'Securit role "{}" does not exist'
+                        "Security role \"{}\" was not found in Ftrack."
).format(role_name)) - - return roles + return output def get_default(self, attr): type = attr['type'] @@ -512,8 +504,8 @@ class CustomAttributes(BaseAction): roles_read = attr['read_security_roles'] if 'read_security_roles' in output: roles_write = attr['write_security_roles'] - output['read_security_roles'] = self.get_security_role(roles_read) - output['write_security_roles'] = self.get_security_role(roles_write) + output['read_security_roles'] = self.get_security_roles(roles_read) + output['write_security_roles'] = self.get_security_roles(roles_write) return output From 83352891585c3b4f567585d3f66a43d30b1de7ec Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 4 May 2020 19:27:03 +0200 Subject: [PATCH 103/207] few minor possible bugs fix --- pype/ftrack/actions/action_create_cust_attrs.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py index ff3a30b534..8ee1d138e3 100644 --- a/pype/ftrack/actions/action_create_cust_attrs.py +++ b/pype/ftrack/actions/action_create_cust_attrs.py @@ -8,7 +8,6 @@ import ftrack_api from pype.ftrack import BaseAction from pype.ftrack.lib.avalon_sync import CustAttrIdKey from pypeapp import config -from ftrack_api.exception import NoResultFoundError """ This action creates/updates custom attributes. @@ -382,15 +381,15 @@ class CustomAttributes(BaseAction): config = json.dumps({ 'multiSelect': multiSelect, 'data': json.dumps(data) - }) + }) return config def get_group(self, attr): - if isinstance(attr, str): - group_name = attr - else: + if isinstance(attr, dict): group_name = attr['group'].lower() + else: + group_name = attr if group_name in self.groups: return self.groups[group_name] From 1f2451df7377aa328f7984ee96167f28505baa23 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 4 May 2020 19:36:40 +0200 Subject: [PATCH 104/207] added method for converting mongoid attr from per entity type to hierarchical attribute --- .../actions/action_create_cust_attrs.py | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py index 8ee1d138e3..12e40b7ee5 100644 --- a/pype/ftrack/actions/action_create_cust_attrs.py +++ b/pype/ftrack/actions/action_create_cust_attrs.py @@ -224,6 +224,94 @@ class CustomAttributes(BaseAction): data['object_type_id'] = str(object_type_id) self.process_attribute(data) + def convert_mongo_id_to_hierarchical( + self, hierarchical_attr, object_type_attrs, session, event + ): + user_msg = "Converting old custom attributes. This may take some time." + self.show_message(event, user_msg, True) + self.log.info(user_msg) + + object_types_per_id = { + object_type["id"]: object_type + for object_type in session.query("ObjectType").all() + } + + cust_attr_query = ( + "select value, entity_id from ContextCustomAttributeValue " + "where configuration_id is {}" + ) + for attr_def in object_type_attrs: + attr_ent_type = attr_def["entity_type"] + if attr_ent_type == "show": + entity_type_label = "Project" + elif attr_ent_type == "task": + entity_type_label = ( + object_types_per_id[attr_def["object_type_id"]] + ) + else: + self.log.warning( + "Unsupported entity type: \"{}\". Skipping.".format( + attr_ent_type + ) + ) + continue + + self.log.debug(( + "Converting Avalon MongoID attr for Entity type \"{}\"." 
+ ).format(entity_type_label)) + + call_expr = [{ + "action": "query", + "expression": cust_attr_query.format(attr_def["id"]) + }] + if hasattr(session, "call"): + [values] = session.call(call_expr) + else: + [values] = session._call(call_expr) + + for value in values["data"]: + table_values = collections.OrderedDict({ + "configuration_id": hierarchical_attr["id"], + "entity_id": value["entity_id"] + }) + + session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + table_values, + "value", + ftrack_api.symbol.NOT_SET, + value["value"] + ) + ) + + try: + session.commit() + + except Exception: + session.rollback() + self.log.warning( + ( + "Couldn't transfer Avalon Mongo ID" + " attribute for entity type \"{}\"." + ).format(entity_type_label), + exc_info=True + ) + + try: + session.delete(attr_def) + session.commit() + + except Exception: + session.rollback() + self.log.warning( + ( + "Couldn't delete Avalon Mongo ID" + " attribute for entity type \"{}\"." + ).format(entity_type_label), + exc_info=True + ) + def custom_attributes_from_file(self, session, event): presets = config.get_presets()['ftrack']['ftrack_custom_attributes'] From f345da371520b96ba6cc021fdaea224510d66776 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 4 May 2020 19:47:19 +0200 Subject: [PATCH 105/207] removed standalone action support --- .../actions/action_create_cust_attrs.py | 42 ------------------- 1 file changed, 42 deletions(-) diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py index 12e40b7ee5..2ff0a3b49d 100644 --- a/pype/ftrack/actions/action_create_cust_attrs.py +++ b/pype/ftrack/actions/action_create_cust_attrs.py @@ -1,9 +1,6 @@ import os -import sys -import argparse import json import arrow -import logging import ftrack_api from pype.ftrack import BaseAction from pype.ftrack.lib.avalon_sync import CustAttrIdKey @@ -652,42 +649,3 @@ def register(session, plugins_presets={}): '''Register plugin. Called when used as an plugin.''' CustomAttributes(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. - loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' 
- ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) From 9935972a859531014d81b7c71efc69a2c649ff05 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 4 May 2020 19:54:12 +0200 Subject: [PATCH 106/207] hierarchical mongo id attribute automatically replace previous access --- .../actions/action_create_cust_attrs.py | 201 +++++++++--------- 1 file changed, 99 insertions(+), 102 deletions(-) diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py index 2ff0a3b49d..c141d6672c 100644 --- a/pype/ftrack/actions/action_create_cust_attrs.py +++ b/pype/ftrack/actions/action_create_cust_attrs.py @@ -1,4 +1,5 @@ import os +import collections import json import arrow import ftrack_api @@ -131,11 +132,6 @@ class CustomAttributes(BaseAction): return True def launch(self, session, entities, event): - self.types = {} - self.object_type_ids = {} - self.groups = {} - self.security_roles = None - # JOB SETTINGS userId = event['source']['user']['id'] user = session.query('User where id is ' + userId).one() @@ -149,7 +145,8 @@ class CustomAttributes(BaseAction): }) session.commit() try: - self.avalon_mongo_id_attributes(session) + self.prepare_global_data(session) + self.avalon_mongo_id_attributes(session, event) self.custom_attributes_from_file(session, event) job['status'] = 'done' @@ -166,60 +163,92 @@ class CustomAttributes(BaseAction): return True - def avalon_mongo_id_attributes(self, session): + def prepare_global_data(self, session): + self.types_per_name = { + attr_type["name"].lower(): attr_type + for attr_type in session.query("CustomAttributeType").all() + } + + self.security_roles = { + role["name"].lower(): role + for role in session.query("SecurityRole").all() + } + + object_types = session.query("ObjectType").all() + self.object_types_per_id = { + object_type["id"]: object_type for object_type in object_types + } + self.object_types_per_name = { + object_type["name"].lower(): object_type + for object_type in object_types + } + + self.groups = {} + + def avalon_mongo_id_attributes(self, session, event): + hierarchical_attr, object_type_attrs = ( + self.mongo_id_custom_attributes(session) + ) + + if hierarchical_attr is None: + self.create_hierarchical_mongo_attr(session) + hierarchical_attr, object_type_attrs = ( + self.mongo_id_custom_attributes(session) + ) + + if hierarchical_attr is None: + return + + if object_type_attrs: + self.convert_mongo_id_to_hierarchical( + hierarchical_attr, object_type_attrs, session, event + ) + + def mongo_id_custom_attributes(self, session): + cust_attrs_query = ( + "select id, entity_type, object_type_id, is_hierarchical, default" + " from CustomAttributeConfiguration" + " where key = \"{}\"" + ).format(CustAttrIdKey) + + mongo_id_avalon_attr = session.query(cust_attrs_query).all() + heirarchical_attr = None + object_type_attrs = [] + for cust_attr in mongo_id_avalon_attr: + if cust_attr["is_hierarchical"]: + heirarchical_attr = cust_attr + + else: + object_type_attrs.append(cust_attr) + + return heirarchical_attr, object_type_attrs + + def create_hierarchical_mongo_attr(self, session): # Attribute Name and Label - cust_attr_label = 'Avalon/Mongo Id' - - # Types that don't need object_type_id - base = {'show'} - - # Don't create custom attribute on these entity types: - exceptions = ['task', 'milestone'] - exceptions.extend(base) - - # Get all possible object types - all_obj_types = session.query('ObjectType').all() - - # Filter object types by exceptions - 
filtered_types_id = set() - - for obj_type in all_obj_types: - name = obj_type['name'] - if " " in name: - name = name.replace(' ', '') - - if obj_type['name'] not in self.object_type_ids: - self.object_type_ids[name] = obj_type['id'] - - if name.lower() not in exceptions: - filtered_types_id.add(obj_type['id']) + cust_attr_label = "Avalon/Mongo ID" # Set security roles for attribute role_list = ("API", "Administrator", "Pypeclub") roles = self.get_security_roles(role_list) # Set Text type of Attribute - custom_attribute_type = self.get_type('text') + custom_attribute_type = self.types_per_name["text"] # Set group to 'avalon' - group = self.get_group('avalon') + group = self.get_group("avalon") - data = {} - data['key'] = CustAttrIdKey - data['label'] = cust_attr_label - data['type'] = custom_attribute_type - data['default'] = '' - data['write_security_roles'] = roles - data['read_security_roles'] = roles - data['group'] = group - data['config'] = json.dumps({'markdown': False}) + data = { + "key": CustAttrIdKey, + "label": cust_attr_label, + "type": custom_attribute_type, + "default": "", + "write_security_roles": roles, + "read_security_roles": roles, + "group": group, + "is_hierarchical": True, + "entity_type": "show", + "config": json.dumps({"markdown": False}) + } - for entity_type in base: - data['entity_type'] = entity_type - self.process_attribute(data) - - data['entity_type'] = 'task' - for object_type_id in filtered_types_id: - data['object_type_id'] = str(object_type_id) - self.process_attribute(data) + self.process_attribute(data) def convert_mongo_id_to_hierarchical( self, hierarchical_attr, object_type_attrs, session, event @@ -401,11 +430,11 @@ class CustomAttributes(BaseAction): 'Type {} is not valid'.format(attr['type']) ) - type_name = attr['type'].lower() - output['key'] = attr['key'] output['label'] = attr['label'] - output['type'] = self.get_type(type_name) + + type_name = attr['type'].lower() + output['type'] = self.types_per_name[type_name] config = None if type_name == 'number': @@ -500,35 +529,25 @@ class CustomAttributes(BaseAction): 'Found more than one group "{}"'.format(group_name) ) - def query_roles(self): - if self.security_roles is None: - self.security_roles = {} - for role in self.session.query("SecurityRole").all(): - key = role["name"].lower() - self.security_roles[key] = role - return self.security_roles - def get_security_roles(self, security_roles): - security_roles = self.query_roles() - security_roles_lowered = tuple(name.lower() for name in security_roles) if ( len(security_roles_lowered) == 0 or "all" in security_roles_lowered ): - return tuple(security_roles.values()) + return list(self.security_roles.values()) output = [] if security_roles_lowered[0] == "except": excepts = security_roles_lowered[1:] - for role_name, role in security_roles.items(): + for role_name, role in self.security_roles.items(): if role_name not in excepts: output.append(role) else: for role_name in security_roles_lowered: - if role_name in security_roles: - output.append(security_roles[role_name]) + if role_name in self.security_roles: + output.append(self.security_roles[role_name]) else: raise CustAttrException(( "Securit role \"{}\" was not found in Ftrack." 
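
Read on its own, the role-list convention these hunks settle on ("all", a leading "except", or explicit names) can be sketched as a plain function; the mapping below is an invented stand-in for the SecurityRole entities the action caches:

    def resolve_roles(requested, roles_by_name):
        # Empty input or "all" selects every role; a leading "except"
        # inverts the selection; otherwise only named roles are returned.
        requested = tuple(name.lower() for name in requested)
        if not requested or "all" in requested:
            return list(roles_by_name.values())

        if requested[0] == "except":
            excepts = set(requested[1:])
            return [
                role for name, role in roles_by_name.items()
                if name not in excepts
            ]
        return [roles_by_name[name] for name in requested]

    roles = {"api": "API", "administrator": "Administrator", "user": "User"}
    print(resolve_roles(("except", "user"), roles))  # ['API', 'Administrator']
    print(resolve_roles((), roles))  # all three roles
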
@@ -593,27 +612,12 @@ class CustomAttributes(BaseAction): return output - def get_type(self, type_name): - if type_name in self.types: - return self.types[type_name] - - query = 'CustomAttributeType where name is "{}"'.format(type_name) - type = self.session.query(query).one() - self.types[type_name] = type - - return type - def get_entity_type(self, attr): - if 'is_hierarchical' in attr: - if attr['is_hierarchical'] is True: - type = 'show' - if 'entity_type' in attr: - type = attr['entity_type'] - - return { - 'is_hierarchical': True, - 'entity_type': type - } + if attr.get("is_hierarchical", False): + return { + "is_hierarchical": True, + "entity_type": attr.get("entity_type") or "show" + } if 'entity_type' not in attr: raise CustAttrException('Missing entity_type') @@ -625,23 +629,16 @@ class CustomAttributes(BaseAction): raise CustAttrException('Missing object_type') object_type_name = attr['object_type'] - if object_type_name not in self.object_type_ids: - try: - query = 'ObjectType where name is "{}"'.format( - object_type_name - ) - object_type_id = self.session.query(query).one()['id'] - except Exception: - raise CustAttrException(( - 'Object type with name "{}" don\'t exist' - ).format(object_type_name)) - self.object_type_ids[object_type_name] = object_type_id - else: - object_type_id = self.object_type_ids[object_type_name] + object_type_name_low = object_type_name.lower() + object_type = self.object_types_per_name.get(object_type_name_low) + if not object_type: + raise CustAttrException(( + 'Object type with name "{}" don\'t exist' + ).format(object_type_name)) return { 'entity_type': attr['entity_type'], - 'object_type_id': object_type_id + 'object_type_id': object_type["id"] } From 0c73041ab402d4e0eb54d8d84c6bbfc599713632 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 4 May 2020 20:29:15 +0200 Subject: [PATCH 107/207] fixed getting configuration id with hierarchical mongo id attribute --- pype/ftrack/lib/avalon_sync.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index 179977d403..e915e86184 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -291,6 +291,8 @@ class SyncEntitiesFactory: self.filtered_ids = [] self.not_selected_ids = [] + self.hier_cust_attr_ids_by_key = {} + self._ent_paths_by_ftrack_id = {} self.ftrack_avalon_mapper = None @@ -812,6 +814,7 @@ class SyncEntitiesFactory: key = attr["key"] attribute_key_by_id[attr["id"]] = key attributes_by_key[key] = attr + self.hier_cust_attr_ids_by_key[key] = attr["id"] store_key = "hier_attrs" if key.startswith("avalon_"): @@ -1595,9 +1598,16 @@ class SyncEntitiesFactory: if current_id != new_id_str: # store mongo id to ftrack entity - configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id" - ][CustAttrIdKey] + configuration_id = self.hier_cust_attr_ids_by_key.get( + CustAttrIdKey + ) + if not configuration_id: + # NOTE this is for cases when CustAttrIdKey key is not + # hierarchical custom attribute but per entity type + configuration_id = self.entities_dict[ftrack_id][ + "avalon_attrs_id" + ][CustAttrIdKey] + _entity_key = collections.OrderedDict({ "configuration_id": configuration_id, "entity_id": ftrack_id From 0d30a3bc8eae857327d47f2d348a02875ad2eac3 Mon Sep 17 00:00:00 2001 From: Toke Stuart Jepsen Date: Mon, 4 May 2020 22:40:29 +0100 Subject: [PATCH 108/207] Expose write node frame range and limit checkbox. - also some code cosmetics. 
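
The knob-linking pattern this commit applies can be reduced to a few lines. A sketch, assuming it runs inside Nuke's Python interpreter with a bare Group and Write node rather than Pype's configured write group:

    import nuke

    group = nuke.createNode("Group")
    group.begin()
    write = nuke.createNode("Write")
    group.end()

    # Expose the Write node's frame range knobs (and the "use_limit"
    # checkbox that enables them) on the group itself, so artists can
    # edit them without entering the group.
    for knob_name in ("use_limit", "first", "last"):
        link = nuke.Link_Knob(knob_name)
        link.makeLink(write.name(), knob_name)
        link.setName(knob_name)
        group.addKnob(link)
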
--- pype/nuke/lib.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 423738dd7f..185def7052 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -313,7 +313,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): if input: # if connected input node was defined connections.append({ - "node": input, + "node": input, "inputName": input.name()}) prev_node = nuke.createNode( "Input", "name {}".format(input.name())) @@ -369,7 +369,7 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): write_node = now_node = avalon.nuke.lib.add_write_node( "inside_{}".format(name), **_data - ) + ) # connect to previous node now_node.setInput(0, prev_node) @@ -393,11 +393,13 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): if review: add_review_knob(GN) - # add render button - lnk = nuke.Link_Knob("Render") - lnk.makeLink(write_node.name(), "Render") - lnk.setName("Render") - GN.addKnob(lnk) + # Add linked knobs. + linked_knob_names = ["Render", "use_limit", "first", "last"] + for name in linked_knob_names: + link = nuke.Link_Knob(name) + link.makeLink(write_node.name(), name) + link.setName(name) + GN.addKnob(link) divider = nuke.Text_Knob('') GN.addKnob(divider) @@ -408,7 +410,6 @@ def create_write_node(name, data, input=None, prenodes=None, review=True): # Deadline tab. add_deadline_tab(GN) - # set tile color tile_color = _data.get("tile_color", "0xff0000ff") GN["tile_color"].setValue(tile_color) @@ -436,6 +437,7 @@ def add_rendering_knobs(node): node.addKnob(knob) return node + def add_review_knob(node): ''' Adds additional review knob to given node @@ -645,8 +647,9 @@ class WorkfileSettings(object): if root_dict.get("customOCIOConfigPath"): self._root_node["customOCIOConfigPath"].setValue( str(root_dict["customOCIOConfigPath"]).format( - **os.environ).replace("\\", "/") - ) + **os.environ + ).replace("\\", "/") + ) log.debug("nuke.root()['{}'] changed to: {}".format( "customOCIOConfigPath", root_dict["customOCIOConfigPath"])) root_dict.pop("customOCIOConfigPath") @@ -750,10 +753,9 @@ class WorkfileSettings(object): if changes: msg = "Read nodes are not set to correct colospace:\n\n" for nname, knobs in changes.items(): - msg += str(" - node: '{0}' is now '{1}' " - "but should be '{2}'\n").format( - nname, knobs["from"], knobs["to"] - ) + msg += str( + " - node: '{0}' is now '{1}' but should be '{2}'\n" + ).format(nname, knobs["from"], knobs["to"]) msg += "\nWould you like to change it?" @@ -1420,7 +1422,7 @@ class ExporterReview: repre.update({ "frameStart": self.first_frame, "frameEnd": self.last_frame, - }) + }) self.data["representations"].append(repre) @@ -1655,7 +1657,7 @@ class ExporterReviewMov(ExporterReview): if not self.viewer_lut_raw: colorspaces = [ self.bake_colorspace_main, self.bake_colorspace_fallback - ] + ] if any(colorspaces): # OCIOColorSpace with controled output @@ -1709,7 +1711,7 @@ class ExporterReviewMov(ExporterReview): self.get_representation_data( tags=["review", "delete"], range=True - ) + ) self.log.debug("Representation... 
`{}`".format(self.data)) @@ -1744,14 +1746,14 @@ def get_dependent_nodes(nodes): if test_in: connections_in.update({ node: test_in - }) + }) # collect all outputs outside test_out = [i for i in outputs if i.name() not in node_names] if test_out: # only one dependent node is allowed connections_out.update({ node: test_out[-1] - }) + }) return connections_in, connections_out From 738c18792830ba450536cc63d84b1f44e31c4a8b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 5 May 2020 10:00:06 +0200 Subject: [PATCH 109/207] annoying fix --- pype/ftrack/actions/action_create_cust_attrs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py index c141d6672c..37b11256d2 100644 --- a/pype/ftrack/actions/action_create_cust_attrs.py +++ b/pype/ftrack/actions/action_create_cust_attrs.py @@ -272,7 +272,7 @@ class CustomAttributes(BaseAction): entity_type_label = "Project" elif attr_ent_type == "task": entity_type_label = ( - object_types_per_id[attr_def["object_type_id"]] + object_types_per_id[attr_def["object_type_id"]]["name"] ) else: self.log.warning( From 02e129f5404c461de69377dfe1bd9f09d26d5e25 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Tue, 5 May 2020 18:15:08 +0200 Subject: [PATCH 110/207] handle errors during file copy --- pype/plugins/global/publish/integrate_new.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 9ea3d0bda9..5d3e70bf13 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -5,6 +5,7 @@ import sys import copy import clique import errno +import six from pymongo import DeleteOne, InsertOne import pyblish.api @@ -569,7 +570,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # copy file with speedcopy and check if size of files are simetrical while True: - copyfile(src, dst) + try: + copyfile(src, dst) + except OSError as e: + self.log.critical("Cannot copy {} to {}".format(src, dst)) + self.log.critical(e) + six.reraise(*sys.exc_info()) if str(getsize(src)) in str(getsize(dst)): break From 87b100236fb5fc7544b03c3caaa4fe5bc66e10b1 Mon Sep 17 00:00:00 2001 From: Toke Stuart Jepsen Date: Wed, 6 May 2020 09:09:06 +0100 Subject: [PATCH 111/207] Fix render collection --- pype/plugins/maya/publish/collect_render.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 68e865b3e0..456b584191 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -157,6 +157,9 @@ class CollectMayaRender(pyblish.api.ContextPlugin): attachTo = [] if sets: for s in sets: + if "family" not in cmds.listAttr(s): + continue + attachTo.append( { "version": None, # we need integrator for that From 76bae583889413183a08245de7a4ba68e20429d3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 6 May 2020 14:19:28 +0200 Subject: [PATCH 112/207] handles are checked for None instead of using `.get(...)` method --- .../global/publish/collect_avalon_entities.py | 24 +++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index 53f11aa693..9d16a05a78 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ 
b/pype/plugins/global/publish/collect_avalon_entities.py @@ -51,10 +51,26 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): context.data["frameStart"] = data.get("frameStart") context.data["frameEnd"] = data.get("frameEnd") - handles = int(data.get("handles") or 0) - context.data["handles"] = handles - context.data["handleStart"] = int(data.get("handleStart", handles)) - context.data["handleEnd"] = int(data.get("handleEnd", handles)) + handles = data.get("handles") or 0 + handle_start = data.get("handleStart") + if handle_start is None: + handle_start = handles + self.log.info(( + "Key \"handleStart\" is not set." + " Using value from \"handles\" key {}." + ).format(handle_start)) + + handle_end = data.get("handleEnd") + if handle_end is None: + handle_end = handles + self.log.info(( + "Key \"handleEnd\" is not set." + " Using value from \"handles\" key {}." + ).format(handle_end)) + + context.data["handles"] = int(handles) + context.data["handleStart"] = int(handle_start) + context.data["handleEnd"] = int(handle_end) frame_start_h = data.get("frameStart") - context.data["handleStart"] frame_end_h = data.get("frameEnd") + context.data["handleEnd"] From 59b50beb0b60390f0ebfe8f4bfa2818c7ea42fac Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 6 May 2020 14:27:14 +0200 Subject: [PATCH 113/207] added action for removing values of hierarchical custom attributes set to None --- .../action_clean_hierarchical_attributes.py | 107 ++++++++++++++++++ 1 file changed, 107 insertions(+) create mode 100644 pype/ftrack/actions/action_clean_hierarchical_attributes.py diff --git a/pype/ftrack/actions/action_clean_hierarchical_attributes.py b/pype/ftrack/actions/action_clean_hierarchical_attributes.py new file mode 100644 index 0000000000..8b4c2682f6 --- /dev/null +++ b/pype/ftrack/actions/action_clean_hierarchical_attributes.py @@ -0,0 +1,107 @@ +import os +import collections +import ftrack_api +from pype.ftrack import BaseAction +from pype.ftrack.lib.avalon_sync import get_avalon_attr + + +class CleanHierarchicalAttrsAction(BaseAction): + identifier = "clean.hierarchical.attr" + label = "Pype Admin" + variant = "- Clean hierarchical custom attributes" + description = "Unset empty hierarchical attribute values." 
+ role_list = ["Pypeclub", "Administrator", "Project Manager"] + icon = "{}/ftrack/action_icons/PypeAdmin.svg".format( + os.environ.get("PYPE_STATICS_SERVER", "") + ) + + all_project_entities_query = ( + "select id, name, parent_id, link" + " from TypedContext where project_id is \"{}\"" + ) + cust_attr_query = ( + "select value, entity_id from CustomAttributeValue " + "where entity_id in ({}) and configuration_id is \"{}\"" + ) + + def discover(self, session, entities, event): + """Show only on project entity.""" + if len(entities) == 1 and entities[0].entity_type.lower() == "project": + return True + return False + + def launch(self, session, entities, event): + project = entities[0] + + user_message = "This may take some time" + self.show_message(event, user_message, result=True) + self.log.debug("Preparing entities for cleanup.") + + all_entities = session.query( + self.all_project_entities_query.format(project["id"]) + ).all() + + all_entities_ids = [ + "\"{}\"".format(entity["id"]) + for entity in all_entities + if entity.entity_type.lower() != "task" + ] + self.log.debug( + "Collected {} entities to process.".format(len(all_entities_ids)) + ) + entity_ids_joined = ", ".join(all_entities_ids) + + attrs, hier_attrs = get_avalon_attr(session) + + for attr in hier_attrs: + configuration_key = attr["key"] + self.log.debug( + "Looking for cleanup of custom attribute \"{}\"".format( + configuration_key + ) + ) + configuration_id = attr["id"] + call_expr = [{ + "action": "query", + "expression": self.cust_attr_query.format( + entity_ids_joined, configuration_id + ) + }] + + [values] = self.session.call(call_expr) + + data = {} + for item in values["data"]: + value = item["value"] + if value is None: + data[item["entity_id"]] = value + + if not data: + self.log.debug( + "Nothing to clean for \"{}\".".format(configuration_key) + ) + continue + + self.log.debug("Cleaning up {} values for \"{}\".".format( + len(data), configuration_key + )) + for entity_id, value in data.items(): + entity_key = collections.OrderedDict({ + "configuration_id": configuration_id, + "entity_id": entity_id + }) + session.recorded_operations.push( + ftrack_api.operation.DeleteEntityOperation( + "CustomAttributeValue", + entity_key + ) + ) + session.commit() + + return True + + +def register(session, plugins_presets={}): + '''Register plugin. 
Called when used as an plugin.''' + + CleanHierarchicalAttrsAction(session, plugins_presets).register() From 8fde8167a745ee8ee01c566433164c2fde02ff61 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 6 May 2020 17:22:20 +0200 Subject: [PATCH 114/207] event handler now set thumbnail only if new asset version is created --- pype/ftrack/events/event_thumbnail_updates.py | 72 ++++++++----------- 1 file changed, 28 insertions(+), 44 deletions(-) diff --git a/pype/ftrack/events/event_thumbnail_updates.py b/pype/ftrack/events/event_thumbnail_updates.py index 5421aa7543..c62be9718a 100644 --- a/pype/ftrack/events/event_thumbnail_updates.py +++ b/pype/ftrack/events/event_thumbnail_updates.py @@ -2,57 +2,42 @@ from pype.ftrack import BaseEvent class ThumbnailEvents(BaseEvent): - def launch(self, session, event): - '''just a testing event''' + """Updates thumbnails of entities from new AssetVersion.""" - # self.log.info(event) - # start of event procedure ---------------------------------- - for entity in event['data'].get('entities', []): + for entity in event["data"].get("entities", []): + if ( + entity["action"] == "remove" + or entity["entityType"].lower() != "assetversion" + or "thumbid" not in (entity.get("keys") or []) + ): + continue # update created task thumbnail with first parent thumbnail - if entity['entityType'] == 'task' and entity['action'] == 'add': + version = session.get("AssetVersion", entity["entityId"]) + if not version: + continue - task = session.get('TypedContext', entity['entityId']) - parent = task['parent'] + thumbnail = version.get("thumbnail") + if not thumbnail: + continue - if parent.get('thumbnail') and not task.get('thumbnail'): - task['thumbnail'] = parent['thumbnail'] - self.log.info('>>> Updated thumbnail on [ %s/%s ]'.format( - parent['name'], task['name'] - )) + parent = version["asset"]["parent"] + task = version["task"] + parent["thumbnail_id"] = version["thumbnail_id"] + if parent.entity_type.lower() == "project": + name = parent["full_name"] + else: + name = parent["name"] - # Update task thumbnail from published version - # if (entity['entityType'] == 'assetversion' and - # entity['action'] == 'encoded'): - elif ( - entity['entityType'] == 'assetversion' and - entity['action'] != 'remove' and - 'thumbid' in (entity.get('keys') or []) - ): + task_msg = "" + if task: + task["thumbnail_id"] = version["thumbnail_id"] + task_msg = " and task [ {} ]".format(task["name"]) - version = session.get('AssetVersion', entity['entityId']) - if not version: - continue - - thumbnail = version.get('thumbnail') - if not thumbnail: - continue - - parent = version['asset']['parent'] - task = version['task'] - parent['thumbnail_id'] = version['thumbnail_id'] - if parent.entity_type.lower() == "project": - name = parent["full_name"] - else: - name = parent["name"] - msg = '>>> Updating thumbnail for shot [ {} ]'.format(name) - - if task: - task['thumbnail_id'] = version['thumbnail_id'] - msg += " and task [ {} ]".format(task["name"]) - - self.log.info(msg) + self.log.info(">>> Updating thumbnail for shot [ {} ]{}".format( + name, task_msg + )) try: session.commit() @@ -61,5 +46,4 @@ class ThumbnailEvents(BaseEvent): def register(session, plugins_presets): - '''Register plugin. 
Called when used as an plugin.''' ThumbnailEvents(session, plugins_presets).register() From eba14f8a8df89b6f7c0b73715b927d16a8f75ae5 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 6 May 2020 17:37:19 +0200 Subject: [PATCH 115/207] clean(nks): Hound recomendations --- .../nukestudio/publish/collect_frame_ranges.py | 1 - .../nukestudio/publish/collect_timecodes.py | 14 ++++++++------ 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_frame_ranges.py b/pype/plugins/nukestudio/publish/collect_frame_ranges.py index 24b23fae01..6993fa5e67 100644 --- a/pype/plugins/nukestudio/publish/collect_frame_ranges.py +++ b/pype/plugins/nukestudio/publish/collect_frame_ranges.py @@ -34,7 +34,6 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin): frame_start = timeline_in frame_end = frame_start + (timeline_out - timeline_in) - source = instance.data.get("source") data.update({ "sourceFirst": source_in_h, diff --git a/pype/plugins/nukestudio/publish/collect_timecodes.py b/pype/plugins/nukestudio/publish/collect_timecodes.py index b3d4a5e8c5..5ac07314a4 100644 --- a/pype/plugins/nukestudio/publish/collect_timecodes.py +++ b/pype/plugins/nukestudio/publish/collect_timecodes.py @@ -3,7 +3,10 @@ import opentimelineio.opentime as otio_ot class CollectClipTimecodes(pyblish.api.InstancePlugin): - """Collect time with OpenTimelineIO: source_h(In,Out)[timecode, sec], timeline(In,Out)[timecode, sec]""" + """Collect time with OpenTimelineIO: + source_h(In,Out)[timecode, sec] + timeline(In,Out)[timecode, sec] + """ order = pyblish.api.CollectorOrder + 0.101 label = "Collect Timecodes" @@ -79,10 +82,9 @@ class CollectClipTimecodes(pyblish.api.InstancePlugin): }) data.update({ - "otioData": otio_data, - "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio), - "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio), - } - ) + "otioData": otio_data, + "sourceTimecodeIn": otio_ot.to_timecode(otio_in_h_ratio), + "sourceTimecodeOut": otio_ot.to_timecode(otio_out_h_ratio) + }) instance.data.update(data) self.log.debug("data: {}".format(instance.data)) From 76e7896e2453433ab2e473e6c5ea544e58dbd932 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 7 May 2020 14:42:36 +0200 Subject: [PATCH 116/207] collect rendered files also remap AVALON_WORKDIR and stagingDir on instances --- .../global/publish/collect_rendered_files.py | 45 ++++++++++++------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 82c1b5bfd0..fab43b116a 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -27,7 +27,9 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): _context = None def _load_json(self, path): - assert os.path.isfile(path), ("path to json file doesn't exist") + assert os.path.isfile(path), ( + "Path to json file doesn't exist. \"{}\"".format(path) + ) data = None with open(path, "r") as json_file: try: @@ -39,6 +41,16 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): ) return data + def _remap_staging_dir(self, data_object, anatomy): + staging_dir = data_object.get("stagingDir") + if staging_dir: + remapped = anatomy.roots_obj.path_remapper(staging_dir) + if remapped: + data_object["stagingDir"] = remapped + self.log.debug(( + "stagingDir was remapped. 
To: \"{}\" From: \"{}\"" + ).format(remapped, staging_dir)) + def _process_path(self, data, anatomy): # validate basic necessary data data_err = "invalid json file - missing data" @@ -80,19 +92,13 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): instance_data.get("subset") ) self.log.info("Filling stagignDir...") + + self._remap_staging_dir(instance_data, anatomy) instance.data.update(instance_data) representations = [] for repre_data in instance_data.get("representations") or []: - staging_dir = repre_data.get("stagingDir") - if staging_dir: - remapped = anatomy.roots_obj.path_remapper(staging_dir) - if remapped: - repre_data["stagingDir"] = remapped - self.log.debug(( - "stagingDir was remapped. To: \"{}\" From: \"{}\"" - ).format(remapped, staging_dir)) - + self._remap_staging_dir(repre_data, anatomy) representations.append(repre_data) instance.data["representations"] = representations @@ -117,14 +123,21 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): )) anatomy = context.data["anatomy"] - session_set = False + session_is_set = False for path in paths: path = path.format(**{"root": anatomy.roots}) data = self._load_json(path) - if not session_set: - self.log.info("Setting session using data from file") - api.Session.update(data.get("session")) - os.environ.update(data.get("session")) - session_set = True assert data, "failed to load json file" + if not session_is_set: + session_data = data["session"] + remapped = anatomy.roots_obj.path_remapper( + session_data["AVALON_WORKDIR"] + ) + if remapped: + session_data["AVALON_WORKDIR"] = remapped + + self.log.info("Setting session using data from file") + api.Session.update(session_data) + os.environ.update(session_data) + session_is_set = True self._process_path(data, anatomy) From 8947bb5153fef3f0d6f051454b962e85a91cdc87 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 7 May 2020 14:47:19 +0200 Subject: [PATCH 117/207] colect rendered files use fill_root method from anatomy --- pype/plugins/global/publish/collect_rendered_files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index fab43b116a..2ae3f0e3a3 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -125,7 +125,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): anatomy = context.data["anatomy"] session_is_set = False for path in paths: - path = path.format(**{"root": anatomy.roots}) + path = anatomy.fill_root(path) data = self._load_json(path) assert data, "failed to load json file" if not session_is_set: From 1afabb00e2e8b649c2c3b7c7eefc813b56983162 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Thu, 7 May 2020 15:04:37 +0200 Subject: [PATCH 118/207] fixed path usage, renderer handling and few code style issues --- .../global/publish/submit_publish_job.py | 155 +++++++++++++----- .../maya/publish/submit_maya_deadline.py | 93 ++++------- 2 files changed, 152 insertions(+), 96 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 6d04c8cb01..8688d161e2 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -1,3 +1,6 @@ +# -*- coding: utf-8 -*- +"""Submit publishing job to farm.""" + import os import json import re @@ -10,7 +13,7 @@ import pyblish.api def _get_script(): - """Get path 
to the image sequence script""" + """Get path to the image sequence script.""" try: from pype.scripts import publish_filesequence except Exception: @@ -23,8 +26,8 @@ def _get_script(): return os.path.normpath(module_path) -# Logic to retrieve latest files concerning extendFrames def get_latest_version(asset_name, subset_name, family): + """Retrieve latest files concerning extendFrame feature.""" # Get asset asset_name = io.find_one( {"type": "asset", "name": asset_name}, projection={"name": True} @@ -58,9 +61,7 @@ def get_latest_version(asset_name, subset_name, family): def get_resources(version, extension=None): - """ - Get the files from the specific version - """ + """Get the files from the specific version.""" query = {"type": "representation", "parent": version["_id"]} if extension: query["name"] = extension @@ -80,14 +81,25 @@ def get_resources(version, extension=None): return resources -def get_resource_files(resources, frame_range, override=True): +def get_resource_files(resources, frame_range=None): + """Get resource files at given path. + If `frame_range` is specified those outside will be removed. + + Arguments: + resources (list): List of resources + frame_range (list): Frame range to apply override + + Returns: + list of str: list of collected resources + + """ res_collections, _ = clique.assemble(resources) assert len(res_collections) == 1, "Multiple collections found" res_collection = res_collections[0] # Remove any frames - if override: + if frame_range is not None: for frame in frame_range: if frame not in res_collection.indexes: continue @@ -147,7 +159,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_SERVER", "PYPE_SETUP_PATH", "PYPE_METADATA_FILE", - "AVALON_PROJECT" + "AVALON_PROJECT", + "PYPE_LOG_NO_COLORS" ] # pool used to do the publishing job @@ -169,10 +182,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families_transfer = ["render3d", "render2d", "ftrack", "slate"] def _submit_deadline_post_job(self, instance, job): - """ + """Submit publish job to Deadline. + Deadline specific code separated from :meth:`process` for sake of more universal code. Muster post job is sent directly by Muster submitter, so this type of code isn't necessary for it. + """ data = instance.data.copy() subset = data["subset"] @@ -225,6 +240,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): environment = job["Props"].get("Env", {}) environment["PYPE_METADATA_FILE"] = metadata_path environment["AVALON_PROJECT"] = io.Session["AVALON_PROJECT"] + environment["PYPE_LOG_NO_COLORS"] = "1" i = 0 for index, key in enumerate(environment): if key.upper() in self.enviro_filter: @@ -250,16 +266,20 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): raise Exception(response.text) def _copy_extend_frames(self, instance, representation): - """ + """Copy existing frames from latest version. + This will copy all existing frames from subset's latest version back to render directory and rename them to what renderer is expecting. 
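
As a rough illustration of that renaming step (the file names and ".<frame>." padding convention are invented for the example):

    import re

    def rename_to_current(source_name, current_template):
        # Pull the frame number out of the previous version's file name
        # and re-insert it into the naming the current render expects.
        match = re.search(r"\.(\d+)\.", source_name)
        if match is None:
            raise ValueError("no frame number found in " + source_name)
        return current_template.format(frame=match.group(1))

    print(rename_to_current("beauty_v001.1001.exr", "beauty_v002.{frame}.exr"))
    # beauty_v002.1001.exr
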
- :param instance: instance to get required data from - :type instance: pyblish.plugin.Instance - """ + Arguments: + instance (pyblish.plugin.Instance): instance to get required + data from + representation (dict): presentation to operate on + """ import speedcopy + anatomy = instance.context.data["anatomy"] self.log.info("Preparing to copy ...") start = instance.data.get("startFrame") end = instance.data.get("endFrame") @@ -297,9 +317,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # type assert fn is not None, "padding string wasn't found" # list of tuples (source, destination) + staging = representation.get("stagingDir") + staging = anatomy.fill_roots(staging) resource_files.append( (frame, - os.path.join(representation.get("stagingDir"), + os.path.join(staging, "{}{}{}".format(pre, fn.group("frame"), post))) @@ -319,19 +341,20 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Finished copying %i files" % len(resource_files)) def _create_instances_for_aov(self, instance_data, exp_files): - """ + """Create instance for each AOV found. + This will create new instance for every aov it can detect in expected files list. - :param instance_data: skeleton data for instance (those needed) later - by collector - :type instance_data: pyblish.plugin.Instance - :param exp_files: list of expected files divided by aovs - :type exp_files: list - :returns: list of instances - :rtype: list(publish.plugin.Instance) - """ + Arguments: + instance_data (pyblish.plugin.Instance): skeleton data for instance + (those needed) later by collector + exp_files (list): list of expected files divided by aovs + Returns: + list of instances + + """ task = os.environ["AVALON_TASK"] subset = instance_data["subset"] instances = [] @@ -354,7 +377,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): subset_name = '{}_{}'.format(group_name, aov) + anatomy = instance_data.context.data["anatomy"] + staging = os.path.dirname(list(cols[0])[0]) + success, rootless_staging_dir = ( + anatomy.roots_obj.find_root_template_from_path(staging) + ) + if success: + staging = rootless_staging_dir + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(staging)) self.log.info("Creating data for: {}".format(subset_name)) @@ -398,22 +433,25 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): return instances def _get_representations(self, instance, exp_files): - """ + """Create representations for file sequences. + This will return representations of expected files if they are not in hierarchy of aovs. There should be only one sequence of files for most cases, but if not - we create representation from each of them. 
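
The collection/remainder split this relies on comes from clique; a standalone example with invented file names:

    import clique

    files = [
        "beauty.1001.exr", "beauty.1002.exr", "beauty.1003.exr",
        "beauty_burnin.mov",
    ]
    collections, remainders = clique.assemble(files)
    # Sequences are grouped into Collection objects; anything that does
    # not form a sequence is left in the remainder list.
    print(collections)  # roughly: [<Collection "beauty.%04d.exr [1001-1003]">]
    print(remainders)   # ['beauty_burnin.mov']
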
- :param instance: instance for which we are setting representations - :type instance: pyblish.plugin.Instance - :param exp_files: list of expected files - :type exp_files: list - :returns: list of representations - :rtype: list(dict) - """ + Arguments: + instance (pyblish.plugin.Instance): instance for which we are + setting representations + exp_files (list): list of expected files + Returns: + list of representations + + """ representations = [] collections, remainders = clique.assemble(exp_files) bake_render_path = instance.get("bakeRenderPath") + anatomy = instance.context.data["anatomy"] # create representation for every collected sequence for collection in collections: @@ -435,6 +473,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if bake_render_path: preview = False + staging = os.path.dirname(list(collection)[0]) + success, rootless_staging_dir = ( + anatomy.roots_obj.find_root_template_from_path(staging) + ) + if success: + staging = rootless_staging_dir + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(staging)) + rep = { "name": ext, "ext": ext, @@ -442,7 +492,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "frameStart": int(instance.get("frameStartHandle")), "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames - "stagingDir": os.path.dirname(list(collection)[0]), + "stagingDir": staging, "anatomy_template": "render", "fps": instance.get("fps"), "tags": ["review", "preview"] if preview else [], @@ -458,6 +508,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # add reminders as representations for remainder in remainders: ext = remainder.split(".")[-1] + + staging = os.path.dirname(remainder) + success, rootless_staging_dir = ( + anatomy.roots_obj.find_root_template_from_path(staging) + ) + if success: + staging = rootless_staging_dir + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(staging)) + rep = { "name": ext, "ext": ext, @@ -490,7 +553,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): instance["families"] = families def process(self, instance): - """ + """Process plugin. + Detect type of renderfarm submission and create and post dependend job in case of Deadline. It creates json file with metadata needed for publishing in directory of render. @@ -631,6 +695,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): ) if success: repre["stagingDir"] = rootless_staging_dir + else: + self.log.warning(( + "Could not find root path for remapping \"{}\"." + " This may cause issues on farm." + ).format(staging_dir)) + repre["stagingDir"] = staging_dir if "publish_on_farm" in repre.get("tags"): # create representations attribute of not there @@ -774,12 +844,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): with open(metadata_path, "w") as f: json.dump(publish_job, f, indent=4, sort_keys=True) - def _extend_frames(self, asset, subset, start, end, override): - """ - This will get latest version of asset and update frame range based - on minimum and maximuma values - """ + def _extend_frames(self, asset, subset, start, end): + """Get latest version of asset nad update frame range. + Based on minimum and maximuma values. 
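
The widening itself reduces to a min/max comparison, sketched here outside the plugin:

    def extend_range(start, end, prev_start, prev_end):
        # Keep whichever bound is wider so the new publish also covers
        # frames already published in the previous version.
        return min(start, prev_start), max(end, prev_end)

    print(extend_range(1010, 1050, 1001, 1020))  # (1001, 1050)
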
+ + Arguments: + asset (str): asset name + subset (str): subset name + start (int): start frame + end (int): end frame + + Returns: + (int, int): upddate frame start/end + + """ # Frame comparison prev_start = None prev_end = None diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 7547f34ba1..239bad8f83 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -1,6 +1,17 @@ +# -*- coding: utf-8 -*- +"""Submitting render job to Deadline. + +This module is taking care of submitting job from Maya to Deadline. It +creates job and set correct environments. Its behavior is controlled by +`DEADLINE_REST_URL` environment variable - pointing to Deadline Web Service +and `MayaSubmitDeadline.use_published (bool)` property telling Deadline to +use published scene workfile or not. +""" + import os import json import getpass +import re import clique from maya import cmds @@ -14,7 +25,7 @@ import pype.maya.lib as lib def get_renderer_variables(renderlayer=None): - """Retrieve the extension which has been set in the VRay settings + """Retrieve the extension which has been set in the VRay settings. Will return None if the current renderer is not VRay For Maya 2016.5 and up the renderSetup creates renderSetupLayer node which @@ -25,8 +36,8 @@ def get_renderer_variables(renderlayer=None): Returns: dict - """ + """ renderer = lib.get_renderer(renderlayer or lib.get_current_renderlayer()) render_attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS["default"]) @@ -34,7 +45,7 @@ def get_renderer_variables(renderlayer=None): render_attrs["padding"])) filename_0 = cmds.renderSettings(fullPath=True, firstImageName=True)[0] - + prefix_attr = "defaultRenderGlobals.imageFilePrefix" if renderer == "vray": # Maya's renderSettings function does not return V-Ray file extension # so we get the extension from vraySettings @@ -46,62 +57,33 @@ def get_renderer_variables(renderlayer=None): if extension is None: extension = "png" - filename_prefix = "/_/" + if extension == "exr (multichannel)" or extension == "exr (deep)": + extension = "exr" + + prefix_attr = "vraySettings.fileNamePrefix" + elif renderer == "renderman": + prefix_attr = "rmanGlobals.imageFileFormat" + elif renderer == "redshift": + # mapping redshift extension dropdown values to strings + ext_mapping = ["iff", "exr", "tif", "png", "tga", "jpg"] + extension = ext_mapping[ + cmds.getAttr("redshiftOptions.imageFormat") + ] else: # Get the extension, getAttr defaultRenderGlobals.imageFormat # returns an index number. filename_base = os.path.basename(filename_0) extension = os.path.splitext(filename_base)[-1].strip(".") - filename_prefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix") + filename_prefix = cmds.getAttr(prefix_attr) return {"ext": extension, "filename_prefix": filename_prefix, "padding": padding, "filename_0": filename_0} -def preview_fname(folder, scene, layer, padding, ext): - """Return output file path with #### for padding. - - Deadline requires the path to be formatted with # in place of numbers. 
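
That conversion is a single substitution; note the digit class must be spelled r"\d" (the "/d" in the hunk further below would match a literal slash and never find the frame number). A minimal sketch:

    import re

    def to_hash_padding(filename_0, padding):
        # Replace the rendered frame number (e.g. "1001") with "#" signs,
        # which is the padding notation Deadline expects.
        return re.sub(r"\d{{{}}}".format(padding), "#" * padding, filename_0)

    print(to_hash_padding("renders/beauty.1001.exr", 4))
    # renders/beauty.####.exr
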
- For example `/path/to/render.####.png` - - Args: - folder (str): The root output folder (image path) - scene (str): The scene name - layer (str): The layer name to be rendered - padding (int): The padding length - ext(str): The output file extension - - Returns: - str - - """ - - fileprefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix") - output = fileprefix + ".{number}.{ext}" - # RenderPass is currently hardcoded to "beauty" because its not important - # for the deadline submission, but we will need something to replace - # "". - mapping = { - "": "{scene}", - "": "{layer}", - "RenderPass": "beauty" - } - for key, value in mapping.items(): - output = output.replace(key, value) - output = output.format( - scene=scene, - layer=layer, - number="#" * padding, - ext=ext - ) - - return os.path.join(folder, output) - - class MayaSubmitDeadline(pyblish.api.InstancePlugin): - """Submit available render layers to Deadline + """Submit available render layers to Deadline. Renders are submitted to a Deadline Web Service as supplied via the environment variable DEADLINE_REST_URL @@ -194,22 +176,17 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): filename = os.path.basename(filepath) comment = context.data.get("comment", "") - scene = os.path.splitext(filename)[0] dirname = os.path.join(workspace, "renders") renderlayer = instance.data['setMembers'] # rs_beauty - renderlayer_name = instance.data['subset'] # beauty - # renderlayer_globals = instance.data["renderGlobals"] - # legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] deadline_user = context.data.get("deadlineUser", getpass.getuser()) jobname = "%s - %s" % (filename, instance.name) # Get the variables depending on the renderer render_variables = get_renderer_variables(renderlayer) - output_filename_0 = preview_fname(folder=dirname, - scene=scene, - layer=renderlayer_name, - padding=render_variables["padding"], - ext=render_variables["ext"]) + output_filename_0 = re.sub( + "(/d+{{{}}})".format(render_variables["padding"]), + "#" * render_variables["padding"], + render_variables["filename_0"]) try: # Ensure render folder exists @@ -284,7 +261,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): for aov, files in exp[0].items(): col = clique.assemble(files)[0][0] outputFile = col.format('{head}{padding}{tail}') - payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile + payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile # noqa: E501 OutputFilenames[expIndex] = outputFile expIndex += 1 else: @@ -293,7 +270,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile # OutputFilenames[expIndex] = outputFile - # We need those to pass them to pype for it to set correct context keys = [ "FTRACK_API_KEY", @@ -334,7 +310,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): raise Exception(response.text) # Store output dir for unified publisher (filesequence) - instance.data["outputDir"] = os.path.dirname(output_filename_0) + instance.data["outputDir"] = os.path.dirname( + render_variables["filename_0"]) instance.data["deadlineSubmissionJob"] = response.json() def preflight_check(self, instance): From 9a7bd4aa608f31b78cc1a0c84970626aebaab331 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Thu, 7 May 2020 15:22:08 +0200 Subject: [PATCH 119/207] enclose paths in double quotes --- pype/scripts/publish_filesequence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 8b99d0560f..905c6b99ba 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -80,7 +80,7 @@ def __main__(): args = [ os.path.join(pype_root, pype_command), "publish", - " ".join(paths) + " ".join(['"{}"'.format(p) for p in paths]) ] print("Pype command: {}".format(" ".join(args))) From 88124a25891f63c8960f2583ff70b5bf0cb5cc10 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 7 May 2020 15:50:59 +0200 Subject: [PATCH 120/207] ContextCustomAttributeValue is used for querying hierarchical custom attribute values for better stability --- pype/ftrack/lib/avalon_sync.py | 79 +++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 31 deletions(-) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index e915e86184..be9dfd842d 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -692,7 +692,6 @@ class SyncEntitiesFactory: ent_type["name"]: ent_type["id"] for ent_type in ent_types } - attrs = set() # store default values per entity type attrs_per_entity_type = collections.defaultdict(dict) avalon_attrs = collections.defaultdict(dict) @@ -700,9 +699,10 @@ class SyncEntitiesFactory: attrs_per_entity_type_ca_id = collections.defaultdict(dict) avalon_attrs_ca_id = collections.defaultdict(dict) + attribute_key_by_id = {} for cust_attr in custom_attrs: key = cust_attr["key"] - attrs.add(cust_attr["id"]) + attribute_key_by_id[cust_attr["id"]] = key ca_ent_type = cust_attr["entity_type"] if key.startswith("avalon_"): if ca_ent_type == "show": @@ -776,7 +776,7 @@ class SyncEntitiesFactory: "\"{}\"".format(id) for id in sync_ids ]) attributes_joined = ", ".join([ - "\"{}\"".format(name) for name in attrs + "\"{}\"".format(attr_id) for attr_id in attribute_key_by_id.keys() ]) cust_attr_query = ( @@ -794,13 +794,13 @@ class SyncEntitiesFactory: else: [values] = self.session._call(call_expr) - for value in values["data"]: - entity_id = value["entity_id"] - key = value["configuration"]["key"] + for item in values["data"]: + entity_id = item["entity_id"] + key = attribute_key_by_id[item["configuration_id"]] store_key = "custom_attributes" if key.startswith("avalon_"): store_key = "avalon_attrs" - self.entities_dict[entity_id][store_key][key] = value["value"] + self.entities_dict[entity_id][store_key][key] = item["value"] # process hierarchical attributes self.set_hierarchical_attribute(hier_attrs, sync_ids) @@ -824,6 +824,21 @@ class SyncEntitiesFactory: attr["default"] ) + # Add attribute ids to entities dictionary + avalon_attribute_id_by_key = { + attr_key: attr_id + for attr_id, attr_key in attribute_key_by_id.items() + if attr_key.startswith("avalon_") + } + for entity_id, entity_dict in self.entities_dict.items(): + if "avalon_attrs_id" not in self.entities_dict[entity_id]: + self.entities_dict[entity_id]["avalon_attrs_id"] = {} + + for attr_key, attr_id in avalon_attribute_id_by_key.items(): + self.entities_dict[entity_id]["avalon_attrs_id"][attr_key] = ( + attr_id + ) + # Prepare dict with all hier keys and None values prepare_dict = {} prepare_dict_avalon = {} @@ -845,32 +860,34 @@ class SyncEntitiesFactory: entity_ids_joined = ", ".join([ "\"{}\"".format(id) for id in sync_ids ]) - + attributes_joined = ", ".join([ + "\"{}\"".format(attr_id) for attr_id in attribute_key_by_id.keys() + ]) avalon_hier = [] - for configuration_id in attribute_key_by_id.keys(): - call_expr = [{ - "action": "query", - "expression": 
( - "select value, entity_id from CustomAttributeValue " - "where entity_id in ({}) and configuration_id is \"{}\"" - ).format(entity_ids_joined, configuration_id) - }] - if hasattr(self.session, "call"): - [values] = self.session.call(call_expr) - else: - [values] = self.session._call(call_expr) + call_expr = [{ + "action": "query", + "expression": ( + "select value, entity_id from ContextCustomAttributeValue " + "where entity_id in ({}) and configuration_id in ({})" + ).format(entity_ids_joined, attributes_joined) + }] + if hasattr(self.session, "call"): + [values] = self.session.call(call_expr) + else: + [values] = self.session._call(call_expr) - for value in values["data"]: - if value["value"] is None: - continue - entity_id = value["entity_id"] - key = attribute_key_by_id[value["configuration_id"]] - if key.startswith("avalon_"): - store_key = "avalon_attrs" - avalon_hier.append(key) - else: - store_key = "hier_attrs" - self.entities_dict[entity_id][store_key][key] = value["value"] + for item in values["data"]: + value = item["value"] + if value is None: + continue + entity_id = item["entity_id"] + key = attribute_key_by_id[item["configuration_id"]] + if key.startswith("avalon_"): + store_key = "avalon_attrs" + avalon_hier.append(key) + else: + store_key = "hier_attrs" + self.entities_dict[entity_id][store_key][key] = value # Get dictionary with not None hierarchical values to pull to childs top_id = self.ft_project_id From aff69af21bb9eb5803dc5972544ca1430dee3f6c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 7 May 2020 15:53:27 +0200 Subject: [PATCH 121/207] removed unused variable --- pype/ftrack/lib/avalon_sync.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index be9dfd842d..293b2d0049 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -830,7 +830,7 @@ class SyncEntitiesFactory: for attr_id, attr_key in attribute_key_by_id.items() if attr_key.startswith("avalon_") } - for entity_id, entity_dict in self.entities_dict.items(): + for entity_id in self.entities_dict.keys(): if "avalon_attrs_id" not in self.entities_dict[entity_id]: self.entities_dict[entity_id]["avalon_attrs_id"] = {} From 5c0c6374627762d858294cb8cad3102e5154365b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 7 May 2020 16:50:01 +0200 Subject: [PATCH 122/207] collectors order, output path naming --- .../global/publish/collect_anatomy_object.py | 8 +++---- .../global/publish/collect_avalon_entities.py | 2 +- .../global/publish/collect_rendered_files.py | 3 ++- .../global/publish/submit_publish_job.py | 21 +++++++------------ .../maya/publish/submit_maya_deadline.py | 12 ++++++----- 5 files changed, 22 insertions(+), 24 deletions(-) diff --git a/pype/plugins/global/publish/collect_anatomy_object.py b/pype/plugins/global/publish/collect_anatomy_object.py index d9e6964050..22d924c88b 100644 --- a/pype/plugins/global/publish/collect_anatomy_object.py +++ b/pype/plugins/global/publish/collect_anatomy_object.py @@ -6,7 +6,7 @@ Requires: Provides: context -> anatomy (pypeapp.Anatomy) """ - +import os from avalon import io from pypeapp import Anatomy import pyblish.api @@ -15,12 +15,12 @@ import pyblish.api class CollectAnatomyObject(pyblish.api.ContextPlugin): """Collect Anatomy object into Context""" - order = pyblish.api.CollectorOrder - 0.11 + order = pyblish.api.CollectorOrder - 0.4 label = "Collect Anatomy Object" def process(self, context): - io.install() - project_name = 
io.Session.get("AVALON_PROJECT") + # io.install() + project_name = os.environ.get("AVALON_PROJECT") if project_name is None: raise AssertionError( "Environment `AVALON_PROJECT` is not set." diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index 53f11aa693..a92392a2dc 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -15,7 +15,7 @@ import pyblish.api class CollectAvalonEntities(pyblish.api.ContextPlugin): """Collect Anatomy into Context""" - order = pyblish.api.CollectorOrder - 0.02 + order = pyblish.api.CollectorOrder - 0.1 label = "Collect Avalon Entities" def process(self, context): diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 2ae3f0e3a3..93bf8c484f 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -20,13 +20,14 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): `PYPE_PUBLISH_DATA`. Those files _MUST_ share same context. """ - order = pyblish.api.CollectorOrder - 0.1 + order = pyblish.api.CollectorOrder - 0.2 targets = ["filesequence"] label = "Collect rendered frames" _context = None def _load_json(self, path): + path = path.strip('\"') assert os.path.isfile(path), ( "Path to json file doesn't exist. \"{}\"".format(path) ) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 8688d161e2..77b8022000 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -197,9 +197,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): output_dir = instance.data["outputDir"] # Convert output dir to `{root}/rest/of/path/...` with Anatomy - anatomy = instance.context.data["anatomy"] success, rootless_path = ( - anatomy.roots_obj.find_root_template_from_path(output_dir) + self.anatomy.roots_obj.find_root_template_from_path(output_dir) ) if not success: # `rootless_path` is not set to `output_dir` if none of roots match @@ -279,7 +278,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ import speedcopy - anatomy = instance.context.data["anatomy"] self.log.info("Preparing to copy ...") start = instance.data.get("startFrame") end = instance.data.get("endFrame") @@ -318,7 +316,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): assert fn is not None, "padding string wasn't found" # list of tuples (source, destination) staging = representation.get("stagingDir") - staging = anatomy.fill_roots(staging) + staging = self.anatomy.fill_roots(staging) resource_files.append( (frame, os.path.join(staging, @@ -377,11 +375,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): subset_name = '{}_{}'.format(group_name, aov) - anatomy = instance_data.context.data["anatomy"] - staging = os.path.dirname(list(cols[0])[0]) success, rootless_staging_dir = ( - anatomy.roots_obj.find_root_template_from_path(staging) + self.anatomy.roots_obj.find_root_template_from_path(staging) ) if success: staging = rootless_staging_dir @@ -451,7 +447,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): representations = [] collections, remainders = clique.assemble(exp_files) bake_render_path = instance.get("bakeRenderPath") - anatomy = instance.context.data["anatomy"] # create representation for every collected sequence for 
collection in collections: @@ -475,7 +470,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging = os.path.dirname(list(collection)[0]) success, rootless_staging_dir = ( - anatomy.roots_obj.find_root_template_from_path(staging) + self.anatomy.roots_obj.find_root_template_from_path(staging) ) if success: staging = rootless_staging_dir @@ -511,7 +506,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging = os.path.dirname(remainder) success, rootless_staging_dir = ( - anatomy.roots_obj.find_root_template_from_path(staging) + self.anatomy.roots_obj.find_root_template_from_path(staging) ) if success: staging = rootless_staging_dir @@ -565,6 +560,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): data = instance.data.copy() context = instance.context self.context = context + self.anatomy = instance.context.data["anatomy"] if hasattr(instance, "_log"): data['_log'] = instance._log @@ -624,9 +620,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): except KeyError: source = context.data["currentFile"] - anatomy = instance.context.data["anatomy"] success, rootless_path = ( - anatomy.roots_obj.find_root_template_from_path(source) + self.anatomy.roots_obj.find_root_template_from_path(source) ) if success: source = rootless_path @@ -691,7 +686,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging_dir = repre.get("stagingDir") if staging_dir: success, rootless_staging_dir = ( - anatomy.roots_obj.find_root_template_from_path(staging_dir) + self.anatomy.roots_obj.find_root_template_from_path(staging_dir) ) if success: repre["stagingDir"] = rootless_staging_dir diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 239bad8f83..404ef3de0c 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -44,7 +44,12 @@ def get_renderer_variables(renderlayer=None): padding = cmds.getAttr("{}.{}".format(render_attrs["node"], render_attrs["padding"])) - filename_0 = cmds.renderSettings(fullPath=True, firstImageName=True)[0] + filename_0 = cmds.renderSettings( + fullPath=True, + gin="#" * int(padding), + lut=True, + layer=renderlayer or lib.get_current_renderlayer())[0] + filename_0 = filename_0.replace('_', '_beauty') prefix_attr = "defaultRenderGlobals.imageFilePrefix" if renderer == "vray": # Maya's renderSettings function does not return V-Ray file extension @@ -183,10 +188,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Get the variables depending on the renderer render_variables = get_renderer_variables(renderlayer) - output_filename_0 = re.sub( - "(/d+{{{}}})".format(render_variables["padding"]), - "#" * render_variables["padding"], - render_variables["filename_0"]) + output_filename_0 = render_variables["filename_0"] try: # Ensure render folder exists From 379554c66f81aad68a2bfcc94413d0db97d7d4fb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 7 May 2020 16:57:55 +0200 Subject: [PATCH 123/207] replaced remapping of root with filling root in collect rendered files --- .../global/publish/collect_rendered_files.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 93bf8c484f..4ce2d448b9 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -42,15 +42,10 @@ class 
CollectRenderedFiles(pyblish.api.ContextPlugin): ) return data - def _remap_staging_dir(self, data_object, anatomy): + def _fill_staging_dir(self, data_object, anatomy): staging_dir = data_object.get("stagingDir") if staging_dir: - remapped = anatomy.roots_obj.path_remapper(staging_dir) - if remapped: - data_object["stagingDir"] = remapped - self.log.debug(( - "stagingDir was remapped. To: \"{}\" From: \"{}\"" - ).format(remapped, staging_dir)) + data_object["stagingDir"] = anatomy.fill_root(staging_dir) def _process_path(self, data, anatomy): # validate basic necessary data @@ -94,12 +89,12 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): ) self.log.info("Filling stagignDir...") - self._remap_staging_dir(instance_data, anatomy) + self._fill_staging_dir(instance_data, anatomy) instance.data.update(instance_data) representations = [] for repre_data in instance_data.get("representations") or []: - self._remap_staging_dir(repre_data, anatomy) + self._fill_staging_dir(repre_data, anatomy) representations.append(repre_data) instance.data["representations"] = representations From de7fe115320ae2ed43fe2c334acf824a8613241c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 7 May 2020 16:58:08 +0200 Subject: [PATCH 124/207] cleanup commit --- pype/plugins/global/publish/collect_anatomy_object.py | 2 -- pype/plugins/global/publish/submit_publish_job.py | 4 +++- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_anatomy_object.py b/pype/plugins/global/publish/collect_anatomy_object.py index 22d924c88b..8c01ea5c44 100644 --- a/pype/plugins/global/publish/collect_anatomy_object.py +++ b/pype/plugins/global/publish/collect_anatomy_object.py @@ -7,7 +7,6 @@ Provides: context -> anatomy (pypeapp.Anatomy) """ import os -from avalon import io from pypeapp import Anatomy import pyblish.api @@ -19,7 +18,6 @@ class CollectAnatomyObject(pyblish.api.ContextPlugin): label = "Collect Anatomy Object" def process(self, context): - # io.install() project_name = os.environ.get("AVALON_PROJECT") if project_name is None: raise AssertionError( diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 77b8022000..372373db61 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -686,7 +686,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging_dir = repre.get("stagingDir") if staging_dir: success, rootless_staging_dir = ( - self.anatomy.roots_obj.find_root_template_from_path(staging_dir) + self.anatomy.roots_obj.find_root_template_from_path( + staging_dir + ) ) if success: repre["stagingDir"] = rootless_staging_dir From 3b3f0a965c5643940af48b233df1cbde6ca35880 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Thu, 7 May 2020 17:03:28 +0200 Subject: [PATCH 125/207] handle published scene names --- pype/plugins/maya/publish/submit_maya_deadline.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 404ef3de0c..7d6437b81d 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -188,7 +188,15 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Get the variables depending on the renderer render_variables = get_renderer_variables(renderlayer) - output_filename_0 = render_variables["filename_0"] + 
filename_0 = render_variables["filename_0"] + if self.use_published: + new_scene = os.path.splitext(filename)[0] + orig_scene = os.path.splitext( + os.path.basename(context.data["currentFile"]))[0] + filename_0 = render_variables["filename_0"].replace( + orig_scene, new_scene) + + output_filename_0 = filename_0 try: # Ensure render folder exists @@ -312,8 +320,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): raise Exception(response.text) # Store output dir for unified publisher (filesequence) - instance.data["outputDir"] = os.path.dirname( - render_variables["filename_0"]) + instance.data["outputDir"] = os.path.dirname(filename_0) instance.data["deadlineSubmissionJob"] = response.json() def preflight_check(self, instance): From 6bb0caa15723d83a5b3308b8c7b8cfdb71537a38 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 7 May 2020 19:13:41 +0200 Subject: [PATCH 126/207] removed unnecessary paths --- .../global/publish/collect_rendered_files.py | 39 +++++++++++-------- .../global/publish/submit_publish_job.py | 3 -- .../nuke/publish/submit_nuke_deadline.py | 2 +- 3 files changed, 23 insertions(+), 21 deletions(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 4ce2d448b9..5229cd9705 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -119,21 +119,26 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): )) anatomy = context.data["anatomy"] - session_is_set = False - for path in paths: - path = anatomy.fill_root(path) - data = self._load_json(path) - assert data, "failed to load json file" - if not session_is_set: - session_data = data["session"] - remapped = anatomy.roots_obj.path_remapper( - session_data["AVALON_WORKDIR"] - ) - if remapped: - session_data["AVALON_WORKDIR"] = remapped + self.log.info("anatomy: {}".format(anatomy.roots)) + try: + session_is_set = False + for path in paths: + path = anatomy.fill_root(path) + data = self._load_json(path) + assert data, "failed to load json file" + if not session_is_set: + session_data = data["session"] + remapped = anatomy.roots_obj.path_remapper( + session_data["AVALON_WORKDIR"] + ) + if remapped: + session_data["AVALON_WORKDIR"] = remapped - self.log.info("Setting session using data from file") - api.Session.update(session_data) - os.environ.update(session_data) - session_is_set = True - self._process_path(data, anatomy) + self.log.info("Setting session using data from file") + api.Session.update(session_data) + os.environ.update(session_data) + session_is_set = True + self._process_path(data, anatomy) + except Exception as e: + self.log.error(e, exc_info=True) + raise Exception("Error") from e diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 372373db61..e0bd2c6ec0 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -152,12 +152,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): aov_filter = {"maya": ["beauty"]} enviro_filter = [ - "PATH", - "PYTHONPATH", "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", - "PYPE_SETUP_PATH", "PYPE_METADATA_FILE", "AVALON_PROJECT", "PYPE_LOG_NO_COLORS" diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 4552d320d6..e41eba3ad7 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ 
b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -201,7 +201,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): if path.lower().startswith('pype_'): environment[path] = os.environ[path] - environment["PATH"] = os.environ["PATH"] + # environment["PATH"] = os.environ["PATH"] # self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS'])) clean_environment = {} for key, value in environment.items(): From fdbb6a306efb537f03d58ed3cd78a7ce58d0b269 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 7 May 2020 19:47:28 +0200 Subject: [PATCH 127/207] fix(nks): duplicity in hierarchy tag and not matching names clips --- .../nukestudio/publish/collect_clips.py | 77 ++++++++++++------- .../publish/collect_hierarchy_context.py | 37 +++++++-- 2 files changed, 81 insertions(+), 33 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 746df67485..0e92193b14 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -47,11 +47,32 @@ class CollectClips(api.ContextPlugin): track = item.parent() source = item.source().mediaSource() source_path = source.firstpath() + clip_in = int(item.timelineIn()) + clip_out = int(item.timelineOut()) file_head = source.filenameHead() file_info = next((f for f in source.fileinfos()), None) source_first_frame = file_info.startFrame() is_sequence = False + self.log.debug( + "__ assets_shared: {}".format(context.data["assetsShared"])) + match_range = next( + (k for k, v in context.data["assetsShared"].items() + if (v.get("_clipIn", 0) == clip_in) + and (v.get("_clipOut", 0) == clip_out) + ), False) + + if asset in str(match_range): + match_range = False + + assert (not match_range), ( + "matching clip: {asset}" + " timeline range ({clip_in}:{clip_out})" + " conflicting with {match_range}" + " >> rename any of clips to be the same as the other <<" + ).format( + **locals()) + if not source.singleFile(): self.log.info("Single file") is_sequence = True @@ -89,32 +110,31 @@ class CollectClips(api.ContextPlugin): ) data.update({ - "name": "{0}_{1}".format(track.name(), item.name()), - "item": item, - "source": source, - "timecodeStart": str(source.timecodeStart()), - "timelineTimecodeStart": str(sequence.timecodeStart()), - "sourcePath": source_path, - "sourceFileHead": file_head, - "isSequence": is_sequence, - "track": track.name(), - "trackIndex": track_index, - "sourceFirst": source_first_frame, - "effects": effects, - "sourceIn": int(item.sourceIn()), - "sourceOut": int(item.sourceOut()), - "mediaDuration": (int(item.sourceOut()) - - int(item.sourceIn())) + 1, - "clipIn": int(item.timelineIn()), - "clipOut": int(item.timelineOut()), - "clipDuration": ( - int(item.timelineOut()) - int( - item.timelineIn())) + 1, - "asset": asset, - "family": "clip", - "families": [], - "handleStart": projectdata.get("handleStart", 0), - "handleEnd": projectdata.get("handleEnd", 0)}) + "name": "{0}_{1}".format(track.name(), item.name()), + "item": item, + "source": source, + "timecodeStart": str(source.timecodeStart()), + "timelineTimecodeStart": str(sequence.timecodeStart()), + "sourcePath": source_path, + "sourceFileHead": file_head, + "isSequence": is_sequence, + "track": track.name(), + "trackIndex": track_index, + "sourceFirst": source_first_frame, + "effects": effects, + "sourceIn": int(item.sourceIn()), + "sourceOut": int(item.sourceOut()), + "mediaDuration": source.duration(), + "clipIn": clip_in, + "clipOut": clip_out, + 
"clipDuration": ( + int(item.timelineOut()) - int( + item.timelineIn())) + 1, + "asset": asset, + "family": "clip", + "families": [], + "handleStart": projectdata.get("handleStart", 0), + "handleEnd": projectdata.get("handleEnd", 0)}) instance = context.create_instance(**data) @@ -122,7 +142,10 @@ class CollectClips(api.ContextPlugin): self.log.info("Created instance.data: {}".format(instance.data)) self.log.debug(">> effects: {}".format(instance.data["effects"])) - context.data["assetsShared"][asset] = dict() + context.data["assetsShared"][asset] = { + "_clipIn": clip_in, + "_clipOut": clip_out + } # from now we are collecting only subtrackitems on # track with no video items diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 5bc9bea7dd..a46baabe1b 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -37,11 +37,13 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): assets_shared = context.data.get("assetsShared") tags = instance.data.get("tags", None) clip = instance.data["item"] - asset = instance.data.get("asset") + asset = instance.data["asset"] sequence = context.data['activeSequence'] width = int(sequence.format().width()) height = int(sequence.format().height()) pixel_aspect = sequence.format().pixelAspect() + clip_in = instance.data["clipIn"] + clip_out = instance.data["clipOut"] fps = context.data["fps"] # build data for inner nukestudio project property @@ -72,6 +74,24 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): # and finding only hierarchical tag if "hierarchy" in t_type.lower(): + match = next( + (k for k, v in assets_shared.items() + if (v["_clipIn"] == clip_in) + and (v["_clipOut"] == clip_out) + ), False) + self.log.warning("Clip matching name: {}".format(match)) + self.log.debug( + "__ assets_shared[match]: {}".format( + assets_shared[match])) + # check if hierarchy key is in match + if not assets_shared[match].get("hierarchy"): + match = False + assert not match, ( + "Two clips above each other with" + " hierarchy tag are not allowed" + " >> keep hierarchy tag only in one of them <<" + ) + d_metadata = dict() parents = list() @@ -82,7 +102,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): if "shot" in template.lower(): instance.data["asset"] = [ t for t in template.split('/')][-1] - template = "/".join([t for t in template.split('/')][0:-1]) + template = "/".join( + [t for t in template.split('/')][0:-1]) # take template from Tag.note and break it into parts template_split = template.split("/") @@ -149,8 +170,12 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): instance.data["hierarchy"] = hierarchy instance.data["parents"] = parents + self.log.info( + "clip: {asset}[{clip_in}:{clip_out}]".format( + **locals())) # adding to asset shared dict - self.log.debug("__ assets_shared: {}".format(assets_shared)) + self.log.debug( + "__ assets_shared: {}".format(assets_shared)) if assets_shared.get(asset): self.log.debug("Adding to shared assets: `{}`".format( asset)) @@ -166,7 +191,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): "resolutionHeight": height, "pixelAspect": pixel_aspect, "fps": fps, - "tasks": instance.data["tasks"] + "tasks": instance.data["tasks"] }) # adding frame start if any on instance @@ -175,8 +200,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): asset_shared.update({ "startingFrame": start_frame 
})
-
-
+        self.log.debug(
+            "assets_shared: {assets_shared}".format(**locals()))

 class CollectHierarchyContext(pyblish.api.ContextPlugin):
     '''Collecting Hierarchy from instances and building

From 284534d02cb378b15bafe8e4e3a8d1263839b2de Mon Sep 17 00:00:00 2001
From: Toke Stuart Jepsen
Date: Fri, 8 May 2020 17:23:05 +0100
Subject: [PATCH 128/207] Nuke file knob was not updating.

The file knob was referencing the existing container's file path and not
updating to the new representation path.
---
 pype/plugins/nuke/load/load_sequence.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py
index 083cc86474..1ee8f0481e 100644
--- a/pype/plugins/nuke/load/load_sequence.py
+++ b/pype/plugins/nuke/load/load_sequence.py
@@ -237,7 +237,7 @@ class LoadSequence(api.Loader):

         repr_cont = representation["context"]

-        file = self.fname
+        file = api.get_representation_path(representation)

         if not file:
             repr_id = representation["_id"]

From 55c3be323f7aa106db3ccf60acd6e6eb57b39c2b Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 11 May 2020 15:56:48 +0200
Subject: [PATCH 129/207] added docstring to action_create_project_structure

---
 .../action_create_project_structure.py | 41 +++++++++++++++++++
 1 file changed, 41 insertions(+)

diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py
index d418a2e623..e1c5b6b837 100644
--- a/pype/ftrack/actions/action_create_project_structure.py
+++ b/pype/ftrack/actions/action_create_project_structure.py
@@ -6,6 +6,47 @@ from pypeapp import config, Anatomy


 class CreateProjectFolders(BaseAction):
+    """Action creates folder structure and may create hierarchy in Ftrack.
+
+    Creation of folder structure and hierarchy in Ftrack is based on presets.
+    These presets are located in:
+    `~/pype-config/presets/tools/project_folder_structure.json`
+
+    Example of content:
+    ```json
+    {
+        "__project_root__": {
+            "prod" : {},
+            "resources" : {
+                "footage": {
+                    "plates": {},
+                    "offline": {}
+                },
+                "audio": {},
+                "art_dept": {}
+            },
+            "editorial" : {},
+            "assets[ftrack.Library]": {
+                "characters[ftrack]": {},
+                "locations[ftrack]": {}
+            },
+            "shots[ftrack.Sequence]": {
+                "scripts": {},
+                "editorial[ftrack.Folder]": {}
+            }
+        }
+    }
+    ```
+    The key "__project_root__" indicates the root folder (or entity). Each
+    key in the dictionary represents a folder name, and its value may
+    contain another dictionary with subfolders.
+
+    The identifier `[ftrack]` in a name says that the item should also be
+    created in the Ftrack hierarchy. It is possible to specify the entity
+    type of an item with ".". If the key is `assets[ftrack.Library]`, an
+    entity named "assets" with entity type "Library" will be created in
+    Ftrack. It is expected that the Library entity type exists in Ftrack.
+    """

     identifier = "create.project.structure"
     label = "Create Project Structure"

From d9138da2b3d91eb9b98edf78544f7fd1e5e2e205 Mon Sep 17 00:00:00 2001
From: Toke Stuart Jepsen
Date: Mon, 11 May 2020 15:11:21 +0100
Subject: [PATCH 130/207] AOV toggling was not collected correctly.

Attribute overrides for AOVs only work with legacy render layers, because
the render setup does not connect to the attributes.
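As a side note on this fix, here is a minimal sketch of the layer-switching pattern the diff below applies. The layer name and the collection step are placeholders; the `renderSetup` calls are the ones used in the patch.

```python
import maya.app.renderSetup.model.renderSetup as renderSetup

rs = renderSetup.instance()
current_layer = rs.getVisibleRenderLayer()  # remember the artist's layer
try:
    # per-layer attribute overrides only take effect while the layer
    # is active, so switch before reading AOV states
    rs.switchToLayerUsingLegacyName("rs_myLayer")
    # ... query AOV enabled states for this layer here ...
finally:
    rs.switchToLayer(current_layer)  # restore the original layer
```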
--- pype/plugins/maya/publish/collect_render.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 456b584191..dbc0594c7c 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -122,6 +122,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): workspace = context.data["workspaceDir"] self._rs = renderSetup.instance() + current_layer = self._rs.getVisibleRenderLayer() maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()} self.maya_layers = maya_render_layers @@ -306,6 +307,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin): instance.data.update(data) self.log.debug("data: {}".format(json.dumps(data, indent=4))) + # Restore current layer. + self.log.info("Restoring to {}".format(current_layer.name())) + self._rs.switchToLayer(current_layer) + def parse_options(self, render_globals): """Get all overrides with a value, skip those without @@ -400,6 +405,8 @@ class ExpectedFiles: multipart = False def get(self, renderer, layer): + renderSetup.instance().switchToLayerUsingLegacyName(layer) + if renderer.lower() == "arnold": return self._get_files(ExpectedFilesArnold(layer)) elif renderer.lower() == "vray": From 6b47ec9b2c00eb45d38c489930a15f9060872206 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 11 May 2020 16:37:00 +0200 Subject: [PATCH 131/207] Removed publish method from avalon_apps rest api --- pype/avalon_apps/rest_api.py | 18 ------------------ 1 file changed, 18 deletions(-) diff --git a/pype/avalon_apps/rest_api.py b/pype/avalon_apps/rest_api.py index 1267ee3992..a552d70907 100644 --- a/pype/avalon_apps/rest_api.py +++ b/pype/avalon_apps/rest_api.py @@ -70,24 +70,6 @@ class AvalonRestApi(RestApi): _asset, identificator, _project_name )) - @RestApi.route("/publish/", - url_prefix="/premiere", methods="GET") - def publish(self, request): - """ - http://localhost:8021/premiere/publish/shot021?json_in=this/path/file_in.json&json_out=this/path/file_out.json - """ - asset_name = request.url_data["asset_name"] - query = request.query - data = request.request_data - - output = { - "message": "Got your data. Thanks.", - "your_data": data, - "your_query": query, - "your_asset_is": asset_name - } - return CallbackResult(data=self.result_to_json(output)) - def result_to_json(self, result): """ Converts result of MongoDB query to dict without $oid (ObjectId) keys with help of regex matching. 
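For reference, a minimal sketch of what the `result_to_json` docstring above describes. This is illustrative only; the standalone helper shape and the `bson.json_util` import are assumptions, not part of the patch.

```python
import re
import json
from bson import json_util  # ships with pymongo

def result_to_json(result):
    # ObjectId values serialize to {"$oid": "<24 hex chars>"}
    extended = json_util.dumps(result)
    # collapse the {"$oid": ...} wrappers into plain id strings
    plain = re.sub(r'\{"\$oid": ("[a-f0-9]{24}")\}', r'\1', extended)
    return json.loads(plain)
```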
From 165177834d41656ba6436b67d472548cad1dc4fe Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 11 May 2020 16:38:34 +0200
Subject: [PATCH 132/207] rest api handler does not log 4 error records at once

---
 pype/services/rest_api/lib/handler.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/pype/services/rest_api/lib/handler.py b/pype/services/rest_api/lib/handler.py
index dc94808237..fc4410a0bc 100644
--- a/pype/services/rest_api/lib/handler.py
+++ b/pype/services/rest_api/lib/handler.py
@@ -255,10 +255,9 @@ class Handler(http.server.SimpleHTTPRequestHandler):
         try:
             in_data = json.loads(in_data_str)
         except Exception as e:
-            log.error("Invalid JSON recieved:")
-            log.error("-" * 80)
-            log.error(in_data_str)
-            log.error("-" * 80)
+            log.error("Invalid JSON received: \"{}\"".format(
+                str(in_data_str)
+            ))
             raise Exception("Invalid JSON received") from e

         request_info = RequestInfo(

From 5ad17e5ac9384b7ba63e70ad581699938c309e01 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 11 May 2020 16:43:36 +0200
Subject: [PATCH 133/207] fixed store thumbnails action

---
 pype/ftrack/actions/action_store_thumbnails_to_avalon.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py
index 5e601b2150..c95010c5ce 100644
--- a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py
+++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py
@@ -52,7 +52,7 @@ class StoreThumbnailsToAvalon(BaseAction):
             })
             session.commit()

-        project = get_project_from_entity(entities[0])
+        project = self.get_project_from_entity(entities[0])
         project_name = project["full_name"]
         anatomy = Anatomy(project_name)

From d4899b69dc90f52901720e66562ac07f4621d43e Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Mon, 11 May 2020 19:27:58 +0200
Subject: [PATCH 134/207] feat(nks): adding empty black frames at handle start if missing

---
 .../publish/collect_clip_resolution.py | 21 +++++
 .../nukestudio/publish/collect_clips.py | 2 +-
 .../publish/collect_hierarchy_context.py | 10 +--
 .../nukestudio/publish/collect_plates.py | 10 +--
 .../nukestudio/publish/collect_reviews.py | 12 ++-
 .../publish/extract_review_cutup_video.py | 87 ++++++++++++++++---
 6 files changed, 119 insertions(+), 23 deletions(-)
 create mode 100644 pype/plugins/nukestudio/publish/collect_clip_resolution.py

diff --git a/pype/plugins/nukestudio/publish/collect_clip_resolution.py b/pype/plugins/nukestudio/publish/collect_clip_resolution.py
new file mode 100644
index 0000000000..b70f8f2f95
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_clip_resolution.py
@@ -0,0 +1,21 @@
+import pyblish.api
+
+
+class CollectClipResolution(pyblish.api.InstancePlugin):
+    """Collect clip geometry resolution"""
+
+    order = pyblish.api.CollectorOrder + 0.101
+    label = "Collect Clip Resolution"
+    hosts = ["nukestudio"]
+
+    def process(self, instance):
+        sequence = instance.context.data['activeSequence']
+        resolution_width = int(sequence.format().width())
+        resolution_height = int(sequence.format().height())
+        pixel_aspect = sequence.format().pixelAspect()
+
+        instance.data.update({
+            "resolutionWidth": resolution_width,
+            "resolutionHeight": resolution_height,
+            "pixelAspect": pixel_aspect
+        })

diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index 0e92193b14..81ab9b40dd 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ 
b/pype/plugins/nukestudio/publish/collect_clips.py @@ -124,7 +124,7 @@ class CollectClips(api.ContextPlugin): "effects": effects, "sourceIn": int(item.sourceIn()), "sourceOut": int(item.sourceOut()), - "mediaDuration": source.duration(), + "mediaDuration": int(source.duration()), "clipIn": clip_in, "clipOut": clip_out, "clipDuration": ( diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index a46baabe1b..edf08dec6e 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -39,9 +39,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): clip = instance.data["item"] asset = instance.data["asset"] sequence = context.data['activeSequence'] - width = int(sequence.format().width()) - height = int(sequence.format().height()) - pixel_aspect = sequence.format().pixelAspect() + resolution_width = instance.data["resolutionWidth"] + resolution_height = instance.data["resolutionHeight"] + pixel_aspect = instance.data["pixelAspect"] clip_in = instance.data["clipIn"] clip_out = instance.data["clipOut"] fps = context.data["fps"] @@ -187,8 +187,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): "asset": asset, "hierarchy": hierarchy, "parents": parents, - "resolutionWidth": width, - "resolutionHeight": height, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, "pixelAspect": pixel_aspect, "fps": fps, "tasks": instance.data["tasks"] diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index 4f21b02e3a..5e8c0ecedc 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -64,15 +64,15 @@ class CollectPlates(api.InstancePlugin): # adding SourceResolution if Tag was present if instance.data.get("sourceResolution") and instance.data.get("main"): item = instance.data["item"] - width = int(item.source().mediaSource().width()) - height = int(item.source().mediaSource().height()) + resolution_width = int(item.source().mediaSource().width()) + resolution_height = int(item.source().mediaSource().height()) pixel_aspect = int(item.source().mediaSource().pixelAspect()) self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format( - width, height, pixel_aspect)) + resolution_width, resolution_height, pixel_aspect)) data.update({ - "width": width, - "height": height, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, "pixelAspect": pixel_aspect }) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index 6919950b77..63b8b7d397 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -110,7 +110,15 @@ class CollectReviews(api.InstancePlugin): representation.update({ "frameStart": instance.data.get("sourceInH"), "frameEnd": instance.data.get("sourceOutH"), - "tags": ["cut-up", "delete"] + "tags": ["_cut-bigger", "delete"] + }) + elif media_duration < clip_duration_h: + self.log.debug("Media duration higher: {}".format( + (media_duration - clip_duration_h))) + representation.update({ + "frameStart": instance.data.get("sourceInH"), + "frameEnd": instance.data.get("sourceOutH"), + "tags": ["_cut-smaller", "delete"] }) instance.data["representations"].append(representation) @@ -133,7 +141,7 @@ class 
CollectReviews(api.InstancePlugin): self.log.debug("__ thumb_path: {}".format(thumb_path)) thumb_frame = instance.data["sourceIn"] + ((instance.data["sourceOut"] - instance.data["sourceIn"])/2) - + self.log.debug("__ thumb_frame: {}".format(thumb_frame)) thumbnail = item.thumbnail(thumb_frame).save( thumb_path, format='png' diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py index 445a26a184..1db6fbd13c 100644 --- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -6,8 +6,8 @@ import pype class ExtractReviewCutUpVideo(pype.api.Extractor): """Cut up clips from long video file""" - order = api.ExtractorOrder - # order = api.CollectorOrder + 0.1023 + # order = api.ExtractorOrder + order = api.CollectorOrder + 0.1023 label = "Extract Review CutUp Video" hosts = ["nukestudio"] families = ["review"] @@ -22,7 +22,16 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): # get representation and loop them representations = inst_data["representations"] + # resolution data + resolution_width = inst_data["resolutionWidth"] + resolution_height = inst_data["resolutionHeight"] + pixel_aspect = inst_data["pixelAspect"] + + # frame range data + media_duration = inst_data["mediaDuration"] + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") # filter out mov and img sequences representations_new = representations[:] @@ -32,7 +41,9 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): tags = repre.get("tags", []) - if "cut-up" not in tags: + if not next( + (t for t in tags + if t in ["_cut-bigger", "_cut-smaller"]), None): continue self.log.debug("__ repre: {}".format(repre)) @@ -49,21 +60,77 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): full_input_path = os.path.join( staging_dir, file) + full_output_dir = os.path.join( + staging_dir, "cuts") + + os.path.isdir(full_output_dir) or os.makedirs(full_output_dir) + full_output_path = os.path.join( - staging_dir, new_file_name) + full_output_dir, new_file_name) self.log.debug("__ full_input_path: {}".format(full_input_path)) self.log.debug("__ full_output_path: {}".format(full_output_path)) + # check if audio stream is in input video file + ffprob_cmd = ( + "{ffprobe_path} -i {full_input_path} -show_streams " + "-select_streams a -loglevel error" + ).format(**locals()) + self.log.debug("ffprob_cmd: {}".format(ffprob_cmd)) + audio_check_output = pype.api.subprocess(ffprob_cmd) + self.log.debug("audio_check_output: {}".format(audio_check_output)) + + # translate frame to sec + start_sec = float(frame_start) / fps + duration_sec = float(frame_end - frame_start + 1) / fps + input_args.append("-y") + + if start_sec < 0: + audio_empty = "" + audio_output = "" + audio_layer = "" + v_inp_idx = 0 + black_duration = abs(start_sec) + start_sec = 0 + duration_sec = float(frame_end - ( + frame_start + (black_duration * fps)) + 1) / fps + + if audio_check_output: + # adding input for empty audio + input_args.append("-f lavfi -i anullsrc") + audio_empty = ( + "[0]atrim=duration={black_duration}[ga0];" + ).format(**locals()) + audio_output = ":a=1" + audio_layer = "[ga0]" + v_inp_idx = 1 + + # adding input for video black frame + input_args.append(( + "-f lavfi -i \"color=c=black:" + "s={resolution_width}x{resolution_height}:r={fps}\"" + ).format(**locals())) + + # concutting black frame togather + output_args.append(( + "-filter_complex \"" + 
"{audio_empty}" + "[{v_inp_idx}]trim=duration={black_duration}[gv0];" + "[gv0]{audio_layer}[1:v]" + "concat=n=2:v=1{audio_output}\"" + ).format(**locals())) + + input_args.append("-ss {:0.2f}".format(start_sec)) + input_args.append("-t {:0.2f}".format(duration_sec)) input_args.append("-i {}".format(full_input_path)) - start_sec = float(frame_start) / fps - input_args.append("-ss {:0.2f}".format(start_sec)) + # check if not missing frames at the end + self.log.debug("media_duration: {}".format(media_duration)) + self.log.debug("frame_end: {}".format(frame_end)) - output_args.append("-c copy") - duration_sec = float(frame_end - frame_start + 1) / fps - output_args.append("-t {:0.2f}".format(duration_sec)) + # make sure it is having no frame to frame comprassion + output_args.append("-intra") # output filename output_args.append(full_output_path) @@ -90,7 +157,7 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): "step": 1, "fps": fps, "name": "cut_up_preview", - "tags": ["cut-up", "review", "delete"] + self.tags_addition, + "tags": ["review", "delete"] + self.tags_addition, "ext": ext, "anatomy_template": "publish" } From 4ff38405d142668c282ca2a4bf6528558dcab65b Mon Sep 17 00:00:00 2001 From: Toke Stuart Jepsen Date: Mon, 11 May 2020 22:10:29 +0100 Subject: [PATCH 135/207] Deadline wait for scene file to sync. --- pype/plugins/fusion/publish/submit_deadline.py | 3 +++ pype/plugins/maya/publish/submit_maya_deadline.py | 3 +++ pype/plugins/nuke/publish/submit_nuke_deadline.py | 3 +++ 3 files changed, 9 insertions(+) diff --git a/pype/plugins/fusion/publish/submit_deadline.py b/pype/plugins/fusion/publish/submit_deadline.py index 6b65f9fe05..e5deb1b070 100644 --- a/pype/plugins/fusion/publish/submit_deadline.py +++ b/pype/plugins/fusion/publish/submit_deadline.py @@ -68,6 +68,9 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin): # Top-level group name "BatchName": filename, + # Asset dependency to wait for at least the scene file to sync. + "AssetDependency0": filepath, + # Job name, as seen in Monitor "Name": filename, diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 7547f34ba1..b25c05643c 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -226,6 +226,9 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Top-level group name "BatchName": filename, + # Asset dependency to wait for at least the scene file to sync. + "AssetDependency0": filepath, + # Job name, as seen in Monitor "Name": jobname, diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 7990c20112..25556fc8bf 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -128,6 +128,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # Top-level group name "BatchName": script_name, + # Asset dependency to wait for at least the scene file to sync. 
+ "AssetDependency0": script_path, + # Job name, as seen in Monitor "Name": jobname, From 307c12de5b1da1a384681abccbcbcf2504572e36 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 12 May 2020 10:55:55 +0200 Subject: [PATCH 136/207] small fix using string key instead of not existing variable `version` --- pype/plugins/nukestudio/publish/collect_reviews.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index b91d390e2e..c7fb5222b0 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -159,7 +159,7 @@ class CollectReviews(api.InstancePlugin): version_data.update({k: instance.data[k] for k in transfer_data}) if 'version' in instance.data: - version_data["version"] = instance.data[version] + version_data["version"] = instance.data["version"] # add to data of representation version_data.update({ From ea4afbefb99cc4ba19f2c1fe4bae4894786d7266 Mon Sep 17 00:00:00 2001 From: Toke Stuart Jepsen Date: Tue, 12 May 2020 15:24:03 +0100 Subject: [PATCH 137/207] Check inventory versions on file open. --- pype/nuke/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index e775468996..5ab996b78a 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -61,7 +61,6 @@ def reload_config(): reload(module) - def install(): ''' Installing all requarements for Nuke host ''' @@ -72,6 +71,9 @@ def install(): avalon.register_plugin_path(avalon.Creator, CREATE_PATH) avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH) + # Register Avalon event for workfiles loading. + avalon.on("workio.open_file", lib.check_inventory_versions) + pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled) workfile_settings = lib.WorkfileSettings() # Disable all families except for the ones we explicitly want to see From a270c55a31ec1fb80e2c4a12574d3a6d851c474b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 12 May 2020 16:59:49 +0200 Subject: [PATCH 138/207] fix(nks): collect timecodes not used --- pype/plugins/nukestudio/{publish => _unused}/collect_timecodes.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pype/plugins/nukestudio/{publish => _unused}/collect_timecodes.py (100%) diff --git a/pype/plugins/nukestudio/publish/collect_timecodes.py b/pype/plugins/nukestudio/_unused/collect_timecodes.py similarity index 100% rename from pype/plugins/nukestudio/publish/collect_timecodes.py rename to pype/plugins/nukestudio/_unused/collect_timecodes.py From 7d689cfe258b8915bae62a1c7b690d9ea0afe590 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 12 May 2020 17:00:35 +0200 Subject: [PATCH 139/207] feat(nks): reediting shorter or longer clips with empty frames --- .../publish/extract_review_cutup_video.py | 107 ++++++++++++++---- 1 file changed, 83 insertions(+), 24 deletions(-) diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py index 1db6fbd13c..4a546910ad 100644 --- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -22,10 +22,9 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): # get representation and loop them representations = inst_data["representations"] - # resolution data + # get resolution default resolution_width = 
inst_data["resolutionWidth"] resolution_height = inst_data["resolutionHeight"] - pixel_aspect = inst_data["pixelAspect"] # frame range data media_duration = inst_data["mediaDuration"] @@ -86,24 +85,39 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): input_args.append("-y") - if start_sec < 0: - audio_empty = "" - audio_output = "" - audio_layer = "" + # check if not missing frames at start + if (start_sec < 0) or (media_duration < frame_end): + # init empty variables + video_empty_start = video_layer_start = "" + audio_empty_start = audio_layer_start = "" + video_empty_end = video_layer_end = "" + audio_empty_end = audio_layer_end = "" + audio_input = audio_output = "" v_inp_idx = 0 - black_duration = abs(start_sec) - start_sec = 0 - duration_sec = float(frame_end - ( - frame_start + (black_duration * fps)) + 1) / fps + concat_n = 1 + + # try to get video native resolution data + try: + resolution_output = pype.api.subprocess(( + "{ffprobe_path} -i {full_input_path} -v error " + "-select_streams v:0 -show_entries " + "stream=width,height -of csv=s=x:p=0" + ).format(**locals())) + + x, y = resolution_output.split("x") + resolution_width = int(x) + resolution_height = int(y) + except Exception as E: + self.log.warning( + "Video native resolution is untracable: {}".format(E)) if audio_check_output: # adding input for empty audio input_args.append("-f lavfi -i anullsrc") - audio_empty = ( - "[0]atrim=duration={black_duration}[ga0];" - ).format(**locals()) + + # define audio empty concat variables + audio_input = "[1:a]" audio_output = ":a=1" - audio_layer = "[ga0]" v_inp_idx = 1 # adding input for video black frame @@ -112,23 +126,68 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): "s={resolution_width}x{resolution_height}:r={fps}\"" ).format(**locals())) - # concutting black frame togather + if (start_sec < 0): + # recalculate input video timing + empty_start_dur = abs(start_sec) + start_sec = 0 + duration_sec = float(frame_end - ( + frame_start + (empty_start_dur * fps)) + 1) / fps + + # define starting empty video concat variables + video_empty_start = ( + "[{v_inp_idx}]trim=duration={empty_start_dur}[gv0];" + ).format(**locals()) + video_layer_start = "[gv0]" + + if audio_check_output: + # define starting empty audio concat variables + audio_empty_start = ( + "[0]atrim=duration={empty_start_dur}[ga0];" + ).format(**locals()) + audio_layer_start = "[ga0]" + + # alter concat number of clips + concat_n += 1 + + # check if not missing frames at the end + if (media_duration < frame_end): + # recalculate timing + empty_end_dur = float(frame_end - media_duration + 1) / fps + duration_sec = float(media_duration - frame_start) / fps + + # define ending empty video concat variables + video_empty_end = ( + "[{v_inp_idx}]trim=duration={empty_end_dur}[gv1];" + ).format(**locals()) + video_layer_end = "[gv1]" + + if audio_check_output: + # define ending empty audio concat variables + audio_empty_end = ( + "[0]atrim=duration={empty_end_dur}[ga1];" + ).format(**locals()) + audio_layer_end = "[ga0]" + + # alter concat number of clips + concat_n += 1 + + # concatting black frame togather output_args.append(( "-filter_complex \"" - "{audio_empty}" - "[{v_inp_idx}]trim=duration={black_duration}[gv0];" - "[gv0]{audio_layer}[1:v]" - "concat=n=2:v=1{audio_output}\"" + "{audio_empty_start}" + "{video_empty_start}" + "{audio_empty_end}" + "{video_empty_end}" + "{video_layer_start}{audio_layer_start}[1:v]{audio_input}" + "{video_layer_end}{audio_layer_end}" + "concat=n={concat_n}:v=1{audio_output}\"" 
).format(**locals())) + # append ffmpeg input video clip input_args.append("-ss {:0.2f}".format(start_sec)) input_args.append("-t {:0.2f}".format(duration_sec)) input_args.append("-i {}".format(full_input_path)) - # check if not missing frames at the end - self.log.debug("media_duration: {}".format(media_duration)) - self.log.debug("frame_end: {}".format(frame_end)) - # make sure it is having no frame to frame comprassion output_args.append("-intra") @@ -170,5 +229,5 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): representations_new.remove(repre) self.log.debug( - "new representations: {}".format(representations_new)) + "Representations: {}".format(representations_new)) instance.data["representations"] = representations_new From 777eea927586e7ec5c778a6da1c0dbb38ccc8faa Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 12 May 2020 17:21:42 +0200 Subject: [PATCH 140/207] support darwin for launching apps --- pype/ftrack/lib/ftrack_app_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py index 21c49e7819..30e7d5866c 100644 --- a/pype/ftrack/lib/ftrack_app_handler.py +++ b/pype/ftrack/lib/ftrack_app_handler.py @@ -259,7 +259,7 @@ class AppAction(BaseAction): executable=execfile, args=[], environment=env ) - elif sys.platform.startswith("linux"): + elif sys.platform.startswith("linux") or sys.platform.startswith("darwin"): execfile = os.path.join(path.strip('"'), self.executable) if not os.path.isfile(execfile): msg = "Launcher doesn't exist - {}".format(execfile) @@ -303,7 +303,7 @@ class AppAction(BaseAction): ) } - popen = avalonlib.launch( + popen = avalon.lib.launch( # noqa: F841 "/usr/bin/env", args=["bash", execfile], environment=env ) From 8f3f3a4d921622e1e53bb6cc6f6b110fc028bb58 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 12 May 2020 17:22:05 +0200 Subject: [PATCH 141/207] feat(nks): PR comments --- .../nukestudio/publish/collect_clips.py | 3 ++- .../nukestudio/publish/collect_reviews.py | 27 +++++++++++-------- .../publish/extract_review_cutup_video.py | 10 ++++--- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 81ab9b40dd..94ec72a2ed 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -55,7 +55,8 @@ class CollectClips(api.ContextPlugin): is_sequence = False self.log.debug( - "__ assets_shared: {}".format(context.data["assetsShared"])) + "__ assets_shared: {}".format( + context.data["assetsShared"])) match_range = next( (k for k, v in context.data["assetsShared"].items() if (v.get("_clipIn", 0) == clip_in) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index 63b8b7d397..aa8c60767c 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -36,9 +36,10 @@ class CollectReviews(api.InstancePlugin): return if not track: - self.log.debug( - "Skipping \"{}\" because tag is not having `track` in metadata".format(instance) - ) + self.log.debug(( + "Skipping \"{}\" because tag is not having" + "`track` in metadata" + ).format(instance)) return # add to representations @@ -68,10 +69,11 @@ class CollectReviews(api.InstancePlugin): rev_inst.data["name"])) if rev_inst is None: - raise RuntimeError( - "TrackItem from track name `{}` has to be also 
selected".format( - track) - ) + raise RuntimeError(( + "TrackItem from track name `{}` has to" + "be also selected" + ).format(track)) + instance.data["families"].append("review") file_path = rev_inst.data.get("sourcePath") @@ -140,15 +142,18 @@ class CollectReviews(api.InstancePlugin): thumb_path = os.path.join(staging_dir, thumb_file) self.log.debug("__ thumb_path: {}".format(thumb_path)) - thumb_frame = instance.data["sourceIn"] + ((instance.data["sourceOut"] - instance.data["sourceIn"])/2) + thumb_frame = instance.data["sourceIn"] + ( + (instance.data["sourceOut"] - instance.data["sourceIn"]) / 2) self.log.debug("__ thumb_frame: {}".format(thumb_frame)) thumbnail = item.thumbnail(thumb_frame).save( thumb_path, format='png' ) - self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"])) - self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) + self.log.debug( + "__ sourceIn: `{}`".format(instance.data["sourceIn"])) + self.log.debug( + "__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) self.log.debug("__ thumbnail: {}".format(thumbnail)) @@ -177,7 +182,7 @@ class CollectReviews(api.InstancePlugin): version_data.update({k: instance.data[k] for k in transfer_data}) if 'version' in instance.data: - version_data["version"] = instance.data[version] + version_data["version"] = instance.data["version"] # add to data of representation version_data.update({ diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py index 4a546910ad..13fc8dbcf7 100644 --- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -40,9 +40,13 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): tags = repre.get("tags", []) - if not next( - (t for t in tags - if t in ["_cut-bigger", "_cut-smaller"]), None): + # check if supported tags are in representation for activation + filter_tag = False + for tag in ["_cut-bigger", "_cut-smaller"]: + if tag in tags: + filter_tag = True + break + if not filter_tag: continue self.log.debug("__ repre: {}".format(repre)) From 4342db71ce57f2930d09b6966aa72d07e29e2b81 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 12 May 2020 17:46:20 +0200 Subject: [PATCH 142/207] feat(nks): adding comment making more clear the process --- .../nukestudio/publish/collect_hierarchy_context.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index edf08dec6e..0cb7e7f56a 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -74,11 +74,14 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): # and finding only hierarchical tag if "hierarchy" in t_type.lower(): - match = next( - (k for k, v in assets_shared.items() - if (v["_clipIn"] == clip_in) - and (v["_clipOut"] == clip_out) - ), False) + # check if any clip with the same clip range + # is alerady in asset shared so + match = next(( + k for k, v in assets_shared.items() + if (v["_clipIn"] == clip_in) + and (v["_clipOut"] == clip_out) + ), False) + self.log.warning("Clip matching name: {}".format(match)) self.log.debug( "__ assets_shared[match]: {}".format( From 4c5d0fafd78a0c6e481210413f455464bb0c055a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Tue, 12 May 2020 
17:48:55 +0200
Subject: [PATCH 143/207] fixed PEP issues

---
 pype/ftrack/lib/ftrack_app_handler.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py
index 30e7d5866c..f91695edf0 100644
--- a/pype/ftrack/lib/ftrack_app_handler.py
+++ b/pype/ftrack/lib/ftrack_app_handler.py
@@ -259,7 +259,8 @@ class AppAction(BaseAction):
executable=execfile, args=[], environment=env
)
- elif sys.platform.startswith("linux") or sys.platform.startswith("darwin"):
+ elif (sys.platform.startswith("linux")
+ or sys.platform.startswith("darwin")):
execfile = os.path.join(path.strip('"'), self.executable)
if not os.path.isfile(execfile):
msg = "Launcher doesn't exist - {}".format(execfile)

From 0d7ed78843412a0d474c8a558d448f3f0926f6bf Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 12 May 2020 17:49:56 +0200
Subject: [PATCH 144/207] feat(nks): resolving PR comment

---
 pype/plugins/nukestudio/publish/extract_review_cutup_video.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py
index 13fc8dbcf7..30225ae7f2 100644
--- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py
+++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py
@@ -87,8 +87,6 @@ class ExtractReviewCutUpVideo(pype.api.Extractor):
start_sec = float(frame_start) / fps
duration_sec = float(frame_end - frame_start + 1) / fps

- input_args.append("-y")
-
# check if not missing frames at start
if (start_sec < 0) or (media_duration < frame_end):
# init empty variables

From 77d3611b9fd39df2866432305d55b67085bfb989 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Wed, 13 May 2020 11:14:06 +0200
Subject: [PATCH 145/207] feat(nks): processing PR comments - better readability

---
 .../nukestudio/publish/collect_clips.py | 19 ++++++++++++++-----
 .../publish/collect_hierarchy_context.py | 12 ++++++++----
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index 94ec72a2ed..5625a471dc 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -57,19 +57,28 @@ class CollectClips(api.ContextPlugin):
self.log.debug(
"__ assets_shared: {}".format(
context.data["assetsShared"]))
- match_range = next(
+
+ # Check for clips with the same range
+ # this is for testing if any vertically neighbouring
+ # clips have already been processed
+ clip_matching_with_range = next(
(k for k, v in context.data["assetsShared"].items()
if (v.get("_clipIn", 0) == clip_in)
and (v.get("_clipOut", 0) == clip_out)
), False)

- if asset in str(match_range):
- match_range = False
+ # check if clip name is the same in matched
+ # vertically neighbouring clip
+ # if it is, then it is correct; reset the variable to False
+ # so the wrong name exception is not raised
+ if asset in str(clip_matching_with_range):
+ clip_matching_with_range = False

- assert (not match_range), (
+ # raise wrong name exception if one is found
+ assert (not clip_matching_with_range), (
"matching clip: {asset}"
" timeline range ({clip_in}:{clip_out})"
- " conflicting with {match_range}"
+ " conflicting with
{clip_matching_with_range}" " >> rename any of clips to be the same as the other <<" ).format( **locals()) diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 0cb7e7f56a..38040f8c51 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -74,21 +74,25 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): # and finding only hierarchical tag if "hierarchy" in t_type.lower(): - # check if any clip with the same clip range - # is alerady in asset shared so + # Check for clips with the same range + # this is for testing if any vertically neighbouring + # clips has been already processed match = next(( k for k, v in assets_shared.items() if (v["_clipIn"] == clip_in) and (v["_clipOut"] == clip_out) ), False) - self.log.warning("Clip matching name: {}".format(match)) self.log.debug( "__ assets_shared[match]: {}".format( assets_shared[match])) - # check if hierarchy key is in match + + # check if hierarchy key is present in matched + # vertically neighbouring clip if not assets_shared[match].get("hierarchy"): match = False + + # rise exception if multiple hierarchy tag found assert not match, ( "Two clips above each other with" " hierarchy tag are not allowed" From aa0ae99daa95c7a5b320914f4eb20e648db7123d Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 13 May 2020 11:33:03 +0200 Subject: [PATCH 146/207] fix(nks): plugin should be in extract order.. previous was for testing --- pype/plugins/nukestudio/publish/extract_review_cutup_video.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py index 30225ae7f2..dd4fc0cfff 100644 --- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -6,8 +6,8 @@ import pype class ExtractReviewCutUpVideo(pype.api.Extractor): """Cut up clips from long video file""" - # order = api.ExtractorOrder - order = api.CollectorOrder + 0.1023 + order = api.ExtractorOrder + # order = api.CollectorOrder + 0.1023 label = "Extract Review CutUp Video" hosts = ["nukestudio"] families = ["review"] From cb12a83ff94a477880a7b869f6a5dc872b1483fd Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 13 May 2020 11:43:06 +0200 Subject: [PATCH 147/207] feat(nks): only copy codec if source should be shortened --- pype/plugins/nukestudio/publish/extract_review_cutup_video.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py index dd4fc0cfff..8647f0817d 100644 --- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -190,6 +190,10 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): input_args.append("-t {:0.2f}".format(duration_sec)) input_args.append("-i {}".format(full_input_path)) + # add copy audio video codec if only shortening clip + if "_cut-bigger" in tags: + output_args.append("-c:v copy") + # make sure it is having no frame to frame comprassion output_args.append("-intra") From a3a9ffd581d4a67696221c92353d0bd3bc605045 Mon Sep 17 00:00:00 2001 From: Toke Stuart Jepsen Date: Wed, 13 May 2020 12:54:35 +0100 Subject: [PATCH 148/207] Add Harmony icon --- 
 res/app_icons/harmony.png | Bin 0 -> 30038 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 res/app_icons/harmony.png

diff --git a/res/app_icons/harmony.png b/res/app_icons/harmony.png
new file mode 100644
index 0000000000000000000000000000000000000000..f0f6c82c6edfb6b3a3d43b9d1bf495c5928eabef
GIT binary patch
literal 30038
[base85-encoded binary PNG data omitted: 30038 bytes]
From 15815360e4da47e31e731aa6b1081109ba78f2d7 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Wed, 13 May 2020 15:00:04 +0200
Subject: [PATCH 149/207] fix(global): integrate didn't understand new Anatomy

---
 pype/plugins/global/publish/integrate_new.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index f7ea4eebf9..6e237e46e9 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -301,6 +301,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
sequence_repre = isinstance(files, list)
repre_context = None
if sequence_repre:
+ self.log.debug(
+ "files: {}".format(files))
src_collections, remainder = clique.assemble(files)
self.log.debug(
"src_tail_collections: {}".format(str(src_collections)))
@@ -342,7 +344,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
if repre.get("frameStart"):
frame_start_padding = (
- anatomy.templates["render"]["padding"]
+ anatomy.templates["render"].get(
+ "padding",
+ anatomy.templates["frame_padding"]
+ )
)
index_frame_start = int(repre.get("frameStart"))

From f1dd4fcf4c44f34c1fe492b4bef06bd2558f4e33 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Wed, 13 May 2020 15:05:36 +0200
Subject: [PATCH 150/207] fix(global): integrate swap order of padding request from anatomy

---
 pype/plugins/global/publish/integrate_new.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 6e237e46e9..b054d8c309 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -343,12 +343,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
index_frame_start = None

if repre.get("frameStart"):
- frame_start_padding = (
- anatomy.templates["render"].get(
- "padding",
anatomy.templates["frame_padding"] - ) + # TODO: bck compatibility `templates["render"]["padding"]` + frame_start_padding = anatomy.templates.get( + "frame_padding", + anatomy.templates["render"].get("padding") ) + index_frame_start = int(repre.get("frameStart")) # exception for slate workflow From 59c5f363905e89a707352b05f4c72575f6061f72 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 13 May 2020 15:06:14 +0200 Subject: [PATCH 151/207] fix(nks): frame start was not correctly calculated --- pype/plugins/nukestudio/publish/collect_clips.py | 2 +- pype/plugins/nukestudio/publish/collect_frame_ranges.py | 1 - pype/plugins/nukestudio/publish/collect_plates.py | 4 +--- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 5625a471dc..d39e25bfc6 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -51,7 +51,7 @@ class CollectClips(api.ContextPlugin): clip_out = int(item.timelineOut()) file_head = source.filenameHead() file_info = next((f for f in source.fileinfos()), None) - source_first_frame = file_info.startFrame() + source_first_frame = int(file_info.startFrame()) is_sequence = False self.log.debug( diff --git a/pype/plugins/nukestudio/publish/collect_frame_ranges.py b/pype/plugins/nukestudio/publish/collect_frame_ranges.py index 6993fa5e67..1cb5e5dd1e 100644 --- a/pype/plugins/nukestudio/publish/collect_frame_ranges.py +++ b/pype/plugins/nukestudio/publish/collect_frame_ranges.py @@ -36,7 +36,6 @@ class CollectClipFrameRanges(pyblish.api.InstancePlugin): frame_end = frame_start + (timeline_out - timeline_in) data.update({ - "sourceFirst": source_in_h, "sourceInH": source_in_h, "sourceOutH": source_out_h, "frameStart": frame_start, diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index 5e8c0ecedc..770cef7e3f 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -102,9 +102,6 @@ class CollectPlatesData(api.InstancePlugin): instance.data["representations"] = list() version_data = dict() - context = instance.context - anatomy = context.data.get("anatomy", None) - padding = int(anatomy.templates['render']['padding']) name = instance.data["subset"] source_path = instance.data["sourcePath"] @@ -149,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin): source_first_frame = instance.data.get("sourceFirst") source_file_head = instance.data.get("sourceFileHead") + self.log.debug("source_first_frame: `{}`".format(source_first_frame)) if instance.data.get("isSequence", False): self.log.info("Is sequence of files") From cb42c78a8c9f3fba54268cf73cfedd8c39dd39c7 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 13 May 2020 15:06:43 +0200 Subject: [PATCH 152/207] fix(nks): cutting videos added to own folder `cuts` --- pype/plugins/nukestudio/publish/extract_review_cutup_video.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py index 8647f0817d..b5c0e8387d 100644 --- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py +++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py @@ -215,7 +215,7 @@ class ExtractReviewCutUpVideo(pype.api.Extractor): repre_new = { "files": new_file_name, - "stagingDir": staging_dir, + 
"stagingDir": full_output_dir, "frameStart": frame_start, "frameEnd": frame_end, "frameStartFtrack": frame_start, From 23e6e05d79722c61ade0e1c27ecf768a0ecb0288 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 18:41:32 +0200 Subject: [PATCH 153/207] removed unused collect project root plugin from nukestudio --- .../nukestudio/publish/collect_project_root.py | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100644 pype/plugins/nukestudio/publish/collect_project_root.py diff --git a/pype/plugins/nukestudio/publish/collect_project_root.py b/pype/plugins/nukestudio/publish/collect_project_root.py deleted file mode 100644 index 1b21a6b641..0000000000 --- a/pype/plugins/nukestudio/publish/collect_project_root.py +++ /dev/null @@ -1,15 +0,0 @@ -import pyblish.api -import avalon.api as avalon -import os - -class CollectActiveProjectRoot(pyblish.api.ContextPlugin): - """Inject the active project into context""" - - label = "Collect Project Root" - order = pyblish.api.CollectorOrder - 0.1 - - def process(self, context): - S = avalon.Session - context.data["projectroot"] = os.path.normpath( - os.path.join(S['AVALON_PROJECTS'], S['AVALON_PROJECT']) - ) From c2efa69e209148e97798d494b1264b0811e546a2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 18:41:56 +0200 Subject: [PATCH 154/207] removed AVALON_PROJECTS from required in session schema --- schema/session-2.0.json | 1 - 1 file changed, 1 deletion(-) diff --git a/schema/session-2.0.json b/schema/session-2.0.json index 006a9e2dbf..d37f2ac822 100644 --- a/schema/session-2.0.json +++ b/schema/session-2.0.json @@ -9,7 +9,6 @@ "additionalProperties": true, "required": [ - "AVALON_PROJECTS", "AVALON_PROJECT", "AVALON_ASSET", "AVALON_CONFIG" From b44b8683a0d1f2fb5d3f99171b4b74b73f0136ce Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 18:42:43 +0200 Subject: [PATCH 155/207] skip AVALON PROJECTS in adaobe communicator context collector --- pype/plugins/adobecommunicator/publish/collect_context.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pype/plugins/adobecommunicator/publish/collect_context.py b/pype/plugins/adobecommunicator/publish/collect_context.py index 139dd86480..6d05825844 100644 --- a/pype/plugins/adobecommunicator/publish/collect_context.py +++ b/pype/plugins/adobecommunicator/publish/collect_context.py @@ -39,10 +39,8 @@ class CollectContextDataFromAport(pyblish.api.ContextPlugin): # get avalon session data and convert \ to / _S = avalon.session - projects = Path(_S["AVALON_PROJECTS"]).resolve() asset = _S["AVALON_ASSET"] workdir = Path(_S["AVALON_WORKDIR"]).resolve() - _S["AVALON_PROJECTS"] = str(projects) _S["AVALON_WORKDIR"] = str(workdir) context.data["avalonSession"] = _S From a9fc6614a97eb7961374310ad80e9d24022aae0d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 18:43:00 +0200 Subject: [PATCH 156/207] changed docstring with mentioned AVAON_PROJECTS --- pype/ftrack/actions/action_delivery.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_delivery.py b/pype/ftrack/actions/action_delivery.py index 9d686929de..23da81d383 100644 --- a/pype/ftrack/actions/action_delivery.py +++ b/pype/ftrack/actions/action_delivery.py @@ -340,7 +340,7 @@ class Delivery(BaseAction): repre_path = self.path_from_represenation(repre, anatomy) # TODO add backup solution where root of path from component - # is repalced with AVALON_PROJECTS root + # is repalced with root if not frame: self.process_single_file( repre_path, anatomy, 
anatomy_name, anatomy_data From 8402e616a170122583103ac7922c523443780e47 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 18:43:37 +0200 Subject: [PATCH 157/207] delete old version does not do root validation --- .../actions/action_delete_old_versions.py | 30 +------------------ 1 file changed, 1 insertion(+), 29 deletions(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index c13845f58c..30f786e93f 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -42,36 +42,8 @@ class DeleteOldVersions(BaseAction): return False def interface(self, session, entities, event): + # TODO Add roots existence validation items = [] - root = os.environ.get("AVALON_PROJECTS") - if not root: - msg = "Root path to projects is not set." - items.append({ - "type": "label", - "value": "ERROR: {}".format(msg) - }) - self.show_interface( - items=items, title=self.inteface_title, event=event - ) - return { - "success": False, - "message": msg - } - - if not os.path.exists(root): - msg = "Root path does not exists \"{}\".".format(str(root)) - items.append({ - "type": "label", - "value": "ERROR: {}".format(msg) - }) - self.show_interface( - items=items, title=self.inteface_title, event=event - ) - return { - "success": False, - "message": msg - } - values = event["data"].get("values") if values: versions_count = int(values["last_versions_count"]) From b778163c86fb8196e1365508d245a8d227af7362 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 19:02:16 +0200 Subject: [PATCH 158/207] set root environments during install of pype --- pype/__init__.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pype/__init__.py b/pype/__init__.py index 505db4c57f..775b75e2b0 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -3,7 +3,7 @@ import os from pyblish import api as pyblish from avalon import api as avalon from .lib import filter_pyblish_plugins -from pypeapp import config, Roots +from pypeapp import config, Roots, Anatomy import logging @@ -100,8 +100,9 @@ def install(): avalon.register_plugin_path(avalon.InventoryAction, path) if project_name: - root_obj = Roots(project_name) - avalon.register_root(root_obj.roots) + anatomy = Anatomy(project_name) + anatomy.set_root_environments() + avalon.register_root(anatomy.roots) # apply monkey patched discover to original one avalon.discover = patched_discover From 5fff3e7bdc17b6b38e0593c35f0db181b6ecfcce Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 19:21:19 +0200 Subject: [PATCH 159/207] temporarily disable avalon-launcher --- pype/avalon_apps/avalon_app.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/pype/avalon_apps/avalon_app.py b/pype/avalon_apps/avalon_app.py index 35ab4c1eb7..30e7a7412b 100644 --- a/pype/avalon_apps/avalon_app.py +++ b/pype/avalon_apps/avalon_app.py @@ -33,23 +33,23 @@ class AvalonApps: return icon = QtGui.QIcon(launcher_lib.resource("icon", "main.png")) - aShowLauncher = QtWidgets.QAction(icon, "&Launcher", parent_menu) + # aShowLauncher = QtWidgets.QAction(icon, "&Launcher", parent_menu) aLibraryLoader = QtWidgets.QAction("Library", parent_menu) - aShowLauncher.triggered.connect(self.show_launcher) + # aShowLauncher.triggered.connect(self.show_launcher) aLibraryLoader.triggered.connect(self.show_library_loader) - parent_menu.addAction(aShowLauncher) + # parent_menu.addAction(aShowLauncher) 
parent_menu.addAction(aLibraryLoader) - def show_launcher(self): - # if app_launcher don't exist create it/otherwise only show main window - if self.app_launcher is None: - root = os.path.realpath(os.environ["AVALON_PROJECTS"]) - io.install() - APP_PATH = launcher_lib.resource("qml", "main.qml") - self.app_launcher = launcher_widget.Launcher(root, APP_PATH) - self.app_launcher.window.show() + # def show_launcher(self): + # # if app_launcher don't exist create it/otherwise only show main window + # if self.app_launcher is None: + # root = os.path.realpath(os.environ["AVALON_PROJECTS"]) + # io.install() + # APP_PATH = launcher_lib.resource("qml", "main.qml") + # self.app_launcher = launcher_widget.Launcher(root, APP_PATH) + # self.app_launcher.window.show() def show_library_loader(self): libraryloader.show( From 22fff1f0bbcc859361ad254c7771db62cad68585 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 19:41:14 +0200 Subject: [PATCH 160/207] fixed nuke and nukestudio AVALON_PROJECTS --- pype/nuke/lib.py | 4 +++- pype/nukestudio/lib.py | 25 ++++++++++++++++--------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 163fcd27b7..bd39666ab9 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -973,7 +973,9 @@ class WorkfileSettings(object): self.set_colorspace() def set_favorites(self): - projects_root = os.getenv("AVALON_PROJECTS") + anatomy = get_anatomy() + work_template = anatomy.templates["work"]["path"] + projects_root = anatomy.root_value_for_template(work_template) work_dir = os.getenv("AVALON_WORKDIR") asset = os.getenv("AVALON_ASSET") project = os.getenv("AVALON_PROJECT") diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index 774a9d45bf..e0e6d8750c 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -6,7 +6,7 @@ import pyblish.api import avalon.api as avalon from avalon.vendor.Qt import (QtWidgets, QtGui) import pype.api as pype -from pypeapp import Logger +from pypeapp import Logger, Anatomy log = Logger().get_logger(__name__, "nukestudio") @@ -30,11 +30,16 @@ def set_workfiles(): # show workfile gui workfiles.show(workdir) + def sync_avalon_data_to_workfile(): # import session to get project dir - S = avalon.Session + project_name = avalon.Session["AVALON_PROJECT"] + + anatomy = Anatomy(project_name) + work_template = anatomy.templates["work"]["path"] + work_root = anatomy.root_value_for_template(work_template) active_project_root = os.path.normpath( - os.path.join(S['AVALON_PROJECTS'], S['AVALON_PROJECT']) + os.path.join(work_root, project_name) ) # getting project project = hiero.core.projects()[-1] @@ -350,17 +355,19 @@ def CreateNukeWorkfile(nodes=None, # create root node and save all metadata root_node = hiero.core.nuke.RootNode() - root_path = os.environ["AVALON_PROJECTS"] + anatomy = Anatomy(os.environ["AVALON_PROJECT"]) + work_template = anatomy.templates["work"]["path"] + root_path = anatomy.root_value_for_template(work_template) nuke_script.addNode(root_node) # here to call pype.nuke.lib.BuildWorkfile script_builder = nklib.BuildWorkfile( - root_node=root_node, - root_path=root_path, - nodes=nuke_script.getNodes(), - **kwargs - ) + root_node=root_node, + root_path=root_path, + nodes=nuke_script.getNodes(), + **kwargs + ) class ClipLoader: From b6929fc41be8f6fc62f7f117eafe648c0cd90833 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 19:47:55 +0200 Subject: [PATCH 161/207] using find_root_template_from_path method directly on anatomy object --- 
pype/plugins/global/publish/integrate_new.py | 2 +- pype/plugins/global/publish/submit_publish_job.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index f7ea4eebf9..fc785f6065 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -651,7 +651,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): source = context.data["currentFile"] anatomy = instance.context.data["anatomy"] success, rootless_path = ( - anatomy.roots_obj.find_root_template_from_path(source) + anatomy.find_root_template_from_path(source) ) if success: source = rootless_path diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 9741a7135a..eb8264dc23 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -195,7 +195,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): output_dir = instance.data["outputDir"] # Convert output dir to `{root}/rest/of/path/...` with Anatomy success, rootless_path = ( - self.anatomy.roots_obj.find_root_template_from_path(output_dir) + self.anatomy.find_root_template_from_path(output_dir) ) if not success: # `rootless_path` is not set to `output_dir` if none of roots match @@ -379,7 +379,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging = os.path.dirname(list(cols[0])[0]) success, rootless_staging_dir = ( - self.anatomy.roots_obj.find_root_template_from_path(staging) + self.anatomy.find_root_template_from_path(staging) ) if success: staging = rootless_staging_dir @@ -471,7 +471,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging = os.path.dirname(list(collection)[0]) success, rootless_staging_dir = ( - self.anatomy.roots_obj.find_root_template_from_path(staging) + self.anatomy.find_root_template_from_path(staging) ) if success: staging = rootless_staging_dir @@ -506,7 +506,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging = os.path.dirname(remainder) success, rootless_staging_dir = ( - self.anatomy.roots_obj.find_root_template_from_path(staging) + self.anatomy.find_root_template_from_path(staging) ) if success: staging = rootless_staging_dir @@ -619,7 +619,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): source = context.data["currentFile"] success, rootless_path = ( - self.anatomy.roots_obj.find_root_template_from_path(source) + self.anatomy.find_root_template_from_path(source) ) if success: source = rootless_path @@ -684,7 +684,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): staging_dir = repre.get("stagingDir") if staging_dir: success, rootless_staging_dir = ( - self.anatomy.roots_obj.find_root_template_from_path( + self.anatomy.find_root_template_from_path( staging_dir ) ) From 8f1ffbffcd93a4f9a5ba3a7117b07242f00e0b0a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 19:48:18 +0200 Subject: [PATCH 162/207] tried to fix validate ass relative paths --- .../publish/validate_ass_relative_paths.py | 90 +++++++++++++------ 1 file changed, 63 insertions(+), 27 deletions(-) diff --git a/pype/plugins/maya/publish/validate_ass_relative_paths.py b/pype/plugins/maya/publish/validate_ass_relative_paths.py index b0fd12a550..0373f1bbdf 100644 --- a/pype/plugins/maya/publish/validate_ass_relative_paths.py +++ 
b/pype/plugins/maya/publish/validate_ass_relative_paths.py @@ -37,50 +37,86 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin): scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True)) scene_name, _ = os.path.splitext(scene_basename) - project_root = "{}{}{}".format( - os.environ.get("AVALON_PROJECTS"), - os.path.sep, - os.environ.get("AVALON_PROJECT") - ) assert self.maya_is_true(relative_texture) is not True, \ ("Texture path is set to be absolute") assert self.maya_is_true(relative_procedural) is not True, \ ("Procedural path is set to be absolute") + anatomy = instance.context.data["anatomy"] + texture_search_path = texture_search_path.replace("\\", "/") procedural_search_path = procedural_search_path.replace("\\", "/") - project_root = project_root.replace("\\", "/") - assert project_root in texture_search_path, \ + texture_success, texture_search_rootless_path = ( + anatomy.find_root_template_from_path( + texture_search_path + ) + ) + procedural_success, procedural_search_rootless_path = ( + anatomy.find_root_template_from_path( + texture_search_path + ) + ) + + assert not texture_success, \ ("Project root is not in texture_search_path") - assert project_root in procedural_search_path, \ + assert not procedural_success, \ ("Project root is not in procedural_search_path") @classmethod def repair(cls, instance): - texture_search_path = cmds.getAttr( - "defaultArnoldRenderOptions.tspath" + texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath") + procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath") + + anatomy = instance.context.data["anatomy"] + texture_success, texture_rootless_path = ( + anatomy.find_root_template_from_path(texture_path) ) - procedural_search_path = cmds.getAttr( - "defaultArnoldRenderOptions.pspath" + procedural_success, procedural_rootless_path = ( + anatomy.find_root_template_from_path(procedural_path) ) - project_root = "{}{}{}".format( - os.environ.get("AVALON_PROJECTS"), - os.path.sep, - os.environ.get("AVALON_PROJECT"), - ).replace("\\", "/") + all_root_paths = anatomy.all_root_paths() - cmds.setAttr("defaultArnoldRenderOptions.tspath", - project_root + os.pathsep + texture_search_path, - type="string") - cmds.setAttr("defaultArnoldRenderOptions.pspath", - project_root + os.pathsep + procedural_search_path, - type="string") - cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths", - False) - cmds.setAttr("defaultArnoldRenderOptions.absolute_texture_paths", - False) + if not texture_success: + final_path = cls.find_absolute_path( + texture_rootless_path, all_root_paths + ) + if final_path is None: + raise AssertionError("Ass is loaded out of defined roots.") + + cmds.setAttr( + "defaultArnoldRenderOptions.tspath", + final_path, + type="string" + ) + cmds.setAttr( + "defaultArnoldRenderOptions.absolute_texture_paths", + False + ) + + if not procedural_success: + final_path = cls.find_absolute_path( + texture_rootless_path, all_root_paths + ) + if final_path is None: + raise AssertionError("Ass is loaded out of defined roots.") + cmds.setAttr( + "defaultArnoldRenderOptions.pspath", + final_path, + type="string" + ) + cmds.setAttr( + "defaultArnoldRenderOptions.absolute_procedural_paths", + False + ) + + @staticmethod + def find_absolute_path(relative_path, all_root_paths): + for root_path in all_root_paths: + possible_path = os.path.join(root_path, relative_path) + if os.path.exists(possible_path): + return possible_path def maya_is_true(self, attr_val): """ From 
9de5f00d695dced60b92a0a7069b0ab4c623160e Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 13 May 2020 22:42:09 +0200 Subject: [PATCH 163/207] wrong variable --- pype/plugins/global/publish/extract_burnin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index b61ed82645..f1c1bef2f5 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -546,7 +546,7 @@ class ExtractBurnin(pype.api.Extractor): if len(matching_profiles) == 1: return matching_profiles[0] - return self.profile_exclusion(profile) + return self.profile_exclusion(matching_profiles) def profile_exclusion(self, matching_profiles): """Find out most matching profile by host, task and family match. From b566741db928d7f5c6847f151b2431ed9bd21dcf Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 13 May 2020 22:42:27 +0200 Subject: [PATCH 164/207] change ftrack review tag from `preview` to `ftrackreview` --- pype/plugins/ftrack/publish/integrate_ftrack_instances.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index 59fb507788..11b569fd12 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -63,7 +63,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): "name": "thumbnail" # Default component name is "main". } comp['thumbnail'] = True - elif comp.get('preview') or ("preview" in comp.get('tags', [])): + elif comp.get('ftrackreview') or ("ftrackreview" in comp.get('tags', [])): ''' Ftrack bug requirement: - Start frame must be 0 From a1d35fb5bdf5485950aa6bef43665ad78ce343ed Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 13 May 2020 23:27:49 +0200 Subject: [PATCH 165/207] avalon-launcher can be launched again but without setting root --- pype/avalon_apps/avalon_app.py | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/pype/avalon_apps/avalon_app.py b/pype/avalon_apps/avalon_app.py index 30e7a7412b..d3190a9d53 100644 --- a/pype/avalon_apps/avalon_app.py +++ b/pype/avalon_apps/avalon_app.py @@ -33,23 +33,22 @@ class AvalonApps: return icon = QtGui.QIcon(launcher_lib.resource("icon", "main.png")) - # aShowLauncher = QtWidgets.QAction(icon, "&Launcher", parent_menu) + aShowLauncher = QtWidgets.QAction(icon, "&Launcher", parent_menu) aLibraryLoader = QtWidgets.QAction("Library", parent_menu) - # aShowLauncher.triggered.connect(self.show_launcher) + aShowLauncher.triggered.connect(self.show_launcher) aLibraryLoader.triggered.connect(self.show_library_loader) - # parent_menu.addAction(aShowLauncher) + parent_menu.addAction(aShowLauncher) parent_menu.addAction(aLibraryLoader) - # def show_launcher(self): - # # if app_launcher don't exist create it/otherwise only show main window - # if self.app_launcher is None: - # root = os.path.realpath(os.environ["AVALON_PROJECTS"]) - # io.install() - # APP_PATH = launcher_lib.resource("qml", "main.qml") - # self.app_launcher = launcher_widget.Launcher(root, APP_PATH) - # self.app_launcher.window.show() + def show_launcher(self): + # if app_launcher don't exist create it/otherwise only show main window + if self.app_launcher is None: + io.install() + APP_PATH = launcher_lib.resource("qml", "main.qml") + self.app_launcher = launcher_widget.Launcher(APP_PATH) + 
self.app_launcher.window.show()

def show_library_loader(self):
libraryloader.show(

From 0d311d63c822e721337a63a147617874981017af Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 14 May 2020 10:15:28 +0200
Subject: [PATCH 166/207] removed unused import

---
 pype/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/__init__.py b/pype/__init__.py
index 775b75e2b0..9ca0380bf3 100644
--- a/pype/__init__.py
+++ b/pype/__init__.py
@@ -3,7 +3,7 @@ import os
from pyblish import api as pyblish
from avalon import api as avalon
from .lib import filter_pyblish_plugins
-from pypeapp import config, Roots, Anatomy
+from pypeapp import config, Anatomy

import logging

From ef0f346195bccddc716e13b29ee84e80c5564b18 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 14 May 2020 09:59:05 +0100
Subject: [PATCH 167/207] fixing the default host import to be from pype

---
 pype/services/adobe_communicator/lib/publish.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/services/adobe_communicator/lib/publish.py b/pype/services/adobe_communicator/lib/publish.py
index 2e7d993a60..a6fe991025 100644
--- a/pype/services/adobe_communicator/lib/publish.py
+++ b/pype/services/adobe_communicator/lib/publish.py
@@ -18,7 +18,7 @@ def main(env):
# Register Host (and it's pyblish plugins)
host_name = env["AVALON_APP"]
# TODO not sure if use "pype." or "avalon." for host import
- host_import_str = f"avalon.{host_name}"
+ host_import_str = f"pype.{host_name}"

try:
host_module = importlib.import_module(host_import_str)

From f30f0099fd65416ffcf4d282d6accbd4e885e9c9 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 14 May 2020 10:59:40 +0200
Subject: [PATCH 168/207] for padding use frame_padding from anatomy templates, kept backwards compatible with "padding"

---
 pype/nuke/lib.py | 9 ++++++++-
 pype/plugins/global/publish/integrate_master_version.py | 7 +++++--
 pype/plugins/global/publish/integrate_new.py | 7 +++++--
 pype/plugins/nukestudio/publish/collect_plates.py | 7 ++++++-
 4 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index bd39666ab9..a706753755 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -177,9 +177,16 @@ def format_anatomy(data):
log.debug("__ anatomy.templates: {}".format(anatomy.templates))

try:
- padding = int(anatomy.templates['render']['padding'])
+ # TODO: backwards compatibility with old anatomy template
+ padding = int(
+ anatomy.templates["render"].get(
+ "frame_padding",
+ anatomy.templates["render"].get("padding")
+ )
+ )
except KeyError as e:
msg = ("`padding` key is not in `render` "
+ "or `frame_padding` is not available in "
"Anatomy template.
Please, add it there and restart " "the pipeline (padding: \"4\"): `{}`").format(e) diff --git a/pype/plugins/global/publish/integrate_master_version.py b/pype/plugins/global/publish/integrate_master_version.py index e6e4247dd8..d82c3be075 100644 --- a/pype/plugins/global/publish/integrate_master_version.py +++ b/pype/plugins/global/publish/integrate_master_version.py @@ -356,8 +356,11 @@ class IntegrateMasterVersion(pyblish.api.InstancePlugin): _anatomy_filled = anatomy.format(anatomy_data) _template_filled = _anatomy_filled["master"]["path"] head, tail = _template_filled.split(frame_splitter) - padding = ( - anatomy.templates["render"]["padding"] + padding = int( + anatomy.templates["render"].get( + "frame_padding", + anatomy.templates["render"].get("padding") + ) ) dst_col = clique.Collection( diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index fc785f6065..08c390d040 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -341,8 +341,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = None if repre.get("frameStart"): - frame_start_padding = ( - anatomy.templates["render"]["padding"] + frame_start_padding = int( + anatomy.templates["render"].get( + "frame_padding", + anatomy.templates["render"].get("padding") + ) ) index_frame_start = int(repre.get("frameStart")) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index 8a79354bbf..3e5ba51b60 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -104,7 +104,12 @@ class CollectPlatesData(api.InstancePlugin): version_data = dict() context = instance.context anatomy = context.data.get("anatomy", None) - padding = int(anatomy.templates['render']['padding']) + padding = int( + anatomy.templates["render"].get( + "frame_padding", + anatomy.templates["render"].get("padding") + ) + ) name = instance.data["subset"] source_path = instance.data["sourcePath"] From 2c854faadf7142cb6e5f689d601f9f0335e40fb3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Thu, 14 May 2020 12:05:43 +0200 Subject: [PATCH 169/207] fixed ass path validator --- .../publish/validate_ass_relative_paths.py | 91 ++++++++----------- 1 file changed, 38 insertions(+), 53 deletions(-) diff --git a/pype/plugins/maya/publish/validate_ass_relative_paths.py b/pype/plugins/maya/publish/validate_ass_relative_paths.py index 0373f1bbdf..b64e23e92c 100644 --- a/pype/plugins/maya/publish/validate_ass_relative_paths.py +++ b/pype/plugins/maya/publish/validate_ass_relative_paths.py @@ -44,72 +44,57 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin): anatomy = instance.context.data["anatomy"] - texture_search_path = texture_search_path.replace("\\", "/") - procedural_search_path = procedural_search_path.replace("\\", "/") + # Use project root variables for multiplatform support, see: + # https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path + # ':' as path separator is supported by Arnold for all platforms. 
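As a rough sketch of what this search path ends up looking like, assuming `anatomy.root_environments()` returns a mapping of root environment variable names to paths (the names below are hypothetical):

    root_envs = {"PYPE_ROOT_WORK": "/mnt/work", "PYPE_ROOT_RENDER": "/mnt/render"}
    tokens = ["[{}]".format(key) for key in root_envs.keys()]
    # Arnold expands [VAR] tokens per platform; ':' separates entries everywhere.
    texture_search_path = ":".join(tokens + ["/explicit/extra/path"])
    # e.g. "[PYPE_ROOT_WORK]:[PYPE_ROOT_RENDER]:/explicit/extra/path"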
+ keys = anatomy.root_environments().keys() + paths = [] + for k in keys: + paths.append("[{}]".format(k)) - texture_success, texture_search_rootless_path = ( - anatomy.find_root_template_from_path( - texture_search_path - ) - ) - procedural_success, procedural_search_rootless_path = ( - anatomy.find_root_template_from_path( - texture_search_path - ) + self.log.info("discovered roots: {}".format(":".join(paths))) + + assert ":".join(paths) in texture_search_path, ( + "Project roots are not in texture_search_path" ) - assert not texture_success, \ - ("Project root is not in texture_search_path") - assert not procedural_success, \ - ("Project root is not in procedural_search_path") + assert ":".join(paths) in procedural_search_path, ( + "Project roots are not in procedural_search_path" + ) @classmethod def repair(cls, instance): texture_path = cmds.getAttr("defaultArnoldRenderOptions.tspath") procedural_path = cmds.getAttr("defaultArnoldRenderOptions.pspath") + # Use project root variables for multiplatform support, see: + # https://docs.arnoldrenderer.com/display/A5AFMUG/Search+Path + # ':' as path separator is supported by Arnold for all platforms. anatomy = instance.context.data["anatomy"] - texture_success, texture_rootless_path = ( - anatomy.find_root_template_from_path(texture_path) + keys = anatomy.root_environments().keys() + paths = [] + for k in keys: + paths.append("[{}]".format(k)) + + cmds.setAttr( + "defaultArnoldRenderOptions.tspath", + ":".join([p for p in paths + [texture_path] if p]), + type="string" ) - procedural_success, procedural_rootless_path = ( - anatomy.find_root_template_from_path(procedural_path) + cmds.setAttr( + "defaultArnoldRenderOptions.absolute_texture_paths", + False ) - all_root_paths = anatomy.all_root_paths() - - if not texture_success: - final_path = cls.find_absolute_path( - texture_rootless_path, all_root_paths - ) - if final_path is None: - raise AssertionError("Ass is loaded out of defined roots.") - - cmds.setAttr( - "defaultArnoldRenderOptions.tspath", - final_path, - type="string" - ) - cmds.setAttr( - "defaultArnoldRenderOptions.absolute_texture_paths", - False - ) - - if not procedural_success: - final_path = cls.find_absolute_path( - texture_rootless_path, all_root_paths - ) - if final_path is None: - raise AssertionError("Ass is loaded out of defined roots.") - cmds.setAttr( - "defaultArnoldRenderOptions.pspath", - final_path, - type="string" - ) - cmds.setAttr( - "defaultArnoldRenderOptions.absolute_procedural_paths", - False - ) + cmds.setAttr( + "defaultArnoldRenderOptions.pspath", + ":".join([p for p in paths + [procedural_path] if p]), + type="string" + ) + cmds.setAttr( + "defaultArnoldRenderOptions.absolute_procedural_paths", + False + ) @staticmethod def find_absolute_path(relative_path, all_root_paths): From c34664a370e6a4f6f1cc12daf8ade1a60fa3e98c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 14 May 2020 15:45:19 +0200 Subject: [PATCH 170/207] replace backslashes with forwardslashed in nukestudio --- pype/nukestudio/lib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index e0e6d8750c..3a8e35f100 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -38,9 +38,9 @@ def sync_avalon_data_to_workfile(): anatomy = Anatomy(project_name) work_template = anatomy.templates["work"]["path"] work_root = anatomy.root_value_for_template(work_template) - active_project_root = os.path.normpath( + active_project_root = ( os.path.join(work_root, project_name) 
- ) + ).replace("\\", "/") # getting project project = hiero.core.projects()[-1] From 031f9ae9030c6ac3f09067a79bef37f605042b81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Thu, 14 May 2020 16:30:23 +0200 Subject: [PATCH 171/207] added 10sec timeout for connection to deadline --- pype/plugins/global/publish/submit_publish_job.py | 2 +- pype/plugins/maya/publish/submit_maya_deadline.py | 4 ++++ pype/plugins/nuke/publish/submit_nuke_deadline.py | 2 +- 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 9741a7135a..f6ad9f76af 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -262,7 +262,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # self.log.info(json.dumps(payload, indent=4, sort_keys=True)) url = "{}/api/jobs".format(self.DEADLINE_REST_URL) - response = requests.post(url, json=payload) + response = requests.post(url, json=payload, timeout=10) if not response.ok: raise Exception(response.text) diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index b5e4cfe98c..c65f13c653 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -352,6 +352,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): """ if 'verify' not in kwargs: kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + # add 10sec timeout before bailing out + kwargs['timeout'] = 10 return requests.post(*args, **kwargs) def _requests_get(self, *args, **kwargs): @@ -366,4 +368,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): """ if 'verify' not in kwargs: kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + # add 10sec timeout before bailing out + kwargs['timeout'] = 10 return requests.get(*args, **kwargs) diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 4b68056e09..6a1654f77e 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -251,7 +251,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self.expected_files(instance, render_path) self.log.debug("__ expectedFiles: `{}`".format( instance.data["expectedFiles"])) - response = requests.post(self.deadline_url, json=payload) + response = requests.post(self.deadline_url, json=payload, timeout=10) if not response.ok: raise Exception(response.text) From 844ea5be7f92a05326e148599d6d49cd025abd03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Thu, 14 May 2020 17:17:10 +0200 Subject: [PATCH 172/207] removing unused collect deadline user plugin --- .../global/publish/collect_deadline_user.py | 64 ------------------- 1 file changed, 64 deletions(-) delete mode 100644 pype/plugins/global/publish/collect_deadline_user.py diff --git a/pype/plugins/global/publish/collect_deadline_user.py b/pype/plugins/global/publish/collect_deadline_user.py deleted file mode 100644 index 125f9d0d26..0000000000 --- a/pype/plugins/global/publish/collect_deadline_user.py +++ /dev/null @@ -1,64 +0,0 @@ -""" -Requires: - environment -> DEADLINE_PATH - -Provides: - context -> deadlineUser (str) -""" - -import os -import subprocess - -import pyblish.api -from pype.plugin import contextplugin_should_run - 
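The timeout added above keeps publishing from hanging indefinitely when the Deadline web service is unreachable. A minimal sketch of the submission call pattern these patches converge on (the helper name, URL and payload are placeholders):

    import requests

    def submit_job(deadline_rest_url, payload, timeout=10):
        # requests raises requests.exceptions.Timeout after `timeout` seconds
        # instead of blocking the whole publish.
        response = requests.post(
            "{}/api/jobs".format(deadline_rest_url), json=payload, timeout=timeout
        )
        if not response.ok:
            raise Exception(response.text)
        return response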
-CREATE_NO_WINDOW = 0x08000000 - - -def deadline_command(cmd): - # Find Deadline - path = os.environ.get("DEADLINE_PATH", None) - assert path is not None, "Variable 'DEADLINE_PATH' must be set" - - executable = os.path.join(path, "deadlinecommand") - if os.name == "nt": - executable += ".exe" - assert os.path.exists( - executable), "Deadline executable not found at %s" % executable - assert cmd, "Must have a command" - - query = (executable, cmd) - - process = subprocess.Popen(query, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - creationflags=CREATE_NO_WINDOW) - out, err = process.communicate() - - return out - - -class CollectDeadlineUser(pyblish.api.ContextPlugin): - """Retrieve the local active Deadline user""" - - order = pyblish.api.CollectorOrder + 0.499 - label = "Deadline User" - hosts = ['maya', 'fusion'] - families = ["renderlayer", "saver.deadline"] - - def process(self, context): - """Inject the current working file""" - - # Workaround bug pyblish-base#250 - if not contextplugin_should_run(self, context): - return - - user = deadline_command("GetCurrentUserName").strip() - - if not user: - self.log.warning("No Deadline user found. " - "Do you have Deadline installed?") - return - - self.log.info("Found Deadline user: {}".format(user)) - context.data['deadlineUser'] = user From 91b44a2f9f8ca48cc4d1bd6eedb0e1ff5c647f23 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 14 May 2020 23:19:03 +0200 Subject: [PATCH 173/207] attempt to fix with doubled ffmpeg args --- pype/plugins/global/publish/extract_review.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 660c7290b7..c12a3a89df 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -125,7 +125,8 @@ class ExtractReview(pyblish.api.InstancePlugin): ).format(str(tags))) continue - for output_def in outputs: + for _output_def in outputs: + output_def = copy.deepcopy(_output_def) # Make sure output definition has "tags" key if "tags" not in output_def: output_def["tags"] = [] From 8ed2b664b47f67d1a8484be35a8650561bec66f8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 15 May 2020 10:03:56 +0200 Subject: [PATCH 174/207] only first output of extract burnin keep ftrackreview --- pype/plugins/global/publish/extract_burnin.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index f1c1bef2f5..2397912e56 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -132,10 +132,18 @@ class ExtractBurnin(pype.api.Extractor): filled_anatomy = anatomy.format_all(burnin_data) burnin_data["anatomy"] = filled_anatomy.get_solved() + first_output = True + files_to_delete = [] for filename_suffix, burnin_def in repre_burnin_defs.items(): new_repre = copy.deepcopy(repre) + # Keep "ftrackreview" tag only on first output + if first_output: + first_output = False + elif "ftrackreview" in new_repre["tags"]: + new_repre["tags"].remove("ftrackreview") + burnin_options = copy.deepcopy(profile_options) burnin_values = copy.deepcopy(profile_burnins) From 1e0143a55c4b5074b4edff064b86407cd75c2ef3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 15 May 2020 10:41:02 +0200 Subject: [PATCH 175/207] modified few logs --- pype/plugins/global/publish/extract_review.py | 8 +++++--- 1 file changed, 5 
insertions(+), 3 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index c12a3a89df..c235235c6a 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -639,7 +639,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
             (float(input_width) * pixel_aspect) / input_height
         )
         output_res_ratio = float(output_width) / float(output_height)
-        self.log.debug("resolution_ratio: `{}`".format(input_res_ratio))
+        self.log.debug("input_res_ratio: `{}`".format(input_res_ratio))
         self.log.debug("output_res_ratio: `{}`".format(output_res_ratio))

         # Round ratios to 2 decimal places for comparing
@@ -700,13 +700,15 @@ class ExtractReview(pyblish.api.InstancePlugin):
         # scaling none square pixels and 1920 width
         if "reformat" in new_repre["tags"]:
             if input_res_ratio < output_res_ratio:
-                self.log.debug("lower then output")
+                self.log.debug(
+                    "Input's resolution ratio is lower then output's"
+                )
                 width_scale = int(output_width * scale_factor_by_width)
                 width_half_pad = int((output_width - width_scale) / 2)
                 height_scale = output_height
                 height_half_pad = 0
             else:
-                self.log.debug("heigher then output")
+                self.log.debug("Input is heigher then output")
                 width_scale = output_width
                 width_half_pad = 0
                 height_scale = int(input_height * scale_factor_by_width)

From 5bfafb28b5bf96902c57b658a5d1d78c97b9651a Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 15 May 2020 11:52:07 +0200
Subject: [PATCH 176/207] fix(nks): don't add copy codec if -filter_complex added into cmd

---
 .../nukestudio/publish/extract_review_cutup_video.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py
index b5c0e8387d..a4fbf90bed 100644
--- a/pype/plugins/nukestudio/publish/extract_review_cutup_video.py
+++ b/pype/plugins/nukestudio/publish/extract_review_cutup_video.py
@@ -87,8 +87,13 @@ class ExtractReviewCutUpVideo(pype.api.Extractor):
         start_sec = float(frame_start) / fps
         duration_sec = float(frame_end - frame_start + 1) / fps

+        empty_add = None
+
         # check if not missing frames at start
         if (start_sec < 0) or (media_duration < frame_end):
+            # for later switching off `-c:v copy` output arg
+            empty_add = True
+
             # init empty variables
             video_empty_start = video_layer_start = ""
             audio_empty_start = audio_layer_start = ""
@@ -191,7 +196,7 @@ class ExtractReviewCutUpVideo(pype.api.Extractor):
         input_args.append("-i {}".format(full_input_path))

         # add copy audio video codec if only shortening clip
-        if "_cut-bigger" in tags:
+        if ("_cut-bigger" in tags) and (not empty_add):
             output_args.append("-c:v copy")

         # make sure there is no frame-to-frame compression

From b0dc653db2508d9f862bd88d2a904f9bea84b88d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Ond=C5=99ej=20Samohel?=
Date: Fri, 15 May 2020 13:54:24 +0200
Subject: [PATCH 177/207] pass PYPE_DEV to deadline job

---
 pype/plugins/maya/publish/submit_maya_deadline.py | 3 ++-
 pype/plugins/nuke/publish/submit_nuke_deadline.py | 5 +++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py
index c65f13c653..89e7393fe5 100644
--- a/pype/plugins/maya/publish/submit_maya_deadline.py
+++ b/pype/plugins/maya/publish/submit_maya_deadline.py
@@ -291,7 +291,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
             "AVALON_PROJECT",
             "AVALON_ASSET",
"AVALON_TASK", - "PYPE_USERNAME" + "PYPE_USERNAME", + "PYPE_DEV" ] environment = dict({key: os.environ[key] for key in keys diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 6a1654f77e..3731cd25f0 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -183,7 +183,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "BatchName": responce_data["Props"]["Batch"], "JobDependency0": responce_data["_id"], "ChunkSize": 99999999 - }) + }) # Include critical environment variables with submission keys = [ @@ -195,7 +195,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "FTRACK_SERVER", "PYBLISHPLUGINPATH", "NUKE_PATH", - "TOOL_ENV" + "TOOL_ENV", + "PYPE_DEV" ] environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **api.Session) From 13bf4d9a118ce62f2772d0520aa566526263141f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 15 May 2020 14:53:46 +0200 Subject: [PATCH 178/207] skipped reformat tag --- pype/plugins/global/publish/extract_review.py | 40 ++++++++++--------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index c235235c6a..fc1e6377ef 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -609,13 +609,6 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("input_width: `{}`".format(input_width)) self.log.debug("resolution_height: `{}`".format(input_height)) - # Skip processing if both conditions are not met - if "reformat" not in new_repre["tags"] and not letter_box: - self.log.debug('Tag "reformat" and "letter_box" key are not set.') - new_repre["resolutionWidth"] = input_width - new_repre["resolutionHeight"] = input_height - return filters - # NOTE Setting only one of `width` or `heigth` is not allowed output_width = output_def.get("width") output_height = output_def.get("height") @@ -634,6 +627,21 @@ class ExtractReview(pyblish.api.InstancePlugin): "Output resolution is {}x{}".format(output_width, output_height) ) + # Skip processing if resolution is same as input's and letterbox is + # not set + if ( + output_width == input_width + and output_height == input_height + and not letter_box + ): + self.log.debug( + "Output resolution is same as input's" + " and \"letter_box\" key is not set. Skipping reformat part." 
+ ) + new_repre["resolutionWidth"] = input_width + new_repre["resolutionHeight"] = input_height + return filters + # defining image ratios input_res_ratio = ( (float(input_width) * pixel_aspect) / input_height @@ -666,18 +674,12 @@ class ExtractReview(pyblish.api.InstancePlugin): if letter_box: ffmpeg_width = output_width ffmpeg_height = output_height - if "reformat" in new_repre["tags"]: - if input_res_ratio == output_res_ratio: - letter_box /= pixel_aspect - elif input_res_ratio < output_res_ratio: - letter_box /= scale_factor_by_width - else: - letter_box /= scale_factor_by_height - else: + if input_res_ratio == output_res_ratio: letter_box /= pixel_aspect - if input_res_ratio != output_res_ratio: - ffmpeg_width = input_width - ffmpeg_height = int(input_height * pixel_aspect) + elif input_res_ratio < output_res_ratio: + letter_box /= scale_factor_by_width + else: + letter_box /= scale_factor_by_height # QUESTION Is scale required when ffmpeg_width is same as # output_width and ffmpeg_height as output_height @@ -698,7 +700,7 @@ class ExtractReview(pyblish.api.InstancePlugin): filters.extend([scale_filter, "setsar=1", top_box, bottom_box]) # scaling none square pixels and 1920 width - if "reformat" in new_repre["tags"]: + if input_height != output_height or input_width != output_width: if input_res_ratio < output_res_ratio: self.log.debug( "Input's resolution ratio is lower then output's" From 46e3c540cde689e1ade23113cc16af3ba3bc0e13 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 18 May 2020 14:48:12 +0200 Subject: [PATCH 179/207] fixed multipartExr check --- pype/plugins/global/publish/extract_burnin.py | 19 ++++++++++++------- pype/plugins/global/publish/extract_review.py | 19 +++++++++++++------ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 2397912e56..42b67891e9 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -60,6 +60,18 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug(instance.data["representations"]) def main_process(self, instance): + # ffmpeg doesn't support multipart exrs + if instance.data.get("multipartExr") is True: + instance_label = ( + getattr(instance, "label", None) + or instance.data.get("label") + or instance.data.get("name") + ) + self.log.info(( + "Instance \"{}\" contain \"multipartExr\". Skipped." + ).format(instance_label)) + return + # TODO get these data from context host_name = pyblish.api.registered_hosts()[-1] task_name = os.environ["AVALON_TASK"] @@ -320,13 +332,6 @@ class ExtractBurnin(pype.api.Extractor): "Representation \"{}\" don't have \"burnin\" tag. Skipped." ).format(repre["name"])) return False - - # ffmpeg doesn't support multipart exrs - if "multipartExr" in repre["tags"]: - self.log.info(( - "Representation \"{}\" contain \"multipartExr\" tag. Skipped." 
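The guard added in patch 178 skips all reformat work when nothing would change. As a standalone predicate it might read like the sketch below (the plugin inlines this logic rather than calling a helper, and the pixel-aspect term is the one a later patch, 191, adds to the same condition):

    def needs_reformat(in_w, in_h, out_w, out_h, pixel_aspect=1.0, letter_box=None):
        # Identical resolution, square pixels and no letterbox means
        # the scale/pad filters can be omitted entirely.
        return not (
            out_w == in_w
            and out_h == in_h
            and not letter_box
            and pixel_aspect == 1
        )

    assert needs_reformat(1920, 1080, 1920, 1080) is False
    assert needs_reformat(3840, 2160, 1920, 1080) is True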
- ).format(repre["name"])) - return False return True def filter_burnins_by_tags(self, burnin_defs, tags): diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index fc1e6377ef..d18cb9aef6 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -56,6 +56,17 @@ class ExtractReview(pyblish.api.InstancePlugin): instance.data["representations"].remove(repre) def main_process(self, instance): + if instance.data.get("multipartExr") is True: + instance_label = ( + getattr(instance, "label", None) + or instance.data.get("label") + or instance.data.get("name") + ) + self.log.info(( + "Instance \"{}\" contain \"multipartExr\". Skipped." + ).format(instance_label)) + return + host_name = pyblish.api.registered_hosts()[-1] task_name = os.environ["AVALON_TASK"] family = self.main_family_from_instance(instance) @@ -96,11 +107,7 @@ class ExtractReview(pyblish.api.InstancePlugin): # Loop through representations for repre in tuple(instance.data["representations"]): tags = repre.get("tags") or [] - if ( - "review" not in tags - or "multipartExr" in tags - or "thumbnail" in tags - ): + if "review" not in tags or "thumbnail" in tags: continue input_ext = repre["ext"] @@ -1122,7 +1129,7 @@ class ExtractReview(pyblish.api.InstancePlugin): tags = repre.get("tags", []) - if "multipartExr" in tags: + if instance.data.get("multipartExr") is True: # ffmpeg doesn't support multipart exrs continue From 1d27c19897dd6a917e9250261eb3333c13ff449f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 18 May 2020 14:51:54 +0200 Subject: [PATCH 180/207] moved multipartExr much earlier --- pype/plugins/global/publish/extract_burnin.py | 24 +++++++++---------- pype/plugins/global/publish/extract_review.py | 23 +++++++++--------- 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 42b67891e9..3930f4de9b 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -42,6 +42,18 @@ class ExtractBurnin(pype.api.Extractor): fields = None def process(self, instance): + # ffmpeg doesn't support multipart exrs + if instance.data.get("multipartExr") is True: + instance_label = ( + getattr(instance, "label", None) + or instance.data.get("label") + or instance.data.get("name") + ) + self.log.info(( + "Instance \"{}\" contain \"multipartExr\". Skipped." + ).format(instance_label)) + return + # QUESTION what is this for and should we raise an exception? if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") @@ -60,18 +72,6 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug(instance.data["representations"]) def main_process(self, instance): - # ffmpeg doesn't support multipart exrs - if instance.data.get("multipartExr") is True: - instance_label = ( - getattr(instance, "label", None) - or instance.data.get("label") - or instance.data.get("name") - ) - self.log.info(( - "Instance \"{}\" contain \"multipartExr\". Skipped." 
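Patches 179 and 180 move the multi-part EXR check from representation tags up to instance data, so a whole instance is skipped before any work starts. The early-out pattern, as a sketch (the helper name is invented; the real plugins inline this in `process`):

    def should_skip(instance_data, instance_label, log):
        # ffmpeg cannot read multi-part EXRs, so review/burnin extraction
        # must bail out before doing any work.
        if instance_data.get("multipartExr") is True:
            log('Instance "{}" contains "multipartExr". Skipped.'.format(
                instance_label))
            return True
        return False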
- ).format(instance_label)) - return - # TODO get these data from context host_name = pyblish.api.registered_hosts()[-1] task_name = os.environ["AVALON_TASK"] diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index d18cb9aef6..dee6729eca 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -42,6 +42,18 @@ class ExtractReview(pyblish.api.InstancePlugin): to_height = 1080 def process(self, instance): + # ffmpeg doesn't support multipart exrs + if instance.data.get("multipartExr") is True: + instance_label = ( + getattr(instance, "label", None) + or instance.data.get("label") + or instance.data.get("name") + ) + self.log.info(( + "Instance \"{}\" contain \"multipartExr\". Skipped." + ).format(instance_label)) + return + # Use legacy processing when `profiles` is not set. if self.profiles is None: return self.legacy_process(instance) @@ -56,17 +68,6 @@ class ExtractReview(pyblish.api.InstancePlugin): instance.data["representations"].remove(repre) def main_process(self, instance): - if instance.data.get("multipartExr") is True: - instance_label = ( - getattr(instance, "label", None) - or instance.data.get("label") - or instance.data.get("name") - ) - self.log.info(( - "Instance \"{}\" contain \"multipartExr\". Skipped." - ).format(instance_label)) - return - host_name = pyblish.api.registered_hosts()[-1] task_name = os.environ["AVALON_TASK"] family = self.main_family_from_instance(instance) From a37c944d01def5fc3dac692f6617ba54cc941bf2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 19 May 2020 11:28:15 +0200 Subject: [PATCH 181/207] ftrack session created during publishing has set auto connect of event hub to True --- pype/plugins/ftrack/publish/collect_ftrack_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index 0aad3b2433..151b8882a3 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -22,7 +22,7 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): ftrack_log.setLevel(logging.WARNING) # Collect session - session = ftrack_api.Session() + session = ftrack_api.Session(auto_connect_event_hub=True) self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) context.data["ftrackSession"] = session From 5d045b776a0c1b703699fd70bb72c1b4ad07cc91 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 19 May 2020 15:22:22 +0200 Subject: [PATCH 182/207] use copy.deepcopy(...) 
instead of calling .copy() on dictionaries

---
 pype/ftrack/lib/avalon_sync.py | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 920f3431a7..f354cfbc20 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -2,6 +2,7 @@ import os
 import re
 import queue
 import collections
+import copy

 from pype.ftrack.lib.io_nonsingleton import DbConnector

@@ -756,19 +757,19 @@ class SyncEntitiesFactory:
             prepared_avalon_attr_ca_id = avalon_attrs_ca_id.get(attr_key)
             if prepared_attrs:
                 self.entities_dict[entity_id]["custom_attributes"] = (
-                    prepared_attrs.copy()
+                    copy.deepcopy(prepared_attrs)
                 )
             if prepared_attrs_ca_id:
                 self.entities_dict[entity_id]["custom_attributes_id"] = (
-                    prepared_attrs_ca_id.copy()
+                    copy.deepcopy(prepared_attrs_ca_id)
                 )
             if prepared_avalon_attr:
                 self.entities_dict[entity_id]["avalon_attrs"] = (
-                    prepared_avalon_attr.copy()
+                    copy.deepcopy(prepared_avalon_attr)
                 )
             if prepared_avalon_attr_ca_id:
                 self.entities_dict[entity_id]["avalon_attrs_id"] = (
-                    prepared_avalon_attr_ca_id.copy()
+                    copy.deepcopy(prepared_avalon_attr_ca_id)
                 )

     # TODO query custom attributes by entity_id
@@ -852,7 +853,7 @@ class SyncEntitiesFactory:
             # Skip project because has stored defaults at the moment
             if entity_dict["entity_type"] == "project":
                 continue
-            entity_dict["hier_attrs"] = prepare_dict.copy()
+            entity_dict["hier_attrs"] = copy.deepcopy(prepare_dict)
             for key, val in prepare_dict_avalon.items():
                 entity_dict["avalon_attrs"][key] = val

@@ -909,7 +910,7 @@ class SyncEntitiesFactory:
         while not hier_down_queue.empty():
             hier_values, parent_id = hier_down_queue.get()
             for child_id in self.entities_dict[parent_id]["children"]:
-                _hier_values = hier_values.copy()
+                _hier_values = copy.deepcopy(hier_values)
                 for key in attributes_by_key.keys():
                     if key.startswith("avalon_"):
                         store_key = "avalon_attrs"
@@ -1891,7 +1892,7 @@ class SyncEntitiesFactory:
         parents_queue.put((self.ft_project_id, [], False))
         while not parents_queue.empty():
             ftrack_id, parent_parents, changed = parents_queue.get()
-            _parents = parent_parents.copy()
+            _parents = copy.deepcopy(parent_parents)
             if ftrack_id not in hierarchy_changing_ids and not changed:
                 if ftrack_id != self.ft_project_id:
                     _parents.append(self.entities_dict[ftrack_id]["name"])

From 4363af53d385d3a5cd3689e01517dba48cc40bb2 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 19 May 2020 15:22:33 +0200
Subject: [PATCH 183/207] fix issue of hierarchical enumerators with multiselection

---
 pype/ftrack/lib/avalon_sync.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index f354cfbc20..6bf0cd9754 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -879,7 +879,13 @@ class SyncEntitiesFactory:
         for item in values["data"]:
             value = item["value"]
-            if value is None:
+            # WARNING It is not possible to propagate enumerated hierarchical
+            # attributes with multiselection 100% right. Unsetting all values
+            # will cause inheritance from parent.
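The motivation for `copy.deepcopy` over `dict.copy()` in the sync patch above: `.copy()` is shallow, so nested lists and dicts stay shared and mutations leak between entities. A self-contained illustration:

    import copy

    defaults = {"tags": ["review"]}

    shallow = defaults.copy()
    shallow["tags"].append("burnin")
    assert defaults["tags"] == ["review", "burnin"]  # original was mutated

    deep = copy.deepcopy(defaults)
    deep["tags"].append("thumbnail")
    assert defaults["tags"] == ["review", "burnin"]  # original untouched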
+ if ( + value is None + or (isinstance(value, (tuple, list)) and not value) + ): continue entity_id = item["entity_id"] key = attribute_key_by_id[item["configuration_id"]] From 4027dbfe833c8a85af54d6a58be89aa842eb3df4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 19 May 2020 20:57:11 +0200 Subject: [PATCH 184/207] override argument added before output file --- pype/plugins/global/publish/extract_review.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index dee6729eca..228adb4686 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -282,9 +282,6 @@ class ExtractReview(pyblish.api.InstancePlugin): ffmpeg_video_filters = out_def_ffmpeg_args.get("video_filters") or [] ffmpeg_audio_filters = out_def_ffmpeg_args.get("audio_filters") or [] - # Add argument to override output file - ffmpeg_input_args.append("-y") - # Prepare input and output filepaths self.input_output_paths(new_repre, output_def, temp_data) @@ -354,6 +351,9 @@ class ExtractReview(pyblish.api.InstancePlugin): lut_filters = self.lut_filters(new_repre, instance, ffmpeg_input_args) ffmpeg_video_filters.extend(lut_filters) + # Add argument to override output file + ffmpeg_output_args.append("-y") + # NOTE This must be latest added item to output arguments. ffmpeg_output_args.append( "\"{}\"".format(temp_data["full_output_path"]) From c0b85b71bd301689b05bb442d914a804f1561b0c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 19 May 2020 21:25:24 +0200 Subject: [PATCH 185/207] comments changes --- pype/plugins/global/publish/extract_review.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 228adb4686..8bf6ba36f5 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -93,7 +93,7 @@ class ExtractReview(pyblish.api.InstancePlugin): profile, instance_families ) if not _profile_outputs: - self.log.warning(( + self.log.info(( "Skipped instance. All output definitions from selected" " profile does not match to instance families. \"{}\"" ).format(str(instance_families))) @@ -508,7 +508,7 @@ class ExtractReview(pyblish.api.InstancePlugin): filename_base = "{}_{}".format(filename, filename_suffix) # Temporary tempalte for frame filling. 
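The condition added in patch 183 treats an empty multiselection the same as an unset value, so both inherit from the parent entity. Illustrative data shaped like the ftrack custom-attribute payload (values made up):

    values = [
        {"entity_id": "a", "value": None},       # unset -> inherit
        {"entity_id": "b", "value": []},         # empty multiselection -> inherit
        {"entity_id": "c", "value": ["model"]},  # explicit -> used
    ]
    usable = [
        item for item in values
        if item["value"] is not None
        and not (isinstance(item["value"], (tuple, list)) and not item["value"])
    ]
    assert [item["entity_id"] for item in usable] == ["c"]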
Example output: - # "basename.%04d.mov" when `frame_end` == 1001 + # "basename.%04d.exr" when `frame_end` == 1001 repr_file = "{}.%{:0>2}d.{}".format( filename_base, len(str(frame_end)), output_ext ) @@ -678,10 +678,7 @@ class ExtractReview(pyblish.api.InstancePlugin): ) # letter_box - letter_box = output_def.get("letter_box") if letter_box: - ffmpeg_width = output_width - ffmpeg_height = output_height if input_res_ratio == output_res_ratio: letter_box /= pixel_aspect elif input_res_ratio < output_res_ratio: @@ -689,10 +686,8 @@ class ExtractReview(pyblish.api.InstancePlugin): else: letter_box /= scale_factor_by_height - # QUESTION Is scale required when ffmpeg_width is same as - # output_width and ffmpeg_height as output_height scale_filter = "scale={}x{}:flags=lanczos".format( - ffmpeg_width, ffmpeg_height + output_width, output_height ) top_box = ( From e6ef67e46c85db0289bf4789d82f533bb61d47a3 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 20 May 2020 13:55:32 +0200 Subject: [PATCH 186/207] fix(nuke): fixing family `render2d` consistency --- pype/plugins/nuke/publish/collect_writes.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 1850df2d00..c70953d23f 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -115,7 +115,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): # Add version data to instance version_data = { - "colorspace": node["colorspace"].value(), + "colorspace": node["colorspace"].value(), } instance.data["family"] = "write" @@ -150,6 +150,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "deadlinePriority": deadlinePriority }) + if "render" in families: + instance.data["family"] = "render2d" + if "render" not in families: + instance.data["families"].insert(0, "render") + if "prerender" in families: instance.data.update({ "family": "prerender", From 152cfbb48e1286b318ff02cc88cf8cc9215b78b1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 20 May 2020 15:28:42 +0200 Subject: [PATCH 187/207] few minor fixes --- pype/plugins/global/publish/extract_review.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 8bf6ba36f5..e1a739b87c 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -303,7 +303,7 @@ class ExtractReview(pyblish.api.InstancePlugin): ) elif temp_data["without_handles"]: - # QUESTION Shall we change this to use filter: + # TODO use frames ubstead if `-ss`: # `select="gte(n\,{handle_start}),setpts=PTS-STARTPTS` # Pros: # 1.) 
Python is not good at float operation @@ -667,7 +667,7 @@ class ExtractReview(pyblish.api.InstancePlugin): float(output_width) / (input_width * pixel_aspect) ) scale_factor_by_height = ( - float(output_height) / (input_height * pixel_aspect) + float(output_height) / input_height ) self.log.debug( @@ -716,7 +716,7 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("Input is heigher then output") width_scale = output_width width_half_pad = 0 - height_scale = int(input_height * scale_factor_by_width) + height_scale = int(input_height * scale_factor_by_height) height_half_pad = int((output_height - height_scale) / 2) self.log.debug("width_scale: `{}`".format(width_scale)) From 351d7cb03cd82c1be360425b666e8a20c63996b1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 20 May 2020 15:49:28 +0200 Subject: [PATCH 188/207] hopefully fixed issues with rescaling --- pype/plugins/global/publish/extract_review.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index e1a739b87c..7321fa04ce 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -615,7 +615,7 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("pixel_aspect: `{}`".format(pixel_aspect)) self.log.debug("input_width: `{}`".format(input_width)) - self.log.debug("resolution_height: `{}`".format(input_height)) + self.log.debug("input_height: `{}`".format(input_height)) # NOTE Setting only one of `width` or `heigth` is not allowed output_width = output_def.get("width") @@ -703,12 +703,16 @@ class ExtractReview(pyblish.api.InstancePlugin): filters.extend([scale_filter, "setsar=1", top_box, bottom_box]) # scaling none square pixels and 1920 width - if input_height != output_height or input_width != output_width: + if ( + input_height != output_height + or input_width != output_width + or pixel_aspect != 1 + ): if input_res_ratio < output_res_ratio: self.log.debug( "Input's resolution ratio is lower then output's" ) - width_scale = int(output_width * scale_factor_by_width) + width_scale = int(output_width * scale_factor_by_height) width_half_pad = int((output_width - width_scale) / 2) height_scale = output_height height_half_pad = 0 @@ -716,7 +720,7 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("Input is heigher then output") width_scale = output_width width_half_pad = 0 - height_scale = int(input_height * scale_factor_by_height) + height_scale = int(input_height * scale_factor_by_width) height_half_pad = int((output_height - height_scale) / 2) self.log.debug("width_scale: `{}`".format(width_scale)) From 3d7da50191c8dc990a47d949aca9e807df3f3a10 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 20 May 2020 16:00:46 +0200 Subject: [PATCH 189/207] fix scale in legacy code --- pype/plugins/global/publish/extract_review.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 7321fa04ce..4958a97226 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1317,6 +1317,10 @@ class ExtractReview(pyblish.api.InstancePlugin): if resolution_ratio_test < delivery_ratio_test: scale_factor = float(self.to_width) / ( resolution_width * pixel_aspect) + if int(scale_factor * 100) == 100: + scale_factor = ( + float(self.to_height) / resolution_height + ) self.log.debug("__ 
scale_factor: `{}`".format(scale_factor)) From f4a01576480a25ac807d7ed5b7eb5f9ca183d7ab Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 20 May 2020 16:03:55 +0200 Subject: [PATCH 190/207] updated legacy code with latest develop --- pype/plugins/global/publish/extract_review.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 4958a97226..119f8804f7 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1148,6 +1148,12 @@ class ExtractReview(pyblish.api.InstancePlugin): repre_new = repre.copy() ext = profile.get("ext", None) p_tags = profile.get('tags', []) + + # append repre tags into profile tags + for t in tags: + if t not in p_tags: + p_tags.append(t) + self.log.info("p_tags: `{}`".format(p_tags)) # adding control for presets to be sequence From 774d5deacfa79ff7375ca2c102812a39cb1ce785 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 20 May 2020 16:08:45 +0200 Subject: [PATCH 191/207] also check pixel_aspect in skipping part --- pype/plugins/global/publish/extract_review.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index abbc4fc595..5f8330d900 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -641,6 +641,7 @@ class ExtractReview(pyblish.api.InstancePlugin): output_width == input_width and output_height == input_height and not letter_box + and pixel_aspect == 1 ): self.log.debug( "Output resolution is same as input's" From 0a07803007520256c5ecf88b7acfa129bd943369 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 20 May 2020 16:28:03 +0200 Subject: [PATCH 192/207] removed doubled process method --- pype/plugins/global/publish/extract_review.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 5f8330d900..cdebb38f98 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1096,7 +1096,6 @@ class ExtractReview(pyblish.api.InstancePlugin): def legacy_process(self, instance): self.log.warning("Legacy review presets are used.") - def process(self, instance): output_profiles = self.outputs or {} inst_data = instance.data From 3a6ab5496324cf2741b682ae3a8904304699cf01 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 21 May 2020 10:05:11 +0200 Subject: [PATCH 193/207] changed comment and docstring by comments --- pype/plugins/global/publish/extract_review.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index cdebb38f98..e40e905118 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1022,7 +1022,7 @@ class ExtractReview(pyblish.api.InstancePlugin): return False def filter_outputs_by_families(self, profile, families): - """Filter outputs that are not supported for instance families. + """Return outputs matching input instance families. Output definitions without families filter are marked as valid. 
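A sketch of the matching rule this docstring describes, with simplified output definitions (the real `filter_outputs_by_families` handles more keys, so treat this as an approximation of the idea, not the exact logic):

    outputs = {
        "h264": {"filter": {"families": ["render", "review"]}},
        "exr": {},  # no filter -> always considered valid
    }
    instance_families = ["render"]
    filtered = {
        name: output_def for name, output_def in outputs.items()
        if not output_def.get("filter")
        or any(
            family in instance_families
            for family in output_def["filter"].get("families", [])
        )
    }
    assert set(filtered) == {"h264", "exr"}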
@@ -1044,7 +1044,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
         filtered_outputs = {}
         for filename_suffix, output_def in outputs.items():
             output_filters = output_def.get("filter")
-            # When filters not set then skip filtering process
+            # If no filter on output preset, skip filtering and add output
+            # profile for further processing
             if not output_filters:
                 filtered_outputs[filename_suffix] = output_def
                 continue

From fe777569db5c9177d5269a193e4cd091239623c4 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 21 May 2020 10:05:44 +0200
Subject: [PATCH 194/207] boolean variables are easier to see now (per review comments)

---
 pype/plugins/global/publish/extract_review.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index e40e905118..c0fce645b5 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -234,7 +234,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
         frame_end_handle = frame_end + handle_end

         # Change output frames when output should be without handles
-        without_handles = "no-handles" in output_def["tags"]
+        without_handles = bool("no-handles" in output_def["tags"])
         if without_handles:
             output_frame_start = frame_start
             output_frame_end = frame_end
@@ -496,8 +496,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
         self.log.debug("New representation ext: `{}`".format(output_ext))

         # Output is image file sequence with frames
-        output_ext_is_image = output_ext in self.image_exts
-        output_is_sequence = (
+        output_ext_is_image = bool(output_ext in self.image_exts)
+        output_is_sequence = bool(
             output_ext_is_image
             and "sequence" in output_def["tags"]
         )

From 91f4379a9fb8629e731fc6379834e60c87617716 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 21 May 2020 11:18:58 +0200
Subject: [PATCH 195/207] entity is moved from `rename` process to `add` process if avalon entity does not exist

---
 pype/ftrack/events/event_sync_to_avalon.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index 71e52c68da..8c536d20b5 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -1276,9 +1276,9 @@ class SyncToAvalonEvent(BaseEvent):
             if not avalon_ent:
                 # TODO logging
                 self.log.debug((
-                    "Can't change the name (Entity is not is avalon) <{}>"
+                    "Entity is not in avalon. Moving to \"add\" process. 
<{}>" ).format(ent_path)) - not_found[ftrack_id] = ent_info + self.ftrack_added[ftrack_id] = ent_info continue if new_name == avalon_ent["name"]: From 8d306e5ee0e51607c8fff44ffa99179eed7a2968 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 21 May 2020 11:20:05 +0200 Subject: [PATCH 196/207] mongo id custom attribute is checked in hierarchical attributes --- pype/ftrack/events/event_sync_to_avalon.py | 95 ++++++++++++++-------- 1 file changed, 61 insertions(+), 34 deletions(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index 8c536d20b5..19bd6c8273 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -1251,10 +1251,10 @@ class SyncToAvalonEvent(BaseEvent): return output def process_renamed(self): - if not self.ftrack_renamed: + ent_infos = self.ftrack_renamed + if not ent_infos: return - ent_infos = self.ftrack_renamed renamed_tasks = {} not_found = {} changeable_queue = queue.Queue() @@ -1456,7 +1456,6 @@ class SyncToAvalonEvent(BaseEvent): # - happen when was created by any sync event/action pop_out_ents = [] new_tasks_by_parent = collections.defaultdict(list) - _new_ent_infos = {} for ftrack_id, ent_info in ent_infos.items(): if self.avalon_ents_by_ftrack_id.get(ftrack_id): pop_out_ents.append(ftrack_id) @@ -1560,36 +1559,20 @@ class SyncToAvalonEvent(BaseEvent): pop_out_ents.append(ftrack_id) continue - configuration_id = entity_type_conf_ids.get(entity_type) - if not configuration_id: - for attr in cust_attrs: - key = attr["key"] - if key != CustAttrIdKey: - continue - - if attr["entity_type"] != ent_info["entityType"]: - continue - - if ( - ent_info["entityType"] == "task" and - attr["object_type_id"] != ent_info["objectTypeId"] - ): - continue - - configuration_id = attr["id"] - entity_type_conf_ids[entity_type] = configuration_id - break - - if not configuration_id: - self.log.warning( - "BUG REPORT: Missing configuration for `{} < {} >`".format( - entity_type, ent_info["entityType"] - ) - ) + mongo_id_configuration_id = self._mongo_id_configuration( + ent_info, + cust_attrs, + hier_attrs, + entity_type_conf_ids + ) + if not mongo_id_configuration_id: + self.log.warning(( + "BUG REPORT: Missing MongoID configuration for `{} < {} >`" + ).format(entity_type, ent_info["entityType"])) continue _entity_key = collections.OrderedDict({ - "configuration_id": configuration_id, + "configuration_id": mongo_id_configuration_id, "entity_id": ftrack_id }) @@ -1692,6 +1675,53 @@ class SyncToAvalonEvent(BaseEvent): if new_name not in self.task_changes_by_avalon_id[mongo_id]: self.task_changes_by_avalon_id[mongo_id].append(new_name) + def _mongo_id_configuration( + self, + ent_info, + cust_attrs, + hier_attrs, + temp_dict + ): + # Use hierarchical mongo id attribute if possible. + if "_hierarchical" not in temp_dict: + hier_mongo_id_configuration_id = None + for attr in hier_attrs: + if attr["key"] == CustAttrIdKey: + hier_mongo_id_configuration_id = attr["id"] + break + temp_dict["_hierarchical"] = hier_mongo_id_configuration_id + + hier_mongo_id_configuration_id = temp_dict.get("_hierarchical") + if hier_mongo_id_configuration_id is not None: + return hier_mongo_id_configuration_id + + # Legacy part for cases that MongoID attribute is per entity type. 
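Condensed, the lookup order this method introduces is: hierarchical attribute first, then the legacy per-entity-type configurations. A simplified sketch that ignores the entity-type matching the real method also performs, with the attribute key shown as a placeholder for the `CustAttrIdKey` constant:

    def find_mongo_id_attr(hier_attrs, cust_attrs, key="avalon_mongo_id"):
        # Hierarchical attribute wins when it exists.
        for attr in hier_attrs:
            if attr["key"] == key:
                return attr["id"]
        # Legacy: one configuration per entity type.
        for attr in cust_attrs:
            if attr["key"] == key:
                return attr["id"]
        return None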
+ entity_type = ent_info["entity_type"] + mongo_id_configuration_id = temp_dict.get(entity_type) + if mongo_id_configuration_id is not None: + return mongo_id_configuration_id + + for attr in cust_attrs: + key = attr["key"] + if key != CustAttrIdKey: + continue + + if attr["entity_type"] != ent_info["entityType"]: + continue + + if ( + ent_info["entityType"] == "task" and + attr["object_type_id"] != ent_info["objectTypeId"] + ): + continue + + mongo_id_configuration_id = attr["id"] + break + + temp_dict[entity_type] = mongo_id_configuration_id + + return mongo_id_configuration_id + def process_moved(self): if not self.ftrack_moved: return @@ -1871,11 +1901,8 @@ class SyncToAvalonEvent(BaseEvent): obj_type_id = ent_info["objectTypeId"] ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id) + # Ftrack's entity_type does not have defined custom attributes if ent_cust_attrs is None: - self.log.warning(( - "BUG REPORT: Entity has ent type without" - " custom attributes <{}> \"{}\"" - ).format(entType, ent_info)) continue for key, values in ent_info["changes"].items(): From 179f0e2e464c249fdf88c48e9c79feb1d7d238c8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 21 May 2020 15:41:59 +0200 Subject: [PATCH 197/207] update changes from PR `extract review reformat issue #166` --- pype/nuke/lib.py | 1 + pype/plugins/global/publish/extract_review.py | 2 +- pype/plugins/global/publish/extract_review_slate.py | 12 +++++++++--- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index a706753755..ade7e96691 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1669,6 +1669,7 @@ class ExporterReviewMov(ExporterReview): if any(colorspaces): # OCIOColorSpace with controled output dag_node = nuke.createNode("OCIOColorSpace") + self._temp_nodes.append(dag_node) for c in colorspaces: test = dag_node["out_colorspace"].setValue(str(c)) if test: diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index c0fce645b5..0989965758 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1323,7 +1323,7 @@ class ExtractReview(pyblish.api.InstancePlugin): delivery_ratio_test = float( "{:0.2f}".format(delivery_ratio)) - if resolution_ratio_test < delivery_ratio_test: + if resolution_ratio_test != delivery_ratio_test: scale_factor = float(self.to_width) / ( resolution_width * pixel_aspect) if int(scale_factor * 100) == 100: diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 1825035aef..3db4b2e97e 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -67,9 +67,15 @@ class ExtractReviewSlate(pype.api.Extractor): delivery_ratio_test = float( "{:0.2f}".format(delivery_ratio)) - if resolution_ratio_test < delivery_ratio_test: - scale_factor = float(to_width) / ( - resolution_width * pixel_aspect) + if resolution_ratio_test != delivery_ratio_test: + scale_factor = ( + float(to_width) / ( + resolution_width * pixel_aspect) + ) + if int(scale_factor * 100) == 100: + scale_factor = ( + float(to_height) / resolution_height + ) self.log.debug("__ scale_factor: `{}`".format(scale_factor)) From 5203bb31366343cdcf42da5b8be89085075a61b2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 21 May 2020 17:05:17 +0200 Subject: [PATCH 198/207] add profile from input if is set in extract burnin --- pype/scripts/otio_burnin.py | 6 ++++++ 1 
file changed, 6 insertions(+)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index c35ce27b9c..138165d489 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -519,6 +519,12 @@ def burnins_from_data(
     if codec_name:
         ffmpeg_args.append("-codec:v {}".format(codec_name))

+    profile_name = burnin._streams[0].get("profile")
+    if profile_name:
+        # lower profile name and replace spaces with underscore
+        profile_name = profile_name.replace(" ", "_").lower()
+        ffmpeg_args.append("-profile:v {}".format(profile_name))
+
     pix_fmt = burnin._streams[0].get("pix_fmt")
     if pix_fmt:
         ffmpeg_args.append("-pix_fmt {}".format(pix_fmt))

From b5fbf1a81f9ade3ef33e2f5b9b5e5c3d2fe735ee Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 21 May 2020 18:27:38 +0200
Subject: [PATCH 199/207] use slate input's resolution instead of resolution from instance or representation

---
 pype/plugins/global/publish/extract_review_slate.py | 13 ++++---------
 1 file changed, 4 insertions(+), 9 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py
index 3db4b2e97e..46e9dfc6f8 100644
--- a/pype/plugins/global/publish/extract_review_slate.py
+++ b/pype/plugins/global/publish/extract_review_slate.py
@@ -26,6 +26,10 @@ class ExtractReviewSlate(pype.api.Extractor):
         slate_path = inst_data.get("slateFrame")
         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

+        slate_stream = pype.lib.ffprobe_streams(slate_path)[0]
+        resolution_width = slate_stream["width"]
+        resolution_height = slate_stream["height"]
+
         pixel_aspect = inst_data.get("pixelAspect", 1)
         fps = inst_data.get("fps")

@@ -40,15 +44,6 @@ class ExtractReviewSlate(pype.api.Extractor):
             to_width = repre["resolutionWidth"]
             to_height = repre["resolutionHeight"]

-            # QUESTION Should we use resolution from instance and not source's?
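Both this burnin change and the slate extractor later in the series derive `-profile:v` from ffprobe metadata. The normalization itself is tiny; whether the resulting name is a profile the target encoder accepts depends on ffmpeg, so treat this as a sketch of the string handling only:

    def profile_arg(stream):
        profile_name = stream.get("profile")
        if not profile_name:
            return None
        # ffprobe reports e.g. "High 4:2:2"; normalize for the command line.
        return "-profile:v {}".format(profile_name.replace(" ", "_").lower())

    assert profile_arg({"profile": "High"}) == "-profile:v high"
    assert profile_arg({}) is None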
- resolution_width = inst_data.get("resolutionWidth") - if resolution_width is None: - resolution_width = to_width - - resolution_height = inst_data.get("resolutionHeight") - if resolution_height is None: - resolution_height = to_height - # defining image ratios resolution_ratio = ( (float(resolution_width) * pixel_aspect) / resolution_height From 8986839053d0d4ab22e93d7a8dc3f67a23279269 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 21 May 2020 18:28:36 +0200 Subject: [PATCH 200/207] do not use output arguments from output definition --- pype/plugins/global/publish/extract_review_slate.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 46e9dfc6f8..3f717cfd96 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -103,9 +103,6 @@ class ExtractReviewSlate(pype.api.Extractor): "-t 0.04"] ) - # output args - # preset's output data - output_args.extend(repre["outputDef"].get('output', [])) # Codecs are copied from source for whole input codec_args = self.codec_args(repre) @@ -299,6 +296,11 @@ class ExtractReviewSlate(pype.api.Extractor): if codec_name: codec_args.append("-codec:v {}".format(codec_name)) + profile_name = streams[0].get("profile") + if profile_name: + profile_name = profile_name.replace(" ", "_").lower() + codec_args.append("-profile:v {}".format(profile_name)) + pix_fmt = streams[0].get("pix_fmt") if pix_fmt: codec_args.append("-pix_fmt {}".format(pix_fmt)) From 8645ee13ce7b0cc746c47cb38ede3bf0db22dfa4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 21 May 2020 18:30:33 +0200 Subject: [PATCH 201/207] moved -y ffmpeg arg to output args --- pype/plugins/global/publish/extract_review_slate.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 3f717cfd96..95f420b1ed 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -93,8 +93,7 @@ class ExtractReviewSlate(pype.api.Extractor): input_args = [] output_args = [] - # overrides output file - input_args.append("-y") + # preset's input data input_args.extend(repre["outputDef"].get('input', [])) input_args.append("-loop 1 -i {}".format(slate_path)) @@ -103,7 +102,6 @@ class ExtractReviewSlate(pype.api.Extractor): "-t 0.04"] ) - # Codecs are copied from source for whole input codec_args = self.codec_args(repre) self.log.debug("Codec arguments: {}".format(codec_args)) @@ -157,6 +155,8 @@ class ExtractReviewSlate(pype.api.Extractor): output_args, scaling_arg) # add it to output_args output_args.insert(0, vf_back) + # overrides output file + output_args.append("-y") slate_v_path = slate_path.replace(".png", ext) output_args.append(slate_v_path) From f5e66c87bbd475f0597af823d8f70b3f4fb2f00a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 21 May 2020 19:07:23 +0200 Subject: [PATCH 202/207] slate is scaled all the time to same resolution as review has --- .../global/publish/extract_review_slate.py | 96 ++++++++----------- 1 file changed, 40 insertions(+), 56 deletions(-) diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 95f420b1ed..8426ae84eb 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ 
From f5e66c87bbd475f0597af823d8f70b3f4fb2f00a Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 21 May 2020 19:07:23 +0200
Subject: [PATCH 202/207] always scale slate to the same resolution as the
 review

---
 .../global/publish/extract_review_slate.py | 96 ++++++++-----------
 1 file changed, 40 insertions(+), 56 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py
index 95f420b1ed..8426ae84eb 100644
--- a/pype/plugins/global/publish/extract_review_slate.py
+++ b/pype/plugins/global/publish/extract_review_slate.py
@@ -27,8 +27,8 @@ class ExtractReviewSlate(pype.api.Extractor):
         ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
 
         slate_stream = pype.lib.ffprobe_streams(slate_path)[0]
-        resolution_width = slate_stream["width"]
-        resolution_height = slate_stream["height"]
+        slate_width = slate_stream["width"]
+        slate_height = slate_stream["height"]
 
         pixel_aspect = inst_data.get("pixelAspect", 1)
         fps = inst_data.get("fps")
@@ -46,33 +46,28 @@ class ExtractReviewSlate(pype.api.Extractor):
 
             # defining image ratios
             resolution_ratio = (
-                (float(resolution_width) * pixel_aspect) / resolution_height
+                (float(slate_width) * pixel_aspect) / slate_height
             )
             delivery_ratio = float(to_width) / float(to_height)
             self.log.debug("resolution_ratio: `{}`".format(resolution_ratio))
             self.log.debug("delivery_ratio: `{}`".format(delivery_ratio))
 
             # get scale factor
-            scale_factor = float(to_height) / (
-                resolution_height * pixel_aspect)
+            scale_factor_by_height = float(to_height) / slate_height
+            scale_factor_by_width = float(to_width) / (
+                slate_width * pixel_aspect
+            )
 
             # shorten two decimals long float number for testing conditions
-            resolution_ratio_test = float(
-                "{:0.2f}".format(resolution_ratio))
-            delivery_ratio_test = float(
-                "{:0.2f}".format(delivery_ratio))
+            resolution_ratio_test = float("{:0.2f}".format(resolution_ratio))
+            delivery_ratio_test = float("{:0.2f}".format(delivery_ratio))
 
-            if resolution_ratio_test != delivery_ratio_test:
-                scale_factor = (
-                    float(to_width) / (
-                        resolution_width * pixel_aspect)
-                )
-                if int(scale_factor * 100) == 100:
-                    scale_factor = (
-                        float(to_height) / resolution_height
-                    )
-
-            self.log.debug("__ scale_factor: `{}`".format(scale_factor))
+            self.log.debug("__ scale_factor_by_width: `{}`".format(
+                scale_factor_by_width
+            ))
+            self.log.debug("__ scale_factor_by_height: `{}`".format(
+                scale_factor_by_height
+            ))
 
             _remove_at_end = []
@@ -116,45 +111,34 @@ class ExtractReviewSlate(pype.api.Extractor):
             ])
 
             # scaling non-square pixels and 1920 width
-            if "reformat" in p_tags:
-                if resolution_ratio_test < delivery_ratio_test:
-                    self.log.debug("lower than delivery")
-                    width_scale = int(to_width * scale_factor)
-                    width_half_pad = int((
-                        to_width - width_scale) / 2)
-                    height_scale = to_height
-                    height_half_pad = 0
-                else:
-                    self.log.debug("higher than delivery")
-                    width_scale = to_width
-                    width_half_pad = 0
-                    scale_factor = float(to_width) / (float(
-                        resolution_width) * pixel_aspect)
-                    self.log.debug(scale_factor)
-                    height_scale = int(
-                        resolution_height * scale_factor)
-                    height_half_pad = int(
-                        (to_height - height_scale) / 2)
+            if resolution_ratio_test < delivery_ratio_test:
+                self.log.debug("lower than delivery")
+                width_scale = int(slate_width * scale_factor_by_height)
+                width_half_pad = int((to_width - width_scale) / 2)
+                height_scale = to_height
+                height_half_pad = 0
+            else:
+                self.log.debug("higher than delivery")
+                width_scale = to_width
+                width_half_pad = 0
+                height_scale = int(slate_height * scale_factor_by_width)
+                height_half_pad = int((to_height - height_scale) / 2)
 
-                self.log.debug(
-                    "__ width_scale: `{}`".format(width_scale))
-                self.log.debug(
-                    "__ width_half_pad: `{}`".format(width_half_pad))
-                self.log.debug(
-                    "__ height_scale: `{}`".format(height_scale))
-                self.log.debug(
-                    "__ height_half_pad: `{}`".format(height_half_pad))
+            self.log.debug("__ width_scale: `{}`".format(width_scale))
+            self.log.debug("__ width_half_pad: `{}`".format(width_half_pad))
+            self.log.debug("__ height_scale: `{}`".format(height_scale))
+            self.log.debug("__ height_half_pad: `{}`".format(height_half_pad))
 
-                scaling_arg = ("scale={0}x{1}:flags=lanczos,"
-                               "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
-                    width_scale, height_scale, to_width, to_height,
-                    width_half_pad, height_half_pad
-                )
+            scaling_arg = ("scale={0}x{1}:flags=lanczos,"
+                           "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
+                width_scale, height_scale, to_width, to_height,
+                width_half_pad, height_half_pad
+            )
+
+            vf_back = self.add_video_filter_args(output_args, scaling_arg)
+            # add it to output_args
+            output_args.insert(0, vf_back)
 
-                vf_back = self.add_video_filter_args(
-                    output_args, scaling_arg)
-                # add it to output_args
-                output_args.insert(0, vf_back)
             # overrides output file
             output_args.append("-y")
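The branch above picks between pillarboxing and letterboxing. A worked example of the arithmetic with illustrative numbers: fitting a 2048x858 slate (square pixels) into a 1920x1080 review letterboxes it with black bars top and bottom.

slate_width, slate_height, pixel_aspect = 2048, 858, 1.0
to_width, to_height = 1920, 1080

resolution_ratio = (float(slate_width) * pixel_aspect) / slate_height  # ~2.39
delivery_ratio = float(to_width) / float(to_height)                    # ~1.78

scale_factor_by_height = float(to_height) / slate_height
scale_factor_by_width = float(to_width) / (slate_width * pixel_aspect)

if round(resolution_ratio, 2) < round(delivery_ratio, 2):
    # slate is narrower than the delivery: pillarbox (bars left/right)
    width_scale = int(slate_width * scale_factor_by_height)
    width_half_pad = int((to_width - width_scale) / 2)
    height_scale, height_half_pad = to_height, 0
else:
    # slate is wider than the delivery: letterbox (bars top/bottom)
    width_scale, width_half_pad = to_width, 0
    height_scale = int(slate_height * scale_factor_by_width)
    height_half_pad = int((to_height - height_scale) / 2)

print("scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
    width_scale, height_scale, to_width, to_height,
    width_half_pad, height_half_pad
))
# scale=1920x804:flags=lanczos,pad=1920:1080:0:138:black,setsar=1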
From e5477ddc75e2d00780461c492bd8c86666e7035b Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 21 May 2020 19:07:57 +0200
Subject: [PATCH 203/207] fixed variable in extract review rescaling

---
 pype/plugins/global/publish/extract_review.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index 0989965758..5b35e727ac 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -713,7 +713,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
             self.log.debug(
                 "Input's resolution ratio is lower than output's"
             )
-            width_scale = int(output_width * scale_factor_by_height)
+            width_scale = int(input_width * scale_factor_by_height)
             width_half_pad = int((output_width - width_scale) / 2)
             height_scale = output_height
             height_half_pad = 0
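The one-word fix above matters whenever the input and output aspect ratios differ. With illustrative numbers - a 4:3 input (1440x1080) going into a 16:9 output (1920x1080) - the old code stretched the image instead of pillarboxing it:

input_width, input_height = 1440, 1080
output_width, output_height = 1920, 1080
scale_factor_by_height = float(output_height) / input_height  # 1.0

buggy_width_scale = int(output_width * scale_factor_by_height)  # 1920: stretched
fixed_width_scale = int(input_width * scale_factor_by_height)   # 1440: correct

# With the fix the image keeps its ratio and gets 240px black bars
# on each side.
width_half_pad = int((output_width - fixed_width_scale) / 2)
print(buggy_width_scale, fixed_width_scale, width_half_pad)  # 1920 1440 240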
From b92509067038c9dd7bcd796bc1dc650f54390aef Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 21 May 2020 19:20:34 +0200
Subject: [PATCH 204/207] extract review slate should be backwards compatible

---
 .../global/publish/extract_review_slate.py | 87 +++++++++++++------
 1 file changed, 59 insertions(+), 28 deletions(-)

diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py
index 8426ae84eb..f2ea6c0875 100644
--- a/pype/plugins/global/publish/extract_review_slate.py
+++ b/pype/plugins/global/publish/extract_review_slate.py
@@ -30,6 +30,11 @@ class ExtractReviewSlate(pype.api.Extractor):
         slate_width = slate_stream["width"]
         slate_height = slate_stream["height"]
 
+        if "reviewToWidth" in inst_data:
+            use_legacy_code = True
+        else:
+            use_legacy_code = False
+
         pixel_aspect = inst_data.get("pixelAspect", 1)
         fps = inst_data.get("fps")
 
@@ -41,8 +46,12 @@ class ExtractReviewSlate(pype.api.Extractor):
                 continue
 
             # values are set in ExtractReview
-            to_width = repre["resolutionWidth"]
-            to_height = repre["resolutionHeight"]
+            if use_legacy_code:
+                to_width = inst_data["reviewToWidth"]
+                to_height = inst_data["reviewToHeight"]
+            else:
+                to_width = repre["resolutionWidth"]
+                to_height = repre["resolutionHeight"]
 
             # defining image ratios
             resolution_ratio = (
@@ -90,17 +99,25 @@ class ExtractReviewSlate(pype.api.Extractor):
             input_args = []
             output_args = []
 
             # preset's input data
-            input_args.extend(repre["outputDef"].get('input', []))
+            if use_legacy_code:
+                input_args.extend(repre["_profile"].get('input', []))
+            else:
+                input_args.extend(repre["outputDef"].get('input', []))
             input_args.append("-loop 1 -i {}".format(slate_path))
             input_args.extend([
                 "-r {}".format(fps),
                 "-t 0.04"]
             )
 
-            # Codecs are copied from source for whole input
-            codec_args = self.codec_args(repre)
-            self.log.debug("Codec arguments: {}".format(codec_args))
-            output_args.extend(codec_args)
+            if use_legacy_code:
+                codec_args = repre["_profile"].get('codec', [])
+                output_args.extend(codec_args)
+                # preset's output data
+                output_args.extend(repre["_profile"].get('output', []))
+            else:
+                # Codecs are copied from source for whole input
+                codec_args = self.codec_args(repre)
+                output_args.extend(codec_args)
 
             # make sure colors are correct
             output_args.extend([
@@ -111,29 +128,43 @@ class ExtractReviewSlate(pype.api.Extractor):
             ])
 
             # scaling non-square pixels and 1920 width
-            if resolution_ratio_test < delivery_ratio_test:
-                self.log.debug("lower than delivery")
-                width_scale = int(slate_width * scale_factor_by_height)
-                width_half_pad = int((to_width - width_scale) / 2)
-                height_scale = to_height
-                height_half_pad = 0
-            else:
-                self.log.debug("higher than delivery")
-                width_scale = to_width
-                width_half_pad = 0
-                height_scale = int(slate_height * scale_factor_by_width)
-                height_half_pad = int((to_height - height_scale) / 2)
+            if (
+                # Always scale slate if not legacy
+                not use_legacy_code or
+                # Legacy code required reformat tag
+                (use_legacy_code and "reformat" in p_tags)
+            ):
+                if resolution_ratio_test < delivery_ratio_test:
+                    self.log.debug("lower than delivery")
+                    width_scale = int(slate_width * scale_factor_by_height)
+                    width_half_pad = int((to_width - width_scale) / 2)
+                    height_scale = to_height
+                    height_half_pad = 0
+                else:
+                    self.log.debug("higher than delivery")
+                    width_scale = to_width
+                    width_half_pad = 0
+                    height_scale = int(slate_height * scale_factor_by_width)
+                    height_half_pad = int((to_height - height_scale) / 2)
 
-            self.log.debug("__ width_scale: `{}`".format(width_scale))
-            self.log.debug("__ width_half_pad: `{}`".format(width_half_pad))
-            self.log.debug("__ height_scale: `{}`".format(height_scale))
-            self.log.debug("__ height_half_pad: `{}`".format(height_half_pad))
+                self.log.debug(
+                    "__ width_scale: `{}`".format(width_scale)
+                )
+                self.log.debug(
+                    "__ width_half_pad: `{}`".format(width_half_pad)
+                )
+                self.log.debug(
+                    "__ height_scale: `{}`".format(height_scale)
+                )
+                self.log.debug(
+                    "__ height_half_pad: `{}`".format(height_half_pad)
+                )
 
-            scaling_arg = ("scale={0}x{1}:flags=lanczos,"
-                           "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
-                width_scale, height_scale, to_width, to_height,
-                width_half_pad, height_half_pad
-            )
+                scaling_arg = ("scale={0}x{1}:flags=lanczos,"
+                               "pad={2}:{3}:{4}:{5}:black,setsar=1").format(
+                    width_scale, height_scale, to_width, to_height,
+                    width_half_pad, height_half_pad
+                )
 
             vf_back = self.add_video_filter_args(output_args, scaling_arg)
             # add it to output_args
             output_args.insert(0, vf_back)

From 6e27c92800ccefb5b2be143234227b51f13162cd Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 22 May 2020 14:07:12 +0200
Subject: [PATCH 205/207] fixed index access of dict keys view (not
 subscriptable in Python 3)

---
 pype/plugins/global/publish/integrate_new.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 0cd46d8891..bd908901cc 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -743,13 +743,13 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             matching_profiles[name] = filters
 
         if len(matching_profiles) == 1:
-            template_name = matching_profiles.keys()[0]
+            template_name = tuple(matching_profiles.keys())[0]
             self.log.debug(
                 "Using template name \"{}\".".format(template_name)
             )
 
         elif len(matching_profiles) > 1:
-            template_name = matching_profiles.keys()[0]
+            template_name = tuple(matching_profiles.keys())[0]
             self.log.warning((
                 "More than one template profiles matched"
                 " Family \"{}\" and Task: \"{}\"."
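The change above is a Python 3 compatibility fix: dict.keys() returns a subscriptable list under Python 2 but a view object under Python 3, so keys()[0] raises TypeError there. A small demonstration with a made-up profile mapping:

matching_profiles = {"render": {"families": ["render"]}}

# Python 2 only - on Python 3 this raises
# "TypeError: 'dict_keys' object is not subscriptable":
# template_name = matching_profiles.keys()[0]

# Works on both interpreters:
template_name = tuple(matching_profiles.keys())[0]
print(template_name)  # render

next(iter(matching_profiles)) would avoid materializing the keys, but wrapping them in tuple() keeps the change minimal and readable.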
From 70b1cd004a515f86fb3d7c097e86b0dd6827250c Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 22 May 2020 14:15:01 +0200
Subject: [PATCH 206/207] don't use host name from pyblish's registered_hosts
 but from AVALON_APP environment

---
 pype/plugins/global/publish/extract_burnin.py | 2 +-
 pype/plugins/global/publish/extract_review.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index d0a5364945..2eac38bac8 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -73,7 +73,7 @@ class ExtractBurnin(pype.api.Extractor):
 
     def main_process(self, instance):
         # TODO get these data from context
-        host_name = pyblish.api.registered_hosts()[-1]
+        host_name = os.environ["AVALON_APP"]
         task_name = os.environ["AVALON_TASK"]
         family = self.main_family_from_instance(instance)
 
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index 5b35e727ac..228b4cd6f4 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -68,7 +68,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
             instance.data["representations"].remove(repre)
 
     def main_process(self, instance):
-        host_name = pyblish.api.registered_hosts()[-1]
+        host_name = os.environ["AVALON_APP"]
         task_name = os.environ["AVALON_TASK"]
         family = self.main_family_from_instance(instance)

From b5f78a4f4ab5a61012a1988002e34ce3642700fc Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Mon, 1 Jun 2020 12:19:51 +0200
Subject: [PATCH 207/207] bump version

---
 pype/version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/version.py b/pype/version.py
index 892994aa6c..43ce13db01 100644
--- a/pype/version.py
+++ b/pype/version.py
@@ -1 +1 @@
-__version__ = "2.8.0"
+__version__ = "2.9.0"