From a94ce94ca168dbf13b8356b07ec242e8d2677d4f Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Wed, 13 Nov 2019 19:38:07 +0100
Subject: [PATCH 001/434] move maya ascii, setdress and layout to reference
 loader

---
 pype/plugins/maya/load/load_mayaascii.py | 4 +---
 pype/plugins/maya/load/load_reference.py | 7 ++++++-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/pype/plugins/maya/load/load_mayaascii.py b/pype/plugins/maya/load/load_mayaascii.py
index b9a5de2782..ab7b2daffb 100644
--- a/pype/plugins/maya/load/load_mayaascii.py
+++ b/pype/plugins/maya/load/load_mayaascii.py
@@ -6,9 +6,7 @@ import os
 class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader):
     """Load the model"""

-    families = ["mayaAscii",
-                "setdress",
-                "layout"]
+    families = []
     representations = ["ma"]

     label = "Reference Maya Ascii"
diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py
index 55db019cf4..f1df584feb 100644
--- a/pype/plugins/maya/load/load_reference.py
+++ b/pype/plugins/maya/load/load_reference.py
@@ -8,7 +8,12 @@ reload(pype.maya.plugin)
 class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
     """Load the model"""

-    families = ["model", "pointcache", "animation"]
+    families = ["model",
+                "pointcache",
+                "animation",
+                "mayaAscii",
+                "setdress",
+                "layout"]
     representations = ["ma", "abc"]

     tool_names = ["loader"]

From 43a66826bc45573c3f904fc553ab0608d2ecc5e1 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Wed, 20 Nov 2019 18:23:29 +0100
Subject: [PATCH 002/434] integrate remove components is deactivated by
 default so it does not remove thumbnails and movs

---
 pype/plugins/ftrack/publish/integrate_remove_components.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pype/plugins/ftrack/publish/integrate_remove_components.py b/pype/plugins/ftrack/publish/integrate_remove_components.py
index bad50f7200..26cac0f1ae 100644
--- a/pype/plugins/ftrack/publish/integrate_remove_components.py
+++ b/pype/plugins/ftrack/publish/integrate_remove_components.py
@@ -11,13 +11,13 @@ class IntegrateCleanComponentData(pyblish.api.InstancePlugin):
     label = 'Clean component data'
     families = ["ftrack"]
     optional = True
-    active = True
+    active = False

     def process(self, instance):
         for comp in instance.data['representations']:
             self.log.debug('component {}'.format(comp))
-
+
             if "%" in comp['published_path'] or "#" in comp['published_path']:
                 continue

From 7f63d864fd56afa13285653b56d9d7b3f8fb4e11 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Wed, 20 Nov 2019 18:31:40 +0100
Subject: [PATCH 003/434] created extract scaled thumbnails plugin which
 creates 3 types of thumbnails: small, middle and large

---
 .../publish/extract_scaled_thumbnails.py      | 137 ++++++++++++++++++
 1 file changed, 137 insertions(+)
 create mode 100644 pype/plugins/global/publish/extract_scaled_thumbnails.py

diff --git a/pype/plugins/global/publish/extract_scaled_thumbnails.py b/pype/plugins/global/publish/extract_scaled_thumbnails.py
new file mode 100644
index 0000000000..6d6aa6a73c
--- /dev/null
+++ b/pype/plugins/global/publish/extract_scaled_thumbnails.py
@@ -0,0 +1,137 @@
+import os
+import pyblish.api
+import pype.api
+
+
+class ExtractScaledThumbnails(pyblish.api.InstancePlugin):
+    """Create scaled thumbnails for GUIs like loader etc.
+
+    Scaled thumbnails creation is based on data in `output_data` attribute.
+    The dictionary `output_data` stores additional filename ending and
+    filters for ffmpeg.
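+
+    For illustration, the "small" entry from the example below expands,
+    roughly, to the following ffmpeg call (the source thumbnail name is
+    hypothetical):
+
+        ffmpeg -y -i "myasset_thumbnail.jpg" -vf scale=160:-1 \
+            "myasset_thumbnail_S.jpg"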
+
+    Example:
+        "small": {
+            "file_end": "S",
+            "filters": ["scale=160:-1"]
+        }
+
+    "small" - key is used to store result under representation
+    "file_end" - is the distinguishing part for files.
+        - "S" means that source thumbnail "myasset_thumbnail.jpg"
+        will be converted to "myasset_thumbnail_S.jpg"
+    "filters" - should contain filters for ffmpeg, key is `scale` filter
+        which is used to render thumbnails with different
+        resolution.
+        - "160:-1" will render thumbnail with 160px width and keep
+        aspect ratio of source image
+    """
+
+    order = pyblish.api.ExtractorOrder + 0.499
+    label = "Extract scaled thumbnails"
+
+    optional = True
+    active = True
+    hosts = ["nuke", "maya", "shell"]
+    # Default setting for output data
+    output_data = {
+        "small": {
+            "file_end": "S",
+            "filters": ["scale=160:-1"]
+        },
+        "middle": {
+            "file_end": "M",
+            "filters": ["scale=320:-1"]
+        },
+        "large": {
+            "file_end": "L",
+            "filters": ["scale=1024:-1"]
+        }
+    }
+
+    def process(self, instance):
+        for repre in instance.data["representations"]:
+            name = repre.get("name", "")
+            if name:
+                name = " <{}>".format(name)
+            self.log.debug("Checking repre{}: {}".format(name, repre))
+            # Skip if thumbnail not in tags
+            tags = repre.get("tags") or []
+            if (
+                "thumbnail" not in tags and
+                not repre.get("thumbnail")  # backwards compatibility
+            ):
+                continue
+
+            # skip if files are not set or empty
+            files = repre.get("files")
+            if not files:
+                continue
+
+            orig_filename = None
+            if isinstance(files, (str, unicode)):
+                orig_filename = files
+            elif isinstance(files, list):
+                orig_filename = files[0]
+            else:
+                self.log.debug((
+                    "Original `files`{} have invalid type \"{}\" on repre {}"
+                ).format(name, str(type(files)), str(repre)))
+                continue
+
+            staging_dir = repre["stagingDir"]
+            full_input_path = os.path.join(staging_dir, orig_filename)
+
+            orig_basename, orig_ext = os.path.splitext(orig_filename)
+            thumbnail_data = {}
+
+            _input_args = []
+            # Overrides output file
+            _input_args.append("-y")
+            # Set input path
+            _input_args.append("-i \"{}\"".format(full_input_path))
+
+            ffmpeg_path = os.path.join(
+                os.environ.get("FFMPEG_PATH", ""), "ffmpeg"
+            )
+
+            for output_type, single_data in self.output_data.items():
+                # DEBUG remove after testing!
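+                # Each pass builds one ffmpeg call: copy the shared input
+                # args, append this variant's "-vf" filter chain and derive
+                # the output name from the source basename plus "file_end".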
+                self.log.debug(output_type)
+                file_end = single_data["file_end"]
+                in_filters = single_data["filters"]
+
+                ffmpeg_filters = []
+                if in_filters:
+                    ffmpeg_filters.append("-vf")
+                    ffmpeg_filters.extend([fil for fil in in_filters])
+
+                # copy _input_args
+                input_args = [arg for arg in _input_args]
+                input_args.extend(ffmpeg_filters)
+
+                output_args = []
+                filename = "{}_{}{}".format(
+                    orig_basename, file_end, orig_ext
+                )
+                full_output_path = os.path.join(staging_dir, filename)
+                output_args.append("\"{}\"".format(full_output_path))
+
+                mov_args = [
+                    ffmpeg_path,
+                    " ".join(input_args),
+                    " ".join(output_args)
+                ]
+                subprcs_cmd = " ".join(mov_args)
+
+                self.log.debug("Executing: {}".format(subprcs_cmd))
+                output = pype.api.subprocess(subprcs_cmd)
+                self.log.debug("Output: {}".format(output))
+
+                # Store data for integrator
+                thumbnail_data[output_type] = {
+                    "path": full_output_path,
+                    "filename_append": file_end
+                }
+
+            repre["thumbnail_data"] = thumbnail_data

From c2e5f792f4a62a93fa59a160370ead4500c094fa Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Wed, 20 Nov 2019 18:32:20 +0100
Subject: [PATCH 004/434] added processing of thumbnails to integrate new so
 they can be accessible for guis

---
 pype/plugins/global/publish/integrate_new.py | 62 ++++++++++++++++++++
 1 file changed, 62 insertions(+)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 64f6dd5015..a8e6999e8d 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -384,6 +384,65 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             repre['published_path'] = dst
             self.log.debug("__ dst: {}".format(dst))

+            thumbnail_data = {}
+            if 'thumbnail' in repre.get('tags', []):
+                self.log.debug((
+                    "Looking for scaled thumbnails in <{}>"
+                ).format(repre["name"]))
+                # prepare template for thumbnails
+                # - same as anatomy but keys in basename are replaced with
+                # one single key `thumb_file_name`
+                # - template is same for all thumbnails
+                template_base_name = os.path.basename(template)
+                thumb_template = template.replace(
+                    template_base_name, "{thumb_file_name}"
+                )
+                self.log.debug(
+                    "Thumbnail template: {}".format(thumb_template)
+                )
+                # get orig thumbnail filename
+                repre_basename = os.path.basename(dst)
+                repre_file, repre_ext = os.path.splitext(repre_basename)
+                # get thumbnail data from reresentation (if there are any)
+                _thumbnail_data = repre.pop("thumbnail_data", {})
+                if _thumbnail_data:
+                    thumbnail_data["template"] = thumb_template
+
+                for thumb_type, thumb_info in _thumbnail_data.items():
+                    _src = thumb_info["path"]
+
+                    # get filename appending "like `S` for small thumb"
+                    filename_append = thumb_info["filename_append"]
+                    thumb_file_name = "{}_{}{}".format(
+                        repre_file, filename_append, repre_ext
+                    )
+                    _template_data = template_data.copy()
+                    _template_data["thumb_file_name"] = thumb_file_name
+                    # fill thumbnail template with prepared data
+                    self.log.debug(
+                        "Thumbnail <{}> template data: {}".format(
+                            thumb_type, _template_data
+                        )
+                    )
+                    template_filled = thumb_template.format(
+                        **_template_data
+                    )
+                    _dst = os.path.normpath(
+                        template_filled
+                    ).replace("..", ".")
+                    self.log.debug(
+                        "Thumbnail <{}> src: {} || dst: {}".format(
+                            thumb_type, _src, _dst
+                        )
+                    )
+                    # add to transfers
+                    instance.data["transfers"].append([_src, _dst])
+                    # store full path and additional context data
+                    thumbnail_data[thumb_type] = {
+                        "path": _dst,
+                        "context": {"thumb_file_name": thumb_file_name}
+                    }
+
             representation = {
                 "schema":
"pype:representation-2.0", "type": "representation", @@ -409,6 +468,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): } } + if thumbnail_data: + representation["data"]["thumbnail_data"] = thumbnail_data + if sequence_repre and repre.get("frameStart"): representation['context']['frame'] = repre.get("frameStart") From a03bbc924a4d3f25d36b17a9527ca137f5f5438c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 20 Nov 2019 18:33:00 +0100 Subject: [PATCH 005/434] renamed extract review repre name to thumbnail --- pype/plugins/global/publish/extract_jpeg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 10c339e0c6..18d9286b86 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -69,7 +69,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): instance.data["representations"] = [] representation = { - 'name': 'jpg', + 'name': 'thumbnail', 'ext': 'jpg', 'files': jpegFile, "stagingDir": stagingdir, From 1ddf61a7ce98de34b0ddeb9b5ef273acd5f1f489 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 3 Dec 2019 16:24:10 +0100 Subject: [PATCH 006/434] assemblies were not loading correctly --- pype/plugins/global/publish/integrate.py | 2 +- pype/plugins/global/publish/integrate_new.py | 3 ++- pype/plugins/maya/load/load_reference.py | 8 ++++--- pype/plugins/maya/publish/extract_assembly.py | 24 +++++++++++++++---- 4 files changed, 28 insertions(+), 9 deletions(-) diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py index 59e05ee2aa..b2f273ec5c 100644 --- a/pype/plugins/global/publish/integrate.py +++ b/pype/plugins/global/publish/integrate.py @@ -24,7 +24,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): label = "Integrate Asset" order = pyblish.api.IntegratorOrder - families = ["assembly"] + families = [] exclude_families = ["clip"] def process(self, instance): diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index fce6b0b5c7..0a1a1fd031 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -70,7 +70,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "audio", "yetiRig", "yeticache", - "source" + "source", + "assembly" ] exclude_families = ["clip"] diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index 55db019cf4..c17538c57d 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -43,14 +43,16 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): namespace = cmds.referenceQuery(nodes[0], namespace=True) shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) + + current_namespace = pm.namespaceInfo(currentNamespace=True) + + if current_namespace != ":": + groupName = current_namespace + ":" + groupName groupNode = pm.PyNode(groupName) roots = set() - print(nodes) for node in newNodes: try: diff --git a/pype/plugins/maya/publish/extract_assembly.py b/pype/plugins/maya/publish/extract_assembly.py index 26b16a73c4..c12d57e836 100644 --- a/pype/plugins/maya/publish/extract_assembly.py +++ b/pype/plugins/maya/publish/extract_assembly.py @@ -22,11 +22,11 @@ class ExtractAssembly(pype.api.Extractor): def process(self, instance): - parent_dir = self.staging_dir(instance) + staging_dir = 
self.staging_dir(instance) hierarchy_filename = "{}.abc".format(instance.name) - hierarchy_path = os.path.join(parent_dir, hierarchy_filename) + hierarchy_path = os.path.join(staging_dir, hierarchy_filename) json_filename = "{}.json".format(instance.name) - json_path = os.path.join(parent_dir, json_filename) + json_path = os.path.join(staging_dir, json_filename) self.log.info("Dumping scene data for debugging ..") with open(json_path, "w") as filepath: @@ -46,8 +46,24 @@ class ExtractAssembly(pype.api.Extractor): "uvWrite": True, "selection": True}) - instance.data["files"] = [json_filename, hierarchy_filename] + if "representations" not in instance.data: + instance.data["representations"] = [] + representation_abc = { + 'name': 'abc', + 'ext': 'abc', + 'files': hierarchy_filename, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation_abc) + + representation_json = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation_json) # Remove data instance.data.pop("scenedata", None) From 6a56554e5fb2504aab1905f6c955dd9d626e7a47 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 3 Dec 2019 17:59:52 +0100 Subject: [PATCH 007/434] initial work on render creator --- ...eate_renderglobals.py => create_render.py} | 53 +++++++++++-------- 1 file changed, 32 insertions(+), 21 deletions(-) rename pype/plugins/maya/create/{create_renderglobals.py => create_render.py} (89%) diff --git a/pype/plugins/maya/create/create_renderglobals.py b/pype/plugins/maya/create/create_render.py similarity index 89% rename from pype/plugins/maya/create/create_renderglobals.py rename to pype/plugins/maya/create/create_render.py index 7c71bfbc36..a8fbd664d3 100644 --- a/pype/plugins/maya/create/create_renderglobals.py +++ b/pype/plugins/maya/create/create_render.py @@ -2,27 +2,52 @@ import os import json import appdirs import requests + from maya import cmds +import maya.app.renderSetup.model.override as override +import maya.app.renderSetup.model.selector as selector +import maya.app.renderSetup.model.collection as collection +import maya.app.renderSetup.model.renderLayer as renderLayer +import maya.app.renderSetup.model.renderSetup as renderSetup + import pype.maya.lib as lib import avalon.maya -class CreateRenderGlobals(avalon.maya.Creator): +class CreateRender(avalon.maya.Creator): + """Create render layer for export""" - label = "Render Globals" - family = "renderglobals" - icon = "gears" + label = "Render" + family = "renderlayer" + icon = "eye" defaults = ['Main'] _token = None _user = None _password = None - def __init__(self, *args, **kwargs): - super(CreateRenderGlobals, self).__init__(*args, **kwargs) + # renderSetup instance + _rs = None + def __init__(self, *args, **kwargs): + super(CreateRender, self).__init__(*args, **kwargs) + self._create_render_settings() + self._rs = renderSetup.instance() + rl = self._rs.createRenderLayer("MyRenderSetupLayer") + cmds.sets() + + def process(self): + exists = cmds.ls(self.name) + if exists: + return cmds.warning("%s already exists." 
% exists[0])
+
+        with lib.undo_chunk():
+            super(CreateRender, self).process()
+            cmds.setAttr("{}.machineList".format(self.name), lock=True)
+
+    def _create_render_settings(self):
         # We won't be publishing this one
-        self.data["id"] = "avalon.renderglobals"
+        self.data["id"] = "avalon.renderLayer"

         # get pools
         pools = []
@@ -88,20 +113,6 @@ class CreateRenderGlobals(avalon.maya.Creator):

         self.options = {"useSelection": False}  # Force no content

-    def process(self):
-
-        exists = cmds.ls(self.name)
-        assert len(exists) <= 1, (
-            "More than one renderglobal exists, this is a bug"
-        )
-
-        if exists:
-            return cmds.warning("%s already exists." % exists[0])
-
-        with lib.undo_chunk():
-            super(CreateRenderGlobals, self).process()
-            cmds.setAttr("{}.machineList".format(self.name), lock=True)
-
     def _load_credentials(self):
         """
         Load Muster credentials from file and set `MUSTER_USER`,

From 6bc2042cea12869717b318b134ed652cc209ee42 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Wed, 4 Dec 2019 18:29:50 +0100
Subject: [PATCH 008/434] removed added integration and thumbnail extractor

---
 .../publish/extract_scaled_thumbnails.py      | 137 ------------------
 pype/plugins/global/publish/integrate_new.py  |  65 +--------
 2 files changed, 2 insertions(+), 200 deletions(-)
 delete mode 100644 pype/plugins/global/publish/extract_scaled_thumbnails.py

diff --git a/pype/plugins/global/publish/extract_scaled_thumbnails.py b/pype/plugins/global/publish/extract_scaled_thumbnails.py
deleted file mode 100644
index 6d6aa6a73c..0000000000
--- a/pype/plugins/global/publish/extract_scaled_thumbnails.py
+++ /dev/null
@@ -1,137 +0,0 @@
-import os
-import pyblish.api
-import pype.api
-
-
-class ExtractScaledThumbnails(pyblish.api.InstancePlugin):
-    """Create scaled thumbnails for GUIs like loader etc.
-
-    Scaled thumbnails creation is based on data in `output_data` attribute.
-    The dictionary `output_data` stores additional filename ending and
-    filters for ffmpeg.
-
-    Example:
-        "small": {
-            "file_end": "S",
-            "filters": ["scale=160:-1"]
-        }
-
-    "small" - key is used to store result under representation
-    "file_end" - is the distinguishing part for files.
-        - "S" means that source thumbnail "myasset_thumbnail.jpg"
-        will be converted to "myasset_thumbnail_S.jpg"
-    "filters" - should contain filters for ffmpeg, key is `scale` filter
-        which is used to render thumbnails with different
-        resolution.
- - "160:-1" will render thumbnail with 160px width and keep - aspect ratio of source image - """ - - order = pyblish.api.ExtractorOrder + 0.499 - label = "Extract scaled thumbnails" - - optional = True - active = True - hosts = ["nuke", "maya", "shell"] - # Default setting for output data - output_data = { - "small": { - "file_end": "S", - "filters": ["scale=160:-1"] - }, - "middle": { - "file_end": "M", - "filters": ["scale=320:-1"] - }, - "large": { - "file_end": "L", - "filters": ["scale=1024:-1"] - } - } - - def process(self, instance): - for repre in instance.data["representations"]: - name = repre.get("name", "") - if name: - name = " <{}>".format(name) - self.log.debug("Checking repre{}: {}".format(name, repre)) - # Skip if thumbnail not in tags - tags = repre.get("tags") or [] - if ( - "thumbnail" not in tags and - not repre.get("thumbnail") # backwards compatibility - ): - continue - - # skip if files are not set or empty - files = repre.get("files") - if not files: - continue - - orig_filename = None - if isinstance(files, (str, unicode)): - orig_filename = files - elif isinstance(files, list): - orig_filename = files[0] - else: - self.log.debug(( - "Original `files`{} have invalid type \"{}\" on repre {}" - ).format(name, str(type(files)), str(repre))) - continue - - staging_dir = repre["stagingDir"] - full_input_path = os.path.join(staging_dir, orig_filename) - - orig_basename, orig_ext = os.path.splitext(orig_filename) - thumbnail_data = {} - - _input_args = [] - # Overrides output file - _input_args.append("-y") - # Set input path - _input_args.append("-i \"{}\"".format(full_input_path)) - - ffmpeg_path = os.path.join( - os.environ.get("FFMPEG_PATH", ""), "ffmpeg" - ) - - for output_type, single_data in self.output_data.items(): - # DEBUG remove after testing! 
- self.log.debug(output_type) - file_end = single_data["file_end"] - in_filters = single_data["filters"] - - ffmpeg_filters = [] - if in_filters: - ffmpeg_filters.append("-vf") - ffmpeg_filters.extend([fil for fil in in_filters]) - - # copy _input_args - input_args = [arg for arg in _input_args] - input_args.extend(ffmpeg_filters) - - output_args = [] - filename = "{}_{}{}".format( - orig_basename, file_end, orig_ext - ) - full_output_path = os.path.join(staging_dir, filename) - output_args.append("\"{}\"".format(full_output_path)) - - mov_args = [ - ffmpeg_path, - " ".join(input_args), - " ".join(output_args) - ] - subprcs_cmd = " ".join(mov_args) - - self.log.debug("Executing: {}".format(subprcs_cmd)) - output = pype.api.subprocess(subprcs_cmd) - self.log.debug("Output: {}".format(output)) - - # Store data for integrator - thumbnail_data[output_type] = { - "path": full_output_path, - "filename_append": file_end - } - - repre["thumbnail_data"] = thumbnail_data diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index a8e6999e8d..cc71fce49e 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -384,65 +384,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): repre['published_path'] = dst self.log.debug("__ dst: {}".format(dst)) - thumbnail_data = {} - if 'thumbnail' in repre.get('tags', []): - self.log.debug(( - "Looking for scaled thumbnails in <{}>" - ).format(repre["name"])) - # prepare template for thumbnails - # - same as anatomy but keys in basename are replaced with - # one single key `thumb_file_name` - # - template is same for all thumbnails - template_base_name = os.path.basename(template) - thumb_template = template.replace( - template_base_name, "{thumb_file_name}" - ) - self.log.debug( - "Thumbnail template: {}".format(thumb_template) - ) - # get orig thumbnail filename - repre_basename = os.path.basename(dst) - repre_file, repre_ext = os.path.splitext(repre_basename) - # get thumbnail data from reresentation (if there are any) - _thumbnail_data = repre.pop("thumbnail_data", {}) - if _thumbnail_data: - thumbnail_data["template"] = thumb_template - - for thumb_type, thumb_info in _thumbnail_data.items(): - _src = thumb_info["path"] - - # get filename appending "like `S` for small thumb" - filename_append = thumb_info["filename_append"] - thumb_file_name = "{}_{}{}".format( - repre_file, filename_append, repre_ext - ) - _template_data = template_data.copy() - _template_data["thumb_file_name"] = thumb_file_name - # fill thumbnail template with prepared data - self.log.debug( - "Thumbnail <{}> template data: {}".format( - thumb_type, _template_data - ) - ) - template_filled = thumb_template.format( - **_template_data - ) - _dst = os.path.normpath( - template_filled - ).replace("..", ".") - self.log.debug( - "Thumbnail <{}> src: {} || dst: {}".format( - thumb_type, _src, _dst - ) - ) - # add to transfers - instance.data["transfers"].append([_src, _dst]) - # store full path and additional context data - thumbnail_data[thumb_type] = { - "path": _dst, - "context": {"thumb_file_name": thumb_file_name} - } - representation = { "schema": "pype:representation-2.0", "type": "representation", @@ -468,9 +409,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): } } - if thumbnail_data: - representation["data"]["thumbnail_data"] = thumbnail_data - if sequence_repre and repre.get("frameStart"): representation['context']['frame'] = repre.get("frameStart") @@ -485,7 +423,8 @@ 
class IntegrateAssetNew(pyblish.api.InstancePlugin): for rep in instance.data["representations"]: self.log.debug("__ represNAME: {}".format(rep['name'])) self.log.debug("__ represPATH: {}".format(rep['published_path'])) - io.insert_many(representations) + result = io.insert_many(representations) + instance.data["published_representation_ids"] = result.inserted_ids # self.log.debug("Representation: {}".format(representations)) self.log.info("Registered {} items".format(len(representations))) From 9228536f52c52492ad9c074a5b97869b2031c63b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 4 Dec 2019 18:30:32 +0100 Subject: [PATCH 009/434] added integrate thumbnails plugin --- .../global/publish/integrate_thumbnail.py | 141 ++++++++++++++++++ 1 file changed, 141 insertions(+) create mode 100644 pype/plugins/global/publish/integrate_thumbnail.py diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py new file mode 100644 index 0000000000..9a7418eebe --- /dev/null +++ b/pype/plugins/global/publish/integrate_thumbnail.py @@ -0,0 +1,141 @@ +import os +import sys +import errno +import shutil +import copy + +import six +import pyblish.api +from bson.objectid import ObjectId + +from avalon import api, dbio + + +class IntegrateThumbnails(pyblish.api.InstancePlugin): + """Integrate Thumbnails.""" + + label = "Integrate Thumbnails" + order = pyblish.api.IntegratorOrder + 0.01 + families = ["review"] + + def process(self, instance): + repre_ids = instance.get("published_representation_ids") + if not repre_ids: + self.log.debug( + "There are not published representation ids on the instance." + ) + return + + project_name = api.Session["AVALON_PROJECT"] + + anatomy = instance.context.data["anatomy"] + if "publish" not in anatomy.templates: + self.log.error("Anatomy does not have set publish key!") + return + + if "thumbnail" not in anatomy.templates["publish"]: + self.log.warning(( + "There is not set \"thumbnail\" template for project {}" + ).format(project_name)) + return + + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + + dbio.install() + repres = dbio.find({ + "_id": {"$in": repre_ids}, + "type": "representation" + }) + if not repres: + self.log.debug(( + "There are not representations in database with ids {}" + ).format(str(repre_ids))) + return + + thumb_repre = None + for repre in repres: + if repre["name"].lower() == "thumbnail": + thumb_repre = repre + break + + if not thumb_repre: + self.log.debug( + "There is not representation with name \"thumbnail\"" + ) + return + + version = dbio.find_one({"_id": thumb_repre["parent"]}) + if not version: + self.log.warning("There does not exist version with id {}".format( + str(thumb_repre["parent"]) + )) + return + + # Get full path to thumbnail file from representation + src_full_path = os.path.normpath(thumb_repre["data"]["path"]) + if not os.path.exists(src_full_path): + self.log.warning("Thumbnail file was not found. 
Path: {}".format( + src_full_path + )) + return + + # Create id for mongo entity now to fill anatomy template + thumbnail_id = ObjectId() + + # Prepare anatomy template fill data + template_data = copy.deepcopy(thumb_repre["context"]) + template_data["_id"] = str(thumbnail_id) + template_data["thumbnail_root"] = os.environ.get( + "AVALON_THUMBNAIL_ROOT" + ) + + anatomy_filled = anatomy.format(template_data) + final_path = anatomy_filled.get("publish", {}).get("thumbnail") + if not final_path: + self.log.warning(( + "Anatomy template was not filled with entered data" + "\nTemplate: {} " + "\nData: {}" + ).format(thumbnail_template, str(template_data))) + return + + dst_full_path = os.path.normpath(final_path) + self.log.debug( + "Copying file .. {} -> {}".format(src_full_path, dst_full_path) + ) + dirname = os.path.dirname(dst_full_path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno != errno.EEXIST: + tp, value, tb = sys.exc_info() + six.reraise(tp, value, tb) + + shutil.copy(src_full_path, dst_full_path) + + # Clean template data from keys that are dynamic + template_data.pop("_id") + template_data.pop("thumbnail_root") + + thumbnail_entity = { + "_id": thumbnail_id, + "type": "thumbnail", + "schema": "pype:thumbnail-1.0", + "data": { + "template": thumbnail_template, + "template_data": template_data + } + } + # Create thumbnail entity + dbio.insert_one(thumbnail_entity) + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_entity)) + ) + # Set thumbnail id for version + dbio.update_one( + {"_id": version["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( + version["name"], str(version["_id"]) + )) From 295037365ebd3b5349d3464c9e6dfbe1a04991d5 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Thu, 5 Dec 2019 00:30:48 +0100 Subject: [PATCH 010/434] maya: changing render collector --- pype/plugins/maya/create/create_render.py | 112 +++++++++++--------- pype/plugins/maya/publish/collect_render.py | 24 +++++ 2 files changed, 88 insertions(+), 48 deletions(-) create mode 100644 pype/plugins/maya/publish/collect_render.py diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index a8fbd664d3..c54aaaa76c 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -4,23 +4,31 @@ import appdirs import requests from maya import cmds -import maya.app.renderSetup.model.override as override -import maya.app.renderSetup.model.selector as selector -import maya.app.renderSetup.model.collection as collection -import maya.app.renderSetup.model.renderLayer as renderLayer import maya.app.renderSetup.model.renderSetup as renderSetup import pype.maya.lib as lib import avalon.maya +class RenderSetupListObserver: + """This will later server as handler to renderSetup changes""" + + def listItemAdded(self, item): + # TODO(antirotor): Implement + self.items.append(item) + print("* added {}".format(item.name())) + + def listItemRemoved(self, item): + print("removed") + + class CreateRender(avalon.maya.Creator): """Create render layer for export""" label = "Render" - family = "renderlayer" + family = "render" icon = "eye" - defaults = ['Main'] + defaults = ["Main"] _token = None _user = None @@ -31,32 +39,39 @@ class CreateRender(avalon.maya.Creator): def __init__(self, *args, **kwargs): super(CreateRender, self).__init__(*args, **kwargs) - self._create_render_settings() - self._rs = renderSetup.instance() - 
rl = self._rs.createRenderLayer("MyRenderSetupLayer") - cmds.sets() def process(self): exists = cmds.ls(self.name) if exists: return cmds.warning("%s already exists." % exists[0]) + use_selection = self.options.get("useSelection") with lib.undo_chunk(): - super(CreateRender, self).process() - cmds.setAttr("{}.machineList".format(self.name), lock=True) + self._create_render_settings() + instance = super(CreateRender, self).process() + cmds.setAttr("{}.machineList".format(instance), lock=True) + self._rs = renderSetup.instance() + # self._rs.addListObserver(RenderSetupListObserver) + if use_selection: + print(">>> processing existing layers") + layers = self._rs.getRenderLayers() + sets = [] + for layer in layers: + print(" - creating set for {}".format(layer.name())) + set = cmds.sets(n="LAYER_{}".format(layer.name())) + sets.append(set) + cmds.sets(sets, forceElement=instance) def _create_render_settings(self): - # We won't be publishing this one - self.data["id"] = "avalon.renderLayer" - # get pools pools = [] - deadline_url = os.environ.get('DEADLINE_REST_URL', None) - muster_url = os.environ.get('MUSTER_REST_URL', None) + deadline_url = os.environ.get("DEADLINE_REST_URL", None) + muster_url = os.environ.get("MUSTER_REST_URL", None) if deadline_url and muster_url: - self.log.error("Both Deadline and Muster are enabled. " - "Cannot support both.") + self.log.error( + "Both Deadline and Muster are enabled. " "Cannot support both." + ) raise RuntimeError("Both Deadline and Muster are enabled") if deadline_url is None: @@ -82,8 +97,8 @@ class CreateRender(avalon.maya.Creator): try: pools = self._get_muster_pools() except requests.exceptions.HTTPError as e: - if e.startswith('401'): - self.log.warning('access token expired') + if e.startswith("401"): + self.log.warning("access token expired") self._show_login() raise RuntimeError("Access token expired") except requests.exceptions.ConnectionError: @@ -91,8 +106,8 @@ class CreateRender(avalon.maya.Creator): raise RuntimeError("Cannot connect to {}".format(muster_url)) pool_names = [] for pool in pools: - self.log.info(" - pool: {}".format(pool['name'])) - pool_names.append(pool['name']) + self.log.info(" - pool: {}".format(pool["name"])) + pool_names.append(pool["name"]) self.data["primaryPool"] = pool_names @@ -122,14 +137,12 @@ class CreateRender(avalon.maya.Creator): Show login dialog if access token is invalid or missing. 
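 
         For reference, `muster_cred.json` lives in the `pype-app` user data
         dir and only its token key is read here, so a minimal file could
         look like this (the value is, of course, hypothetical):
 
             {"token": "<muster-auth-token>"}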
""" - app_dir = os.path.normpath( - appdirs.user_data_dir('pype-app', 'pype') - ) - file_name = 'muster_cred.json' + app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype")) + file_name = "muster_cred.json" fpath = os.path.join(app_dir, file_name) - file = open(fpath, 'r') + file = open(fpath, "r") muster_json = json.load(file) - self._token = muster_json.get('token', None) + self._token = muster_json.get("token", None) if not self._token: self._show_login() raise RuntimeError("Invalid access token for Muster") @@ -142,26 +155,25 @@ class CreateRender(avalon.maya.Creator): """ Get render pools from muster """ - params = { - 'authToken': self._token - } - api_entry = '/api/pools/list' - response = self._requests_get( - self.MUSTER_REST_URL + api_entry, params=params) + params = {"authToken": self._token} + api_entry = "/api/pools/list" + response = self._requests_get(self.MUSTER_REST_URL + api_entry, + params=params) if response.status_code != 200: if response.status_code == 401: - self.log.warning('Authentication token expired.') + self.log.warning("Authentication token expired.") self._show_login() else: self.log.error( - 'Cannot get pools from Muster: {}'.format( - response.status_code)) - raise Exception('Cannot get pools from Muster') + ("Cannot get pools from " + "Muster: {}").format(response.status_code) + ) + raise Exception("Cannot get pools from Muster") try: - pools = response.json()['ResponseData']['pools'] + pools = response.json()["ResponseData"]["pools"] except ValueError as e: - self.log.error('Invalid response from Muster server {}'.format(e)) - raise Exception('Invalid response from Muster server') + self.log.error("Invalid response from Muster server {}".format(e)) + raise Exception("Invalid response from Muster server") return pools @@ -173,8 +185,8 @@ class CreateRender(avalon.maya.Creator): self.log.debug(api_url) login_response = self._requests_post(api_url, timeout=1) if login_response.status_code != 200: - self.log.error('Cannot show login form to Muster') - raise Exception('Cannot show login form to Muster') + self.log.error("Cannot show login form to Muster") + raise Exception("Cannot show login form to Muster") def _requests_post(self, *args, **kwargs): """ Wrapper for requests, disabling SSL certificate validation if @@ -186,8 +198,10 @@ class CreateRender(avalon.maya.Creator): WARNING: disabling SSL certificate validation is defeating one line of defense SSL is providing and it is not recommended. """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + if "verify" not in kwargs: + kwargs["verify"] = ( + False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True + ) # noqa return requests.post(*args, **kwargs) def _requests_get(self, *args, **kwargs): @@ -200,6 +214,8 @@ class CreateRender(avalon.maya.Creator): WARNING: disabling SSL certificate validation is defeating one line of defense SSL is providing and it is not recommended. 
""" - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + if "verify" not in kwargs: + kwargs["verify"] = ( + False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True + ) # noqa return requests.get(*args, **kwargs) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py new file mode 100644 index 0000000000..a9021831b6 --- /dev/null +++ b/pype/plugins/maya/publish/collect_render.py @@ -0,0 +1,24 @@ +from maya import cmds + +import pyblish.api + +from avalon import maya, api +import pype.maya.lib as lib + + +class CollectMayaRender(pyblish.api.InstancePlugin): + """Gather all publishable render layers from renderSetup""" + + order = pyblish.api.CollectorOrder + 0.01 + hosts = ["maya"] + label = "Collect Render Layers" + families = ["render"] + + def process(self, instance): + layers = instance.data['setMembers'] + self.log.debug('layers: {}'.format(layers)) + + for layer in layers: + # test if there are sets (subsets) to attach render to + sets = cmds.ls(layer, long=True, dag=True, sets=True) + self.log.debug(sets) From 7b2e5f1b9e5bbc90ad1fdf947e21b323e83f44a9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 5 Dec 2019 19:31:58 +0100 Subject: [PATCH 011/434] integrate thumbnails now works --- .../global/publish/integrate_thumbnail.py | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py index 9a7418eebe..08157187df 100644 --- a/pype/plugins/global/publish/integrate_thumbnail.py +++ b/pype/plugins/global/publish/integrate_thumbnail.py @@ -8,7 +8,7 @@ import six import pyblish.api from bson.objectid import ObjectId -from avalon import api, dbio +from avalon import api, io class IntegrateThumbnails(pyblish.api.InstancePlugin): @@ -19,7 +19,7 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): families = ["review"] def process(self, instance): - repre_ids = instance.get("published_representation_ids") + repre_ids = instance.data.get("published_representation_ids") if not repre_ids: self.log.debug( "There are not published representation ids on the instance." 
@@ -30,27 +30,24 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): anatomy = instance.context.data["anatomy"] if "publish" not in anatomy.templates: - self.log.error("Anatomy does not have set publish key!") - return + raise AssertionError("Anatomy does not have set publish key!") if "thumbnail" not in anatomy.templates["publish"]: - self.log.warning(( - "There is not set \"thumbnail\" template for project {}" + raise AssertionError(( + "There is not set \"thumbnail\" template for project \"{}\"" ).format(project_name)) - return thumbnail_template = anatomy.templates["publish"]["thumbnail"] - dbio.install() - repres = dbio.find({ + io.install() + repres = io.find({ "_id": {"$in": repre_ids}, "type": "representation" }) if not repres: - self.log.debug(( + raise AssertionError(( "There are not representations in database with ids {}" ).format(str(repre_ids))) - return thumb_repre = None for repre in repres: @@ -64,12 +61,13 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): ) return - version = dbio.find_one({"_id": thumb_repre["parent"]}) + version = io.find_one({"_id": thumb_repre["parent"]}) if not version: - self.log.warning("There does not exist version with id {}".format( - str(thumb_repre["parent"]) - )) - return + raise AssertionError( + "There does not exist version with id {}".format( + str(thumb_repre["parent"]) + ) + ) # Get full path to thumbnail file from representation src_full_path = os.path.normpath(thumb_repre["data"]["path"]) @@ -79,25 +77,27 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): )) return + filename, file_extension = os.path.splitext(src_full_path) # Create id for mongo entity now to fill anatomy template thumbnail_id = ObjectId() # Prepare anatomy template fill data template_data = copy.deepcopy(thumb_repre["context"]) - template_data["_id"] = str(thumbnail_id) - template_data["thumbnail_root"] = os.environ.get( - "AVALON_THUMBNAIL_ROOT" - ) + template_data.update({ + "_id": str(thumbnail_id), + "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), + "ext": file_extension, + "thumbnail_type": "thumbnail" + }) anatomy_filled = anatomy.format(template_data) final_path = anatomy_filled.get("publish", {}).get("thumbnail") if not final_path: - self.log.warning(( + raise AssertionError(( "Anatomy template was not filled with entered data" "\nTemplate: {} " "\nData: {}" ).format(thumbnail_template, str(template_data))) - return dst_full_path = os.path.normpath(final_path) self.log.debug( @@ -127,12 +127,12 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): } } # Create thumbnail entity - dbio.insert_one(thumbnail_entity) + io.insert_one(thumbnail_entity) self.log.debug( "Creating entity in database {}".format(str(thumbnail_entity)) ) # Set thumbnail id for version - dbio.update_one( + io.update_many( {"_id": version["_id"]}, {"$set": {"data.thumbnail_id": thumbnail_id}} ) From 8b33b22d3081d7bd876d1930509f759d9e460caa Mon Sep 17 00:00:00 2001 From: Jakub Trllo Date: Sat, 7 Dec 2019 13:03:44 +0100 Subject: [PATCH 012/434] create _id in representation before insert to DB to not require query them after --- pype/plugins/global/publish/integrate_new.py | 5 +++-- pype/plugins/global/publish/integrate_thumbnail.py | 14 +++----------- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index cc71fce49e..3422c95d73 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -385,6 +385,7 
@@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("__ dst: {}".format(dst)) representation = { + "_id": io.ObjectId(), "schema": "pype:representation-2.0", "type": "representation", "parent": version_id, @@ -423,8 +424,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): for rep in instance.data["representations"]: self.log.debug("__ represNAME: {}".format(rep['name'])) self.log.debug("__ represPATH: {}".format(rep['published_path'])) - result = io.insert_many(representations) - instance.data["published_representation_ids"] = result.inserted_ids + io.insert_many(representations) + instance.data["published_representations"] = representations # self.log.debug("Representation: {}".format(representations)) self.log.info("Registered {} items".format(len(representations))) diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py index 08157187df..bf6c62155f 100644 --- a/pype/plugins/global/publish/integrate_thumbnail.py +++ b/pype/plugins/global/publish/integrate_thumbnail.py @@ -19,8 +19,8 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): families = ["review"] def process(self, instance): - repre_ids = instance.data.get("published_representation_ids") - if not repre_ids: + published_repres = instance.data.get("published_representations") + if not published_repres: self.log.debug( "There are not published representation ids on the instance." ) @@ -40,17 +40,9 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): thumbnail_template = anatomy.templates["publish"]["thumbnail"] io.install() - repres = io.find({ - "_id": {"$in": repre_ids}, - "type": "representation" - }) - if not repres: - raise AssertionError(( - "There are not representations in database with ids {}" - ).format(str(repre_ids))) thumb_repre = None - for repre in repres: + for repre in published_repres: if repre["name"].lower() == "thumbnail": thumb_repre = repre break From 5b916b7d723ed0607cfb6fbc90e92e824cea83b8 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 17 Dec 2019 13:28:04 +0100 Subject: [PATCH 013/434] (maya) work on new render collector --- pype/plugins/maya/publish/collect_render.py | 102 +++++++++++++++++++- 1 file changed, 98 insertions(+), 4 deletions(-) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index a9021831b6..fee6581dd8 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -1,4 +1,9 @@ +import re + from maya import cmds +from maya import OpenMaya as om + +import maya.app.renderSetup.model.renderSetup as renderSetup import pyblish.api @@ -15,10 +20,99 @@ class CollectMayaRender(pyblish.api.InstancePlugin): families = ["render"] def process(self, instance): - layers = instance.data['setMembers'] - self.log.debug('layers: {}'.format(layers)) + collected_render_layers = instance.data['setMembers'] + + self._rs = renderSetup.instance() + maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()} + + self.maya_layers = maya_render_layers + + for layer in collected_render_layers: + # every layer in set should start with `LAYER_` prefix + try: + expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1) + except IndexError: + msg = ("Invalid layer name in set [ {} ]".format(layer)) + self.log.warnig(msg) + continue + + # check if layer is part of renderSetup + if expected_layer_name not in maya_render_layers: + msg = ("Render layer [ {} ] is not in " + "Render 
Setup".format(expected_layer_name)) + self.log.warning(msg) + continue + + # check if layer is renderable + if not maya_render_layers[expected_layer_name].isRenderable(): + msg = ("Render layer [ {} ] is not " + "renderable".format(expected_layer_name)) + self.log.warning(msg) + continue - for layer in layers: # test if there are sets (subsets) to attach render to - sets = cmds.ls(layer, long=True, dag=True, sets=True) + sets = cmds.ls(expected_layer_name, long=True, dag=True, sets=True) self.log.debug(sets) + + # Get layer specific settings, might be overrides + data = { + "subset": expected_layer_name, + "setMembers": layer, + "publish": True, + "frameStart": self.get_render_attribute("startFrame", + layer=layer), + "frameEnd": self.get_render_attribute("endFrame", + layer=layer), + "byFrameStep": self.get_render_attribute("byFrameStep", + layer=layer), + "renderer": self.get_render_attribute("currentRenderer", + layer=layer), + + # instance subset + "family": "Render Layers", + "families": ["renderlayer"], + "asset": asset, + "time": api.time(), + "author": context.data["user"], + + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": filepath + } + + # Apply each user defined attribute as data + for attr in cmds.listAttr(layer, userDefined=True) or list(): + try: + value = cmds.getAttr("{}.{}".format(layer, attr)) + except Exception: + # Some attributes cannot be read directly, + # such as mesh and color attributes. These + # are considered non-essential to this + # particular publishing pipeline. + value = None + + data[attr] = value + + # Include (optional) global settings + # TODO(marcus): Take into account layer overrides + # Get global overrides and translate to Deadline values + overrides = self.parse_options(render_globals) + data.update(**overrides) + + # Define nice label + label = "{0} ({1})".format(layername, data["asset"]) + label += " [{0}-{1}]".format(int(data["frameStart"]), + int(data["frameEnd"])) + + instance = context.create_instance(layername) + instance.data["label"] = label + instance.data.update(data) + pass + + def get_attributes(self, layer, attribute): + + pass + + def _get_overrides(self, layer): + rset = self.maya_layers[layer].renderSettingsCollectionInstance() + return rset.getOverrides() From f75f38b7573a41c71e28a525d7815fde9e556f29 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Fri, 20 Dec 2019 02:04:03 +0100 Subject: [PATCH 014/434] (maya) added render layer syncing, updated collectors --- pype/maya/__init__.py | 13 +- pype/maya/lib.py | 133 ++++++++++++++++++ pype/plugins/maya/create/create_render.py | 2 +- pype/plugins/maya/publish/collect_render.py | 130 +++++++++++++++-- .../maya/publish/collect_renderable_camera.py | 2 +- .../maya/publish/collect_renderlayers.py | 1 + 6 files changed, 264 insertions(+), 17 deletions(-) diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index b4dbc52bc8..fcc557e7bc 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -8,7 +8,6 @@ from avalon import api as avalon, pipeline, maya from avalon.maya.pipeline import IS_HEADLESS from avalon.tools import workfiles from pyblish import api as pyblish -from pypeapp import config from ..lib import ( any_outdated @@ -156,6 +155,12 @@ def on_open(_): from avalon.vendor.Qt import QtWidgets from ..widgets import popup + cmds.evalDeferred( + "from pype.maya import lib;lib.remove_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_observer()") + cmds.evalDeferred( + 
"from pype.maya import lib;lib.add_render_layer_change_observer()") # # Update current task for the current scene # update_task_from_path(cmds.file(query=True, sceneName=True)) @@ -193,6 +198,12 @@ def on_new(_): """Set project resolution and fps when create a new file""" avalon.logger.info("Running callback on new..") with maya.suspended_refresh(): + cmds.evalDeferred( + "from pype.maya import lib;lib.remove_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_change_observer()") lib.set_context_settings() diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 0890d3863e..74bae96abb 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2422,3 +2422,136 @@ class shelf(): cmds.deleteUI(each) else: cmds.shelfLayout(self.name, p="ShelfLayout") + + +def _get_render_instance(): + objectset = cmds.ls("*.id", long=True, type="objectSet", + recursive=True, objectsOnly=True) + + for objset in objectset: + + if not cmds.attributeQuery("id", node=objset, exists=True): + continue + + id_attr = "{}.id".format(objset) + if cmds.getAttr(id_attr) != "pyblish.avalon.instance": + continue + + has_family = cmds.attributeQuery("family", + node=objset, + exists=True) + if not has_family: + continue + + if cmds.getAttr("{}.family".format(objset)) == 'render': + return objset + + return None + + +renderItemObserverList = [] + + +class RenderSetupListObserver: + + def listItemAdded(self, item): + print("--- adding ...") + self._add_render_layer(item) + + def listItemRemoved(self, item): + print("--- removing ...") + self._remove_render_layer(item.name()) + + def _add_render_layer(self, item): + render_set = _get_render_instance() + layer_name = item.name() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) + if not "LAYER_{}".format(layer_name) in members: + print(" - creating set for {}".format(layer_name)) + set = cmds.sets(n="LAYER_{}".format(layer_name)) + cmds.sets(set, forceElement=render_set) + rio = RenderSetupItemObserver(item) + print("- adding observer for {}".format(item.name())) + item.addItemObserver(rio.itemChanged) + renderItemObserverList.append(rio) + + def _remove_render_layer(self, layer_name): + render_set = _get_render_instance() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) + if "LAYER_{}".format(layer_name) in members: + print(" - removing set for {}".format(layer_name)) + cmds.delete(n="LAYER_{}".format(layer_name)) + + +class RenderSetupItemObserver(): + + def __init__(self, item): + self.item = item + self.original_name = item.name() + + def itemChanged(self, *args, **kwargs): + if self.item.name() == self.original_name: + return + + render_set = _get_render_instance() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) + if "LAYER_{}".format(self.original_name) in members: + print(" <> renaming {} to {}".format(self.original_name, + self.item.name())) + cmds.rename("LAYER_{}".format(self.original_name), + "LAYER_{}".format(self.item.name())) + self.original_name = self.item.name() + + +renderListObserver = RenderSetupListObserver() + + +def add_render_layer_change_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + rs = renderSetup.instance() + render_set = _get_render_instance() + if not render_set: + return + + members = cmds.sets(render_set, query=True) + layers = rs.getRenderLayers() + for layer in layers: + if 
"LAYER_{}".format(layer.name()) in members: + rio = RenderSetupItemObserver(layer) + print("- adding observer for {}".format(layer.name())) + layer.addItemObserver(rio.itemChanged) + renderItemObserverList.append(rio) + + +def add_render_layer_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + print("> adding renderSetup observer ...") + rs = renderSetup.instance() + rs.addListObserver(renderListObserver) + pass + + +def remove_render_layer_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + print("< removing renderSetup observer ...") + rs = renderSetup.instance() + try: + rs.removeListObserver(renderListObserver) + except ValueError: + # no observer set yet + pass diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index c54aaaa76c..f847b8add5 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -119,7 +119,7 @@ class CreateRender(avalon.maya.Creator): self.data["suspendPublishJob"] = False self.data["extendFrames"] = False self.data["overrideExistingFrame"] = True - self.data["useLegacyRenderLayers"] = True + # self.data["useLegacyRenderLayers"] = True self.data["priority"] = 50 self.data["framesPerTask"] = 1 self.data["whitelist"] = False diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index fee6581dd8..5436fbd7e4 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -2,6 +2,7 @@ import re from maya import cmds from maya import OpenMaya as om +from pprint import pprint import maya.app.renderSetup.model.renderSetup as renderSetup @@ -11,7 +12,7 @@ from avalon import maya, api import pype.maya.lib as lib -class CollectMayaRender(pyblish.api.InstancePlugin): +class CollectMayaRender(pyblish.api.ContextPlugin): """Gather all publishable render layers from renderSetup""" order = pyblish.api.CollectorOrder + 0.01 @@ -19,8 +20,21 @@ class CollectMayaRender(pyblish.api.InstancePlugin): label = "Collect Render Layers" families = ["render"] - def process(self, instance): - collected_render_layers = instance.data['setMembers'] + def process(self, context): + render_instance = None + for instance in context: + if 'render' in instance.data['families']: + render_instance = instance + + if not render_instance: + self.log.info("No render instance found, skipping render " + "layer collection.") + return + + render_globals = render_instance + collected_render_layers = render_instance.data['setMembers'] + filepath = context.data["currentFile"].replace("\\", "/") + asset = api.Session["AVALON_ASSET"] self._rs = renderSetup.instance() maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()} @@ -36,6 +50,7 @@ class CollectMayaRender(pyblish.api.InstancePlugin): self.log.warnig(msg) continue + self.log.info("processing %s" % layer) # check if layer is part of renderSetup if expected_layer_name not in maya_render_layers: msg = ("Render layer [ {} ] is not in " @@ -52,21 +67,23 @@ class CollectMayaRender(pyblish.api.InstancePlugin): # test if there are sets (subsets) to attach render to sets = cmds.ls(expected_layer_name, long=True, dag=True, sets=True) - self.log.debug(sets) + self.log.debug("marked subsets: {}".format(sets)) + layer_name = "rs_{}".format(expected_layer_name) + self.log.info(" - %s" % layer_name) # Get layer specific settings, might be overrides data = { "subset": expected_layer_name, - "setMembers": layer, + 
"setMembers": expected_layer_name, "publish": True, "frameStart": self.get_render_attribute("startFrame", - layer=layer), + layer=layer_name), "frameEnd": self.get_render_attribute("endFrame", - layer=layer), + layer=layer_name), "byFrameStep": self.get_render_attribute("byFrameStep", - layer=layer), + layer=layer_name), "renderer": self.get_render_attribute("currentRenderer", - layer=layer), + layer=layer_name), # instance subset "family": "Render Layers", @@ -96,23 +113,108 @@ class CollectMayaRender(pyblish.api.InstancePlugin): # Include (optional) global settings # TODO(marcus): Take into account layer overrides # Get global overrides and translate to Deadline values - overrides = self.parse_options(render_globals) + overrides = self.parse_options(str(render_globals)) data.update(**overrides) # Define nice label - label = "{0} ({1})".format(layername, data["asset"]) + label = "{0} ({1})".format(expected_layer_name, data["asset"]) label += " [{0}-{1}]".format(int(data["frameStart"]), int(data["frameEnd"])) - instance = context.create_instance(layername) + instance = context.create_instance(expected_layer_name) instance.data["label"] = label instance.data.update(data) pass - def get_attributes(self, layer, attribute): + def parse_options(self, render_globals): + """Get all overrides with a value, skip those without - pass + Here's the kicker. These globals override defaults in the submission + integrator, but an empty value means no overriding is made. + Otherwise, Frames would override the default frames set under globals. + + Args: + render_globals (str): collection of render globals + + Returns: + dict: only overrides with values + """ + + attributes = maya.read(render_globals) + + self.log.info(attributes) + + options = {"renderGlobals": {}} + options["renderGlobals"]["Priority"] = attributes["priority"] + + # Check for specific pools + pool_a, pool_b = self._discover_pools(attributes) + options["renderGlobals"].update({"Pool": pool_a}) + if pool_b: + options["renderGlobals"].update({"SecondaryPool": pool_b}) + + legacy = attributes["useLegacyRenderLayers"] + options["renderGlobals"]["UseLegacyRenderLayers"] = legacy + + # Machine list + machine_list = attributes["machineList"] + if machine_list: + key = "Whitelist" if attributes["whitelist"] else "Blacklist" + options['renderGlobals'][key] = machine_list + + # Suspend publish job + state = "Suspended" if attributes["suspendPublishJob"] else "Active" + options["publishJobState"] = state + + chunksize = attributes.get("framesPerTask", 1) + options["renderGlobals"]["ChunkSize"] = chunksize + + # Override frames should be False if extendFrames is False. 
This is + # to ensure it doesn't go off doing crazy unpredictable things + override_frames = False + extend_frames = attributes.get("extendFrames", False) + if extend_frames: + override_frames = attributes.get("overrideExistingFrame", False) + + options["extendFrames"] = extend_frames + options["overrideExistingFrame"] = override_frames + + maya_render_plugin = "MayaBatch" + if not attributes.get("useMayaBatch", True): + maya_render_plugin = "MayaCmd" + + options["mayaRenderPlugin"] = maya_render_plugin + + return options + + def _discover_pools(self, attributes): + + pool_a = None + pool_b = None + + # Check for specific pools + pool_b = [] + if "primaryPool" in attributes: + pool_a = attributes["primaryPool"] + if "secondaryPool" in attributes: + pool_b = attributes["secondaryPool"] + + else: + # Backwards compatibility + pool_str = attributes.get("pools", None) + if pool_str: + pool_a, pool_b = pool_str.split(";") + + # Ensure empty entry token is caught + if pool_b == "-": + pool_b = None + + return pool_a, pool_b def _get_overrides(self, layer): rset = self.maya_layers[layer].renderSettingsCollectionInstance() return rset.getOverrides() + + def get_render_attribute(self, attr, layer): + return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), + layer=layer) diff --git a/pype/plugins/maya/publish/collect_renderable_camera.py b/pype/plugins/maya/publish/collect_renderable_camera.py index 6b1732c3cb..707d52ef69 100644 --- a/pype/plugins/maya/publish/collect_renderable_camera.py +++ b/pype/plugins/maya/publish/collect_renderable_camera.py @@ -16,7 +16,7 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin): "renderlayer"] def process(self, instance): - layer = instance.data["setMembers"] + layer = "rs_%s" % instance.data["setMembers"] cameras = cmds.ls(type="camera", long=True) renderable = [c for c in cameras if diff --git a/pype/plugins/maya/publish/collect_renderlayers.py b/pype/plugins/maya/publish/collect_renderlayers.py index 73a4d237ab..0012b28ac9 100644 --- a/pype/plugins/maya/publish/collect_renderlayers.py +++ b/pype/plugins/maya/publish/collect_renderlayers.py @@ -12,6 +12,7 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder + 0.01 hosts = ["maya"] label = "Render Layers" + active = False def process(self, context): From e951cd36b00531935bac5ee5dfb069b10bbcc7dd Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Sat, 21 Dec 2019 01:08:01 +0100 Subject: [PATCH 015/434] (maya) work on attaching renders to subsets --- pype/maya/lib.py | 2 +- .../global/publish/collect_filesequences.py | 6 ++ pype/plugins/global/publish/integrate_new.py | 2 + .../global/publish/submit_publish_job.py | 13 +++++ pype/plugins/global/publish/update_version.py | 57 +++++++++++++++++++ pype/plugins/maya/publish/collect_render.py | 7 ++- 6 files changed, 85 insertions(+), 2 deletions(-) create mode 100644 pype/plugins/global/publish/update_version.py diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 74bae96abb..28c57bbcdb 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2488,7 +2488,7 @@ class RenderSetupListObserver: members = cmds.sets(render_set, query=True) if "LAYER_{}".format(layer_name) in members: print(" - removing set for {}".format(layer_name)) - cmds.delete(n="LAYER_{}".format(layer_name)) + cmds.delete("LAYER_{}".format(layer_name)) class RenderSetupItemObserver(): diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index d0ff5722a3..80dddcd6e6 
100644
--- a/pype/plugins/global/publish/collect_filesequences.py
+++ b/pype/plugins/global/publish/collect_filesequences.py
@@ -191,6 +191,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
             families.append("review")
         if "write" in instance_family:
             families.append("write")
+        if data.get("attachTo"):
+            families.append("attach-render")
 
         for collection in collections:
             instance = context.create_instance(str(collection))
@@ -232,6 +234,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
             })
             if lut_path:
                 instance.data.update({"lutPath": lut_path})
+
+            if data.get("attachTo"):
+                instance.data.update({"attachTo": data.get("attachTo")})
+
             instance.append(collection)
             instance.context.data['fps'] = fps
 
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index faade613f2..360b97e4be 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -83,6 +83,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         if [ef for ef in self.exclude_families
                 if instance.data["family"] in ef]:
             return
+        if "attach-render" in instance.data["families"]:
+            return
 
         self.register(instance)
 
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index 2a254b015c..abce4672e7 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -282,6 +282,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         relative_path = os.path.relpath(source, api.registered_root())
         source = os.path.join("{root}", relative_path).replace("\\", "/")
 
+        # find subsets and version to attach render to
+        attach_to = instance.data.get("attachTo")
+        attach_subset_versions = []
+        if attach_to:
+            for subset in attach_to:
+                for instance in context:
+                    if instance.data["subset"] != subset:
+                        continue
+                    attach_subset_versions.append(
+                        {"version": instance.data["version"],
+                         "subset": subset})
+
         # Write metadata for publish job
         metadata = {
             "asset": asset,
@@ -293,6 +305,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "source": source,
             "user": context.data["user"],
             "version": context.data["version"],
+            "attachTo": attach_subset_versions,
             # Optional metadata (for debugging)
             "metadata": {
                 "instance": data,
diff --git a/pype/plugins/global/publish/update_version.py b/pype/plugins/global/publish/update_version.py
new file mode 100644
index 0000000000..ebf89b84e1
--- /dev/null
+++ b/pype/plugins/global/publish/update_version.py
@@ -0,0 +1,57 @@
+import pyblish.api
+import logging
+from avalon import api, io
+
+log = logging.getLogger(__name__)
+
+
+class UpdateVersion(pyblish.api.InstancePlugin):
+    """Update existing subset version with new data"""
+
+    label = "Update Subset Version"
+    order = pyblish.api.IntegratorOrder
+    families = ["attach-render"]
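+    # NOTE: the "attach-render" family is added by the file sequence
+    # collector for instances that carry "attachTo" data, so this plugin
+    # only runs for renders meant to extend an existing subset version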
+
+    def process(self, instance):
+        # Required environment variables
+        PROJECT = api.Session["AVALON_PROJECT"]
+        ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
+        TASK = instance.data.get("task") or api.Session["AVALON_TASK"]
+        LOCATION = api.Session["AVALON_LOCATION"]
+
+        context = instance.context
+
+        stagingdir = instance.data.get("stagingDir")
+        if not stagingdir:
+            self.log.info('''{} is missing reference to staging
+                            directory. Will try to get it from
+                            representation.'''.format(instance))
+
+        # extra check if stagingDir actually exists and is available
+
+        self.log.debug("Establishing staging directory @ %s" % stagingdir)
+
+        # Ensure at least one file is set up for transfer in staging dir.
+        repres = instance.data.get("representations", None)
+        assert repres, "Instance has no files to transfer"
+        assert isinstance(repres, (list, tuple)), (
+            "Instance 'representations' must be a list, got: {0}".format(repres)
+        )
+
+        # FIXME: io is not initialized at this point for shell host
+        io.install()
+        project = io.find_one({"type": "project"})
+
+        asset = io.find_one({"type": "asset",
+                             "name": ASSET,
+                             "parent": project["_id"]})
+
+        assert instance.data.get("attachTo"), "no subset to attach to"
+        for subset_to_attach in instance.data.get("attachTo"):
+
+            subset = io.find_one({"type": "subset",
+                                  "parent": asset["_id"],
+                                  "name": subset_to_attach["subset"]})
+
+        assert all([project, asset]), ("Could not find current project or "
+                                       "asset '%s'" % ASSET)
diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index 5436fbd7e4..ef760def5b 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -66,7 +66,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                 continue
 
             # test if there are sets (subsets) to attach render to
-            sets = cmds.ls(expected_layer_name, long=True, dag=True, sets=True)
+            sets = cmds.sets(layer, query=True) or []
+            if sets:
+                for s in sets:
+                    self.log.info(" - attach render to: {}".format(s))
+
             self.log.debug("marked subsets: {}".format(sets))
 
             layer_name = "rs_{}".format(expected_layer_name)
@@ -74,6 +78,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             # Get layer specific settings, might be overrides
             data = {
                 "subset": expected_layer_name,
+                "attachTo": sets,
                 "setMembers": expected_layer_name,
                 "publish": True,
                 "frameStart": self.get_render_attribute("startFrame",

From 66b8a18a664d90302abefe8b2d8450e0a9fbc5fd Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Sat, 21 Dec 2019 01:10:00 +0100
Subject: [PATCH 016/434] (maya) adding changes to update previous commit
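
Each "attachTo" entry handled here is expected to carry the subset name
and the version number to attach to, roughly like this (a sketch, values
are illustrative):

    {"subset": "modelMain", "version": 3}

The version document is then created under that subset, or updated when
it already exists.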
---
 pype/plugins/global/publish/update_version.py | 112 +++++++++++++++++-
 pype/plugins/maya/publish/collect_render.py   |   8 +-
 2 files changed, 111 insertions(+), 9 deletions(-)

diff --git a/pype/plugins/global/publish/update_version.py b/pype/plugins/global/publish/update_version.py
index ebf89b84e1..771bc04bba 100644
--- a/pype/plugins/global/publish/update_version.py
+++ b/pype/plugins/global/publish/update_version.py
@@ -1,5 +1,7 @@
-import pyblish.api
+import os
 import logging
+
+import pyblish.api
 from avalon import api, io
 
 log = logging.getLogger(__name__)
@@ -53,5 +55,109 @@ class UpdateVersion(pyblish.api.InstancePlugin):
                                   "parent": asset["_id"],
                                   "name": subset_to_attach["subset"]})
 
-        assert all([project, asset]), ("Could not find current project or "
-                                       "asset '%s'" % ASSET)
+            assert all([project, asset]), ("Could not find current project "
+                                           "or asset '%s'" % ASSET)
+
+            attach_version = subset_to_attach["version"]
+
+            version_data = self.create_version_data(context, instance)
+
+            version_data_instance = instance.data.get('versionData')
+
+            if version_data_instance:
+                version_data.update(version_data_instance)
+
+            version = self.create_version(subset=subset,
+                                          version_number=attach_version,
+                                          locations=[LOCATION],
+                                          data=version_data)
+
+            self.log.debug("Creating version ...")
+            existing_version = io.find_one({
+                'type': 'version',
+                'parent': subset["_id"],
+                'name': attach_version
+            })
+            if existing_version is None:
+                version_id = io.insert_one(version).inserted_id
+            else:
+                io.update_many({
+                    'type': 'version',
+                    'parent': subset["_id"],
+                    'name': attach_version
+                }, {'$set': version}
+                )
+                version_id = existing_version['_id']
+            instance.data['version'] = version['name']
+
+    def create_version(self, subset, version_number, locations, data=None):
+        """Build the version document to be stored for given *subset*.
+
+        Args:
+            subset (dict): the registered subset of the asset
+            version_number (int): the version number
+            locations (list): the currently registered locations
+
+        Returns:
+            dict: collection of data to create a version
+        """
+        # Imprint currently registered location
+        version_locations = [location for location in locations if
+                             location is not None]
+
+        return {"schema": "pype:version-3.0",
+                "type": "version",
+                "parent": subset["_id"],
+                "name": version_number,
+                "locations": version_locations,
+                "data": data}
+
+    def create_version_data(self, context, instance):
+        """Create the data collection for the version
+
+        Args:
+            context: the current context
+            instance: the current instance being published
+
+        Returns:
+            dict: the version data gathered from context and instance
+        """
+
+        families = []
+        current_families = instance.data.get("families", list())
+        instance_family = instance.data.get("family", None)
+
+        if instance_family is not None:
+            families.append(instance_family)
+        families += current_families
+
+        self.log.debug("Registered root: {}".format(api.registered_root()))
+        # create relative source path for DB
+        try:
+            source = instance.data['source']
+        except KeyError:
+            source = context.data["currentFile"]
+            source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
+                                    api.registered_root())
+        relative_path = os.path.relpath(source, api.registered_root())
+        source = os.path.join("{root}", relative_path).replace("\\", "/")
+
+        self.log.debug("Source: {}".format(source))
+        version_data = {"families": families,
+                        "time": context.data["time"],
+                        "author": context.data["user"],
+                        "source": source,
+                        "comment": context.data.get("comment"),
+                        "machine": context.data.get("machine"),
+                        "fps": context.data.get("fps")}
+
+        # Include optional data if present in instance data
+        optionals = [
+            "frameStart", "frameEnd", "step", "handles",
+            "handleEnd", "handleStart", "sourceHashes"
+        ]
+        for key in optionals:
+            if key in instance.data:
+                version_data[key] = instance.data[key]
+
+        return version_data
diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index ef760def5b..aea17b6e87 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -69,12 +69,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             sets = cmds.sets(layer, query=True) or []
             if sets:
                 for s in sets:
-                    self.log.info(" - attach render to: {}".format(s))
-
-            self.log.debug("marked subsets: {}".format(sets))
+                    self.log.info(" -> attach render to: {}".format(s))
 
             layer_name = "rs_{}".format(expected_layer_name)
-            self.log.info(" - %s" % layer_name)
+
             # Get layer specific settings, might be overrides
             data = {
                 "subset": expected_layer_name,

From 4909dcf43d113fe1c04d610129dfa51012ab7fee Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Wed, 8 Jan 2020 18:26:58 +0100
Subject: [PATCH 017/434] modified filesequence collector and attaching subsets
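
The collector now branches on the loaded metadata: attach found
collections to an existing subset version, add them to an explicitly
named subset, or create one subset per collection. A rough sketch of the
metadata it reads (keys as used in the code below; values illustrative):

    {
        "subset": "renderMain",
        "attachTo": [{"subset": "modelMain", "version": 3}],
        "frameStart": 1001,
        "frameEnd": 1100,
        "fps": 25
    }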
---
 .../global/publish/collect_filesequences.py   | 251 +++++++++++++-----
 .../global/publish/submit_publish_job.py      |   5 +-
 pype/plugins/maya/publish/collect_render.py   |   8 +-
 3 files changed, 188 insertions(+), 76 deletions(-)

diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py
index 80dddcd6e6..e7fe085027 100644
--- a/pype/plugins/global/publish/collect_filesequences.py
+++ b/pype/plugins/global/publish/collect_filesequences.py
@@ -54,10 +54,6 @@ def collect(root,
                               patterns=[pattern],
                               minimum_items=1)
 
-    # Ignore any remainders
-    if remainder:
-        print("Skipping remainder {}".format(remainder))
-
    # Exclude any frames outside start and end frame.
     for collection in collections:
         for index in list(collection.indexes):
@@ -71,7 +67,7 @@ def collect(root,
     # Keep only collections that have at least a single frame
     collections = [c for c in collections if c.indexes]
 
-    return collections
+    return collections, remainder
 
 
 class CollectRenderedFrames(pyblish.api.ContextPlugin):
@@ -119,8 +115,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
             try:
                 data = json.load(f)
             except Exception as exc:
-                self.log.error("Error loading json: "
-                               "{} - Exception: {}".format(path, exc))
+                self.log.error(
+                    "Error loading json: "
+                    "{} - Exception: {}".format(path, exc)
+                )
                 raise
 
             cwd = os.path.dirname(path)
@@ -152,7 +150,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
 
             pixel_aspect = instance.get("pixelAspect", 1)
             lut_path = instance.get("lutPath", None)
-
         else:
             # Search in directory
             data = dict()
@@ -163,14 +160,17 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
         if regex:
             self.log.info("Using regex: {}".format(regex))
 
-        collections = collect(root=root,
-                              regex=regex,
-                              exclude_regex=data.get("exclude_regex"),
-                              frame_start=data.get("frameStart"),
-                              frame_end=data.get("frameEnd"))
+        collections, remainder = collect(
+            root=root,
+            regex=regex,
+            exclude_regex=data.get("exclude_regex"),
+            frame_start=data.get("frameStart"),
+            frame_end=data.get("frameEnd"),
+        )
 
         self.log.info("Found collections: {}".format(collections))
 
+        """
         if data.get("subset"):
             # If subset is provided for this json then it must be a single
             # collection.
             if len(collections) > 1:
                 self.log.error("Forced subset can only work with a single "
                                "found sequence")
                 raise RuntimeError("Invalid sequence")
+        """
 
         fps = data.get("fps", 25)
 
+        if data.get("user"):
+            context.data["user"] = data["user"]
+
         # Get family from the data
         families = data.get("families", ["render"])
         if "render" not in families:
             families.append("render")
         if "ftrack" not in families:
             families.append("ftrack")
-        if "review" not in families:
-            families.append("review")
         if "write" in instance_family:
             families.append("write")
+
         if data.get("attachTo"):
-            families.append("attach-render")
+            # we need to attach found collections to existing
+            # subset version as review representation.
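+            # each "attachTo" entry is expected to provide "subset" and
+            # "version" keys (used by the loop below)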
-            for collection in collections:
-                instance = context.create_instance(str(collection))
-                self.log.info("Collection: %s" % list(collection))
+
+            for attach in data.get("attachTo"):
+                self.log.info(
+                    "Attaching render {}:v{}".format(
+                        attach["subset"], attach["version"]))
+                instance = context.create_instance(
+                    attach["subset"])
+                instance.data.update(
+                    {
+                        "name": attach["subset"],
+                        "version": attach["version"],
+                        "family": 'review',
+                        "families": ['review', 'ftrack'],
+                        "asset": data.get(
+                            "asset", api.Session["AVALON_ASSET"]),
+                        "stagingDir": root,
+                        "frameStart": data.get("frameStart"),
+                        "frameEnd": data.get("frameEnd"),
+                        "fps": fps,
+                        "source": data.get("source", ""),
+                        "pixelAspect": pixel_aspect
+                    })
 
-            # Ensure each instance gets a unique reference to the data
+                if "representations" not in instance.data:
+                    instance.data["representations"] = []
+
+                for collection in collections:
+                    self.log.info(
+                        " - adding representation: {}".format(
+                            str(collection))
+                    )
+                    ext = collection.tail.lstrip(".")
+
+                    representation = {
+                        "name": ext,
+                        "ext": "{}".format(ext),
+                        "files": list(collection),
+                        "stagingDir": root,
+                        "anatomy_template": "render",
+                        "fps": fps,
+                        "tags": ["review"],
+                    }
+                    instance.data["representations"].append(
+                        representation)
+
+        elif data.get("subset"):
+            # if we have subset - add all collections and known
+            # remainder as representations
+
+            self.log.info(
+                "Adding representations to subset {}".format(
+                    data.get("subset")))
+
+            instance = context.create_instance(data.get("subset"))
             data = copy.deepcopy(data)
 
-            # If no subset provided, get it from collection's head
-            subset = data.get("subset", collection.head.rstrip("_. "))
-
-            # If no start or end frame provided, get it from collection
-            indices = list(collection.indexes)
-            start = data.get("frameStart", indices[0])
-            end = data.get("frameEnd", indices[-1])
-
-            self.log.debug("Collected pixel_aspect:\n"
-                           "{}".format(pixel_aspect))
-            self.log.debug("type pixel_aspect:\n"
-                           "{}".format(type(pixel_aspect)))
-
-            # root = os.path.normpath(root)
-            # self.log.info("Source: {}}".format(data.get("source", "")))
-
-            ext = list(collection)[0].split('.')[-1]
-
-            instance.data.update({
-                "name": str(collection),
-                "family": families[0],  # backwards compatibility / pyblish
-                "families": list(families),
-                "subset": subset,
-                "asset": data.get("asset", api.Session["AVALON_ASSET"]),
-                "stagingDir": root,
-                "frameStart": start,
-                "frameEnd": end,
-                "fps": fps,
-                "source": data.get('source', ''),
-                "pixelAspect": pixel_aspect,
-            })
-            if lut_path:
-                instance.data.update({"lutPath": lut_path})
-
-            if data.get("attachTo"):
-                instance.data.update({"attachTo": data.get("attachTo")})
-
-            instance.append(collection)
-            instance.context.data['fps'] = fps
+            instance.data.update(
+                {
+                    "name": data.get("subset"),
+                    "family": families[0],
+                    "families": list(families),
+                    "subset": data.get("subset"),
+                    "asset": data.get(
+                        "asset", api.Session["AVALON_ASSET"]),
+                    "stagingDir": root,
+                    "frameStart": data.get("frameStart"),
+                    "frameEnd": data.get("frameEnd"),
+                    "fps": fps,
+                    "source": data.get("source", ""),
+                    "pixelAspect": pixel_aspect,
+                }
+            )
 
             if "representations" not in instance.data:
                 instance.data["representations"] = []
 
-            representation = {
-                'name': ext,
-                'ext': '{}'.format(ext),
-                'files': list(collection),
-                "stagingDir": root,
-                "anatomy_template": "render",
-                "fps": fps,
-                "tags": ['review']
-            }
-            instance.data["representations"].append(representation)
+            for collection in collections:
+                self.log.info(" - {}".format(str(collection)))
-            if data.get('user'):
-                context.data["user"] = data['user']
+                ext = collection.tail.lstrip(".")
 
-            self.log.debug("Collected instance:\n"
-                           "{}".format(pformat(instance.data)))
+                representation = {
+                    "name": ext,
+                    "ext": "{}".format(ext),
+                    "files": list(collection),
+                    "stagingDir": root,
+                    "anatomy_template": "render",
+                    "fps": fps,
+                    "tags": ["review"],
+                }
+                instance.data["representations"].append(
+                    representation)
+
+            # process remainders
+            for rem in remainder:
+                # add only known types to representation
+                if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
+                    self.log.info(" . {}".format(rem))
+                    representation = {
+                        "name": rem.split(".")[-1],
+                        "ext": "{}".format(rem.split(".")[-1]),
+                        "files": rem,
+                        "stagingDir": root,
+                        "anatomy_template": "render",
+                        "fps": fps,
+                        "tags": ["review"],
+                    }
+                    instance.data["representations"].append(
+                        representation)
+
+        else:
+            # we have no subset so we take every collection and create one
+            # from it
+            for collection in collections:
+                instance = context.create_instance(str(collection))
+                self.log.info("Creating subset from: %s" % str(collection))
+
+                # Ensure each instance gets a unique reference to the data
+                data = copy.deepcopy(data)
+
+                # If no subset provided, get it from collection's head
+                subset = data.get("subset", collection.head.rstrip("_. "))
+
+                # If no start or end frame provided, get it from collection
+                indices = list(collection.indexes)
+                start = data.get("frameStart", indices[0])
+                end = data.get("frameEnd", indices[-1])
+
+                ext = list(collection)[0].split(".")[-1]
+
+                if "review" not in families:
+                    families.append("review")
+
+                instance.data.update(
+                    {
+                        "name": str(collection),
+                        "family": families[0],  # backwards compatibility
+                        "families": list(families),
+                        "subset": subset,
+                        "asset": data.get(
+                            "asset", api.Session["AVALON_ASSET"]),
+                        "stagingDir": root,
+                        "frameStart": start,
+                        "frameEnd": end,
+                        "fps": fps,
+                        "source": data.get("source", ""),
+                        "pixelAspect": pixel_aspect,
+                    }
+                )
+                if lut_path:
+                    instance.data.update({"lutPath": lut_path})
+
+                instance.append(collection)
+                instance.context.data["fps"] = fps
+
+                if "representations" not in instance.data:
+                    instance.data["representations"] = []
+
+                representation = {
+                    "name": ext,
+                    "ext": "{}".format(ext),
+                    "files": list(collection),
+                    "stagingDir": root,
+                    "anatomy_template": "render",
+                    "fps": fps,
+                    "tags": ["review"],
+                }
+                instance.data["representations"].append(representation)
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index abce4672e7..3495dc6cd5 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -288,11 +288,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         if attach_to:
             for subset in attach_to:
                 for instance in context:
-                    if instance.data["subset"] != subset:
+                    if instance.data["subset"] != subset["subset"]:
                         continue
                     attach_subset_versions.append(
                         {"version": instance.data["version"],
-                         "subset": subset})
+                         "subset": subset["subset"],
+                         "family": subset["family"]})
 
diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index aea17b6e87..fe2ba31c2b 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -67,8 +67,14 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
 
             # test if there are sets (subsets) to attach render to
             sets = cmds.sets(layer, query=True) or []
+ attachTo = [] if sets: for s in sets: + attachTo.append({ + "version": None, # we need integrator to get version + "subset": s, + "family": cmds.getAttr("{}.family".format(s)) + }) self.log.info(" -> attach render to: {}".format(s)) layer_name = "rs_{}".format(expected_layer_name) @@ -76,7 +82,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): # Get layer specific settings, might be overrides data = { "subset": expected_layer_name, - "attachTo": sets, + "attachTo": attachTo, "setMembers": expected_layer_name, "publish": True, "frameStart": self.get_render_attribute("startFrame", From 1a6462d35872a81dfa15c83d36e5fd28bfe618cc Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 14 Jan 2020 16:16:19 +0100 Subject: [PATCH 018/434] submitting jobs with limited env, pype location determination --- .../global/publish/submit_publish_job.py | 6 +- .../maya/publish/submit_maya_deadline.py | 75 ++----------------- pype/scripts/publish_filesequence.py | 29 ++++++- 3 files changed, 37 insertions(+), 73 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 9c72ece73c..8d189cc7b3 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -149,7 +149,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", - "PYPE_ROOT" + "PYPE_ROOT", + "PYPE_METADATA_FILE" ] def _submit_deadline_post_job(self, instance, job): @@ -192,7 +193,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "PluginInfo": { "Version": "3.6", "ScriptFile": _get_script(), - "Arguments": '--paths "{}"'.format(metadata_path), + "Arguments": "", "SingleFrameOnly": "True" }, @@ -204,6 +205,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # job so they use the same environment environment = job["Props"].get("Env", {}) + environment["PYPE_METADATA_FILE"] = metadata_path i = 0 for index, key in enumerate(environment): self.log.info("KEY: {}".format(key)) diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 55c04e9c41..e3fa79b1c8 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -228,80 +228,19 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AuxFiles": [] } - # Include critical environment variables with submission + # We need those to pass them to pype for it to set correct context keys = [ - # This will trigger `userSetup.py` on the slave - # such that proper initialisation happens the same - # way as it does on a local machine. - # TODO(marcus): This won't work if the slaves don't - # have accesss to these paths, such as if slaves are - # running Linux and the submitter is on Windows. 
- "PYTHONPATH", - "PATH", - - "MTOA_EXTENSIONS_PATH", - "MTOA_EXTENSIONS", - "DYLD_LIBRARY_PATH", - "MAYA_RENDER_DESC_PATH", - "MAYA_MODULE_PATH", - "ARNOLD_PLUGIN_PATH", - "AVALON_SCHEMA", "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", - "PYBLISHPLUGINPATH", - - # todo: This is a temporary fix for yeti variables - "PEREGRINEL_LICENSE", - "SOLIDANGLE_LICENSE", - "ARNOLD_LICENSE" - "MAYA_MODULE_PATH", - "TOOL_ENV" + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "PYPE_USERNAME" ] + environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **api.Session) - # self.log.debug("enviro: {}".format(pprint(environment))) - for path in os.environ: - if path.lower().startswith('pype_'): - environment[path] = os.environ[path] - - environment["PATH"] = os.environ["PATH"] - # self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS'])) - clean_environment = {} - for key in environment: - clean_path = "" - self.log.debug("key: {}".format(key)) - self.log.debug("value: {}".format(environment[key])) - to_process = str(environment[key]) - if key == "PYPE_STUDIO_CORE_MOUNT": - clean_path = to_process - elif "://" in to_process: - clean_path = to_process - elif os.pathsep not in str(to_process): - try: - path = to_process - path.decode('UTF-8', 'strict') - clean_path = os.path.normpath(path) - except UnicodeDecodeError: - print('path contains non UTF characters') - else: - for path in to_process.split(os.pathsep): - try: - path.decode('UTF-8', 'strict') - clean_path += os.path.normpath(path) + os.pathsep - except UnicodeDecodeError: - print('path contains non UTF characters') - - if key == "PYTHONPATH": - clean_path = clean_path.replace('python2', 'python3') - clean_path = clean_path.replace( - os.path.normpath( - environment['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - environment['PYPE_STUDIO_CORE_PATH'])) # noqa - clean_environment[key] = clean_path - - environment = clean_environment payload["JobInfo"].update({ "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -319,7 +258,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.preflight_check(instance) - self.log.info("Submitting..") + self.log.info("Submitting ...") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # E.g. http://192.168.0.1:8082/api/jobs diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 5517cfeb4c..167f7bea17 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -1,9 +1,12 @@ """This module is used for command line publishing of image sequences.""" import os +import sys +import argparse import logging import subprocess import platform + try: from shutil import which except ImportError: @@ -23,7 +26,7 @@ error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" def __main__(): - import argparse + parser = argparse.ArgumentParser() parser.add_argument("--paths", nargs="*", @@ -43,7 +46,27 @@ def __main__(): print("Running pype ...") auto_pype_root = os.path.dirname(os.path.abspath(__file__)) auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..") - auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root + # we need to use `auto_pype_root` to be able to remap locations. + # This is egg and chicken case: we need to know our storage locations + # to be able to remap them on different platforms but if we got `PYPE_ROOT` + # variable, we cannot be sure it originated on different platform and is + # therefor invalid. 
+ # So we use auto_pype_root to get to `PypeLauncher.path_remapper()`. This + # will load Storage environments and is able to remap environment to + # correct paths. + sys.path.append(auto_pype_root) + try: + from pypeapp import PypeLauncher + except ImportError: + print("!!! Error: cannot determine Pype location.") + print("--- we are looking at {}, but this is not Pype.".format( + auto_pype_root)) + + remapped_env = PypeLauncher.path_remapper() + auto_pype_root = remapped_env.get('PYPE_ROOT') or auto_pype_root + if remapped_env.get('PYPE_ROOT'): + print("Got Pype location from environment: {}".format( + remapped_env.get('PYPE_ROOT'))) pype_command = "pype.ps1" if platform.system().lower() == "linux": @@ -81,7 +104,7 @@ def __main__(): # Forcing forwaring the environment because environment inheritance does # not always work. # Cast all values in environment to str to be safe - env = {k: str(v) for k, v in os.environ.items()} + env = {k: str(v) for k, v in remapped_env.items()} exit_code = subprocess.call(args, env=env) if exit_code != 0: raise RuntimeError("Publishing failed.") From 408c220df80ad0a10951a2da9473f4fb5590926c Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Wed, 15 Jan 2020 15:30:00 +0100 Subject: [PATCH 019/434] remapping handling pushed to pype publish command --- pype/scripts/publish_filesequence.py | 27 +++++---------------------- 1 file changed, 5 insertions(+), 22 deletions(-) diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 167f7bea17..620ee3d851 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -26,7 +26,6 @@ error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" def __main__(): - parser = argparse.ArgumentParser() parser.add_argument("--paths", nargs="*", @@ -46,27 +45,11 @@ def __main__(): print("Running pype ...") auto_pype_root = os.path.dirname(os.path.abspath(__file__)) auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..") - # we need to use `auto_pype_root` to be able to remap locations. - # This is egg and chicken case: we need to know our storage locations - # to be able to remap them on different platforms but if we got `PYPE_ROOT` - # variable, we cannot be sure it originated on different platform and is - # therefor invalid. - # So we use auto_pype_root to get to `PypeLauncher.path_remapper()`. This - # will load Storage environments and is able to remap environment to - # correct paths. - sys.path.append(auto_pype_root) - try: - from pypeapp import PypeLauncher - except ImportError: - print("!!! 
Error: cannot determine Pype location.") - print("--- we are looking at {}, but this is not Pype.".format( - auto_pype_root)) - remapped_env = PypeLauncher.path_remapper() - auto_pype_root = remapped_env.get('PYPE_ROOT') or auto_pype_root - if remapped_env.get('PYPE_ROOT'): + auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root + if os.environ.get('PYPE_ROOT'): print("Got Pype location from environment: {}".format( - remapped_env.get('PYPE_ROOT'))) + os.environ.get('PYPE_ROOT'))) pype_command = "pype.ps1" if platform.system().lower() == "linux": @@ -92,7 +75,7 @@ def __main__(): print("Set pype root to: {}".format(pype_root)) print("Paths: {}".format(kwargs.paths or [os.getcwd()])) - paths = kwargs.paths or [os.getcwd()] + paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa args = [ os.path.join(pype_root, pype_command), @@ -104,7 +87,7 @@ def __main__(): # Forcing forwaring the environment because environment inheritance does # not always work. # Cast all values in environment to str to be safe - env = {k: str(v) for k, v in remapped_env.items()} + env = {k: str(v) for k, v in os.environ.items()} exit_code = subprocess.call(args, env=env) if exit_code != 0: raise RuntimeError("Publishing failed.") From 0052ed1d34f60cf4f46f1cf4afa3c12de45d0647 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 23 Jan 2020 08:55:30 +0100 Subject: [PATCH 020/434] fix(nuke): adding publish comment before rendering --- pype/plugins/global/publish/collect_filesequences.py | 4 ++++ pype/plugins/nuke/publish/extract_review_data_mov.py | 1 - pype/plugins/nuke/publish/extract_slate_frame.py | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index b7b41ea88b..121afa23ea 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -197,6 +197,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): fps = data.get("fps", 25) + # adding publish comment and intent to context + context.data["comment"] = data.get("comment", "") + context.data["intent"] = data.get("intent", "") + if data.get("user"): context.data["user"] = data["user"] diff --git a/pype/plugins/nuke/publish/extract_review_data_mov.py b/pype/plugins/nuke/publish/extract_review_data_mov.py index 39c338b62c..8b204680a7 100644 --- a/pype/plugins/nuke/publish/extract_review_data_mov.py +++ b/pype/plugins/nuke/publish/extract_review_data_mov.py @@ -3,7 +3,6 @@ import pyblish.api from avalon.nuke import lib as anlib from pype.nuke import lib as pnlib import pype -reload(pnlib) class ExtractReviewDataMov(pype.api.Extractor): diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 7e43b3cd6f..4d43f38859 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -12,7 +12,7 @@ class ExtractSlateFrame(pype.api.Extractor): """ - order = pyblish.api.ExtractorOrder + 0.01 + order = pyblish.api.ExtractorOrder - 0.001 label = "Extract Slate Frame" families = ["slate"] From c51769b68c11866b73aaf921f2162c31656fcc77 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 23 Jan 2020 11:56:01 +0100 Subject: [PATCH 021/434] fix(ftrack): plugin is unfinished and causing troubles --- pype/plugins/ftrack/publish/integrate_ftrack_comments.py | 1 + 1 file changed, 1 insertion(+) diff --git 
a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py b/pype/plugins/ftrack/publish/integrate_ftrack_comments.py index 9d0b7b3ab9..4f7afb4346 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_comments.py @@ -9,6 +9,7 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder label = "Integrate Comments to Ftrack." families = ["shot"] + enabled = False def process(self, instance): session = instance.context.data["ftrackSession"] From 0fbc2aeceaf67e0db5c95608dfa68af43d76833b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 23 Jan 2020 12:17:46 +0100 Subject: [PATCH 022/434] feat(ftrack): disable ftrack comment integration --- pype/plugins/ftrack/publish/integrate_ftrack_comments.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py b/pype/plugins/ftrack/publish/integrate_ftrack_comments.py index 9d0b7b3ab9..4f7afb4346 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_comments.py @@ -9,6 +9,7 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder label = "Integrate Comments to Ftrack." families = ["shot"] + enabled = False def process(self, instance): session = instance.context.data["ftrackSession"] From a1d5622cde2b2935a8acd000b86beb6f97c2e0c2 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 23 Jan 2020 12:38:36 +0100 Subject: [PATCH 023/434] fix(ftrack): enabled: False was not enough --- .../{publish => _unused_publish}/integrate_ftrack_comments.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename pype/plugins/ftrack/{publish => _unused_publish}/integrate_ftrack_comments.py (94%) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py similarity index 94% rename from pype/plugins/ftrack/publish/integrate_ftrack_comments.py rename to pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py index 4f7afb4346..4be9f7fc3a 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py +++ b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py @@ -7,7 +7,7 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin): """Create comments in Ftrack.""" order = pyblish.api.IntegratorOrder - label = "Integrate Comments to Ftrack." 
+ label = "Integrate Comments to Ftrack" families = ["shot"] enabled = False From 0b81a4f04bd2140d4524946f9743bc144a097c30 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 23 Jan 2020 14:49:12 +0100 Subject: [PATCH 024/434] (fix): use correct(existing) variable name --- pype/ftrack/tray/ftrack_module.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/tray/ftrack_module.py b/pype/ftrack/tray/ftrack_module.py index dab751c001..250872f239 100644 --- a/pype/ftrack/tray/ftrack_module.py +++ b/pype/ftrack/tray/ftrack_module.py @@ -171,7 +171,7 @@ class FtrackModule: # If thread failed test Ftrack and Mongo connection elif not self.thread_socket_server.isAlive(): - self.thread_socket_server_thread.join() + self.thread_socket_server.join() self.thread_socket_server = None ftrack_accessible = False From 7b460a515e908e81aead12d391b395718a0793e8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 23 Jan 2020 14:49:45 +0100 Subject: [PATCH 025/434] set logger name in action subprocess because is launched as __main__ --- pype/ftrack/ftrack_server/sub_user_server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/ftrack_server/sub_user_server.py b/pype/ftrack/ftrack_server/sub_user_server.py index 68066b33ce..b2ca17f929 100644 --- a/pype/ftrack/ftrack_server/sub_user_server.py +++ b/pype/ftrack/ftrack_server/sub_user_server.py @@ -7,7 +7,7 @@ from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub from pypeapp import Logger -log = Logger().get_logger(__name__) +log = Logger().get_logger("FtrackUserServer") def main(args): From 81ccb152f58caa4063f6944597059fe81587953c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 23 Jan 2020 14:50:04 +0100 Subject: [PATCH 026/434] modified startup and end log messages --- pype/ftrack/ftrack_server/sub_user_server.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/ftrack_server/sub_user_server.py b/pype/ftrack/ftrack_server/sub_user_server.py index b2ca17f929..8b2a9277cf 100644 --- a/pype/ftrack/ftrack_server/sub_user_server.py +++ b/pype/ftrack/ftrack_server/sub_user_server.py @@ -18,7 +18,9 @@ def main(args): # Connect the socket to the port where the server is listening server_address = ("localhost", port) - log.debug("Storer connected to {} port {}".format(*server_address)) + log.debug( + "User Ftrack Server connected to {} port {}".format(*server_address) + ) sock.connect(server_address) sock.sendall(b"CreatedUser") @@ -27,7 +29,7 @@ def main(args): auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub ) server = FtrackServer("action") - log.debug("Launched Ftrack Event storer") + log.debug("Launched User Ftrack Server") server.run_server(session=session) finally: @@ -42,7 +44,6 @@ if __name__ == "__main__": log.info( "Process was forced to stop. Process ended." 
)
-        log.info("Process ended.")
         sys.exit(0)
 
     signal.signal(signal.SIGINT, signal_handler)

From f35f2f466958e2a85233c13c88c3cc98509a6f66 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 23 Jan 2020 14:52:21 +0100
Subject: [PATCH 027/434] modified collect ftrack api to log used ftrack user
 and queried all project, asset and task entity separately and log queries
 and results
---
 .../ftrack/publish/collect_ftrack_api.py | 48 +++++++++++++------
 1 file changed, 33 insertions(+), 15 deletions(-)

diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py
index d09baec676..f79d74453b 100644
--- a/pype/plugins/ftrack/publish/collect_ftrack_api.py
+++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py
@@ -23,25 +23,43 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
 
         # Collect session
         session = ftrack_api.Session()
+        self.log.debug("Ftrack user: \"{0}\"".format(session.api_user))
         context.data["ftrackSession"] = session
 
         # Collect task
-        project = os.environ.get('AVALON_PROJECT', '')
-        asset = os.environ.get('AVALON_ASSET', '')
-        task = os.environ.get('AVALON_TASK', None)
-        self.log.debug(task)
+        project_name = os.environ.get('AVALON_PROJECT', '')
+        asset_name = os.environ.get('AVALON_ASSET', '')
+        task_name = os.environ.get('AVALON_TASK', None)
+
+        # Find project entity
+        project_query = 'Project where full_name is "{0}"'.format(project_name)
+        self.log.debug("Project query: < {0} >".format(project_query))
+        project_entity = session.query(project_query).one()
+        self.log.debug("Project found: {0}".format(project_entity))
+
+        # Find asset entity
+        entity_query = (
+            'TypedContext where project_id is "{0}"'
+            ' and name is "{1}"'
+        ).format(project_entity["id"], asset_name)
+        self.log.debug("Asset entity query: < {0} >".format(entity_query))
+        asset_entity = session.query(entity_query).one()
+        self.log.debug("Asset found: {0}".format(asset_entity))
+
+        # Find task entity if task is set
+        if task_name:
+            task_query = (
+                'Task where name is "{0}" and parent_id is "{1}"'
+            ).format(task_name, asset_entity["id"])
+            self.log.debug("Task entity query: < {0} >".format(task_query))
+            task_entity = session.query(task_query).one()
+            self.log.debug("Task entity found: {0}".format(task_entity))
 
-        if task:
-            result = session.query('Task where\
-                project.full_name is "{0}" and\
-                name is "{1}" and\
-                parent.name is "{2}"'.format(project, task, asset)).one()
-            context.data["ftrackTask"] = result
+        else:
+            task_entity = None
+            self.log.warning("Task name is not set.")
 
-        else:
-            result = session.query('TypedContext where\
-                project.full_name is "{0}" and\
-                name is "{1}"'.format(project, asset)).one()
-            context.data["ftrackEntity"] = result
+        context.data["ftrackProject"] = project_entity
+        context.data["ftrackEntity"] = asset_entity
+        context.data["ftrackTask"] = task_entity
 
-        self.log.info(result)

From da990057f45124d52463c51e21983b7e36d933bc Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 24 Jan 2020 14:26:46 +0100
Subject: [PATCH 028/434] feat(nuke): adding validator for output resolution
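
The validator compares the script format with the resolution of the
write node's input. A minimal sketch of the underlying check in Nuke's
Python API (node name is illustrative):

    import nuke

    root_format = nuke.root().format()
    node_format = nuke.toNode("Write1").format()
    same = (root_format.width() == node_format.width() and
            root_format.height() == node_format.height())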
---
 .../publish/validate_output_resolution.py | 78 +++++++++++++++++++
 1 file changed, 78 insertions(+)
 create mode 100644 pype/plugins/nuke/publish/validate_output_resolution.py

diff --git a/pype/plugins/nuke/publish/validate_output_resolution.py b/pype/plugins/nuke/publish/validate_output_resolution.py
new file mode 100644
index 0000000000..2563ee929f
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_output_resolution.py
@@ -0,0 +1,78 @@
+import nuke
+
+import pyblish.api
+
+
+class RepairWriteResolutionDifference(pyblish.api.Action):
+
+    label = "Repair"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (result["error"] is not None and result["instance"] is not None
+               and result["instance"] not in failed):
+                failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+        for instance in instances:
+            reformat = instance[0].dependencies()[0]
+            if reformat.Class() != "Reformat":
+                reformat = nuke.nodes.Reformat(inputs=[instance[0].input(0)])
+
+                xpos = instance[0].xpos()
+                ypos = instance[0].ypos() - 26
+
+                dependent_ypos = instance[0].dependencies()[0].ypos()
+                if (instance[0].ypos() - dependent_ypos) <= 51:
+                    xpos += 110
+
+                reformat.setXYpos(xpos, ypos)
+
+                instance[0].setInput(0, reformat)
+
+            reformat["resize"].setValue("none")
+
+
+class ValidateOutputResolution(pyblish.api.InstancePlugin):
+    """Validates Output Resolution.
+
+    It makes sure the resolution of the write node's input is the same as
+    the Format definition of the script in the Root node.
+    """
+
+    order = pyblish.api.ValidatorOrder
+    optional = True
+    families = ["render", "render.local", "render.farm"]
+    label = "Write Resolution"
+    hosts = ["nuke"]
+    actions = [RepairWriteResolutionDifference]
+
+    def process(self, instance):
+
+        # Skip the resolution check if a crop node exists.
+        if instance[0].dependencies()[0].Class() == "Crop":
+            return
+
+        msg = "Input resolution does not match the script format."
+        assert self.check_resolution(instance), msg
+
+    def check_resolution(self, instance):
+        node = instance[0]
+
+        root_width = instance.data["resolutionWidth"]
+        root_height = instance.data["resolutionHeight"]
+
+        write_width = node.format().width()
+        write_height = node.format().height()
+
+        if (root_width != write_width) or (root_height != write_height):
+            return None
+        else:
+            return True

From 675da4d1359898ea4e0cc6311b90882373de44f8 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 24 Jan 2020 14:27:16 +0100
Subject: [PATCH 029/434] fix(nuke): making sure validator will run on all
 render families
---
 pype/plugins/nuke/publish/validate_write_bounding_box.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/nuke/publish/validate_write_bounding_box.py b/pype/plugins/nuke/publish/validate_write_bounding_box.py
index 417d4ab004..e4b7c77a25 100644
--- a/pype/plugins/nuke/publish/validate_write_bounding_box.py
+++ b/pype/plugins/nuke/publish/validate_write_bounding_box.py
@@ -57,7 +57,7 @@ class ValidateNukeWriteBoundingBox(pyblish.api.InstancePlugin):
 
     order = pyblish.api.ValidatorOrder
     optional = True
-    families = ["render"]
+    families = ["render", "render.local", "render.farm"]
     label = "Write Bounding Box"
     hosts = ["nuke"]
     actions = [RepairNukeBoundingBoxAction]

From 6b36d72b06a134ef1a1a03d82a10317becd31716 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 24 Jan 2020 15:27:15 +0100
Subject: [PATCH 030/434] visual parent was checked in wrong variable
---
 pype/ftrack/lib/avalon_sync.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 8cebd12a59..2240e42d36 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -1991,7 +1991,7 @@ class SyncEntitiesFactory:
                 vis_par = ent["data"]["visualParent"]
                 if (
                     vis_par is not None and
-                    str(vis_par) in 
self.deleted_entities + str(vis_par) in _deleted_entities ): continue _ready.append(mongo_id) From 0b1caf955a4353c59aa59e7262350fbd0e018c69 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 24 Jan 2020 16:23:29 +0100 Subject: [PATCH 031/434] added new version of py2 ftrack-api --- pype/vendor/ftrack_api_old/_version.py | 2 +- pype/vendor/ftrack_api_old/_weakref.py | 66 +++++++ pype/vendor/ftrack_api_old/attribute.py | 12 +- pype/vendor/ftrack_api_old/entity/factory.py | 16 +- pype/vendor/ftrack_api_old/entity/location.py | 3 +- pype/vendor/ftrack_api_old/entity/note.py | 55 +++++- .../vendor/ftrack_api_old/event/expression.py | 7 +- pype/vendor/ftrack_api_old/event/hub.py | 68 +++++-- pype/vendor/ftrack_api_old/logging.py | 17 ++ pype/vendor/ftrack_api_old/session.py | 170 +++++++++++++++--- pype/vendor/ftrack_api_old/symbol.py | 6 +- 11 files changed, 358 insertions(+), 64 deletions(-) create mode 100644 pype/vendor/ftrack_api_old/_weakref.py diff --git a/pype/vendor/ftrack_api_old/_version.py b/pype/vendor/ftrack_api_old/_version.py index 07f744ca5d..aa1a8c4aba 100644 --- a/pype/vendor/ftrack_api_old/_version.py +++ b/pype/vendor/ftrack_api_old/_version.py @@ -1 +1 @@ -__version__ = '1.3.3' +__version__ = '1.8.2' diff --git a/pype/vendor/ftrack_api_old/_weakref.py b/pype/vendor/ftrack_api_old/_weakref.py new file mode 100644 index 0000000000..69cc6f4b4f --- /dev/null +++ b/pype/vendor/ftrack_api_old/_weakref.py @@ -0,0 +1,66 @@ +""" +Yet another backport of WeakMethod for Python 2.7. +Changes include removing exception chaining and adding args to super() calls. + +Copyright (c) 2001-2019 Python Software Foundation.All rights reserved. + +Full license available in LICENSE.python. +""" +from weakref import ref + + +class WeakMethod(ref): + """ + A custom `weakref.ref` subclass which simulates a weak reference to + a bound method, working around the lifetime problem of bound methods. + """ + + __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" + + def __new__(cls, meth, callback=None): + try: + obj = meth.__self__ + func = meth.__func__ + except AttributeError: + raise TypeError( + "argument should be a bound method, not {}".format(type(meth)) + ) + + def _cb(arg): + # The self-weakref trick is needed to avoid creating a reference + # cycle. 
+ self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super(WeakMethod, self).__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return NotImplemented + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return NotImplemented + + __hash__ = ref.__hash__ diff --git a/pype/vendor/ftrack_api_old/attribute.py b/pype/vendor/ftrack_api_old/attribute.py index 66840bed66..47fd6c9616 100644 --- a/pype/vendor/ftrack_api_old/attribute.py +++ b/pype/vendor/ftrack_api_old/attribute.py @@ -148,7 +148,8 @@ class Attribute(object): '''A name and value pair persisted remotely.''' def __init__( - self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True + self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True, + computed=False ): '''Initialise attribute with *name*. @@ -161,10 +162,14 @@ class Attribute(object): are :attr:`ftrack_api_old.symbol.NOT_SET`. The exception to this is when the target value is also :attr:`ftrack_api_old.symbol.NOT_SET`. + If *computed* is set to True the value is a remote side computed value + and should not be long-term cached. + ''' super(Attribute, self).__init__() self._name = name self._mutable = mutable + self._computed = computed self.default_value = default_value self._local_key = 'local' @@ -205,6 +210,11 @@ class Attribute(object): '''Return whether attribute is mutable.''' return self._mutable + @property + def computed(self): + '''Return whether attribute is computed.''' + return self._computed + def get_value(self, entity): '''Return current value for *entity*. diff --git a/pype/vendor/ftrack_api_old/entity/factory.py b/pype/vendor/ftrack_api_old/entity/factory.py index 16721514bd..f47c92e563 100644 --- a/pype/vendor/ftrack_api_old/entity/factory.py +++ b/pype/vendor/ftrack_api_old/entity/factory.py @@ -49,9 +49,11 @@ class Factory(object): # Build attributes for class. attributes = ftrack_api_old.attribute.Attributes() - immutable = schema.get('immutable', []) + immutable_properties = schema.get('immutable', []) + computed_properties = schema.get('computed', []) for name, fragment in schema.get('properties', {}).items(): - mutable = name not in immutable + mutable = name not in immutable_properties + computed = name in computed_properties default = fragment.get('default', ftrack_api_old.symbol.NOT_SET) if default == '{uid}': @@ -62,7 +64,8 @@ class Factory(object): if data_type is not ftrack_api_old.symbol.NOT_SET: if data_type in ( - 'string', 'boolean', 'integer', 'number', 'variable' + 'string', 'boolean', 'integer', 'number', 'variable', + 'object' ): # Basic scalar attribute. 
if data_type == 'number': @@ -74,7 +77,7 @@ class Factory(object): data_type = 'datetime' attribute = self.create_scalar_attribute( - class_name, name, mutable, default, data_type + class_name, name, mutable, computed, default, data_type ) if attribute: attributes.add(attribute) @@ -139,11 +142,12 @@ class Factory(object): return cls def create_scalar_attribute( - self, class_name, name, mutable, default, data_type + self, class_name, name, mutable, computed, default, data_type ): '''Return appropriate scalar attribute instance.''' return ftrack_api_old.attribute.ScalarAttribute( - name, data_type=data_type, default_value=default, mutable=mutable + name, data_type=data_type, default_value=default, mutable=mutable, + computed=computed ) def create_reference_attribute(self, class_name, name, mutable, reference): diff --git a/pype/vendor/ftrack_api_old/entity/location.py b/pype/vendor/ftrack_api_old/entity/location.py index d48264abc2..8d9d52c654 100644 --- a/pype/vendor/ftrack_api_old/entity/location.py +++ b/pype/vendor/ftrack_api_old/entity/location.py @@ -526,7 +526,8 @@ class Location(ftrack_api_old.entity.base.Entity): for index, resource_identifier in enumerate(resource_identifiers): resource_identifiers[index] = ( self.resource_identifier_transformer.decode( - resource_identifier + resource_identifier, + context={'component': components[index]} ) ) diff --git a/pype/vendor/ftrack_api_old/entity/note.py b/pype/vendor/ftrack_api_old/entity/note.py index 4cacf6ac8a..c628886fd9 100644 --- a/pype/vendor/ftrack_api_old/entity/note.py +++ b/pype/vendor/ftrack_api_old/entity/note.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2015 ftrack +import warnings + import ftrack_api_old.entity.base @@ -33,26 +35,52 @@ class Note(ftrack_api_old.entity.base.Entity): class CreateNoteMixin(object): '''Mixin to add create_note method on entity class.''' - def create_note(self, content, author, recipients=None, category=None): + def create_note( + self, content, author, recipients=None, category=None, labels=None + ): '''Create note with *content*, *author*. - Note category can be set by including *category* and *recipients* - can be specified as a list of user or group instances. + NoteLabels can be set by including *labels*. + + Note category can be set by including *category*. + + *recipients* can be specified as a list of user or group instances. ''' + note_label_support = 'NoteLabel' in self.session.types + + if not labels: + labels = [] + + if labels and not note_label_support: + raise ValueError( + 'NoteLabel is not supported by the current server version.' + ) + + if category and labels: + raise ValueError( + 'Both category and labels cannot be set at the same time.' 
+            )
+
         if not recipients:
             recipients = []
 
-        category_id = None
-        if category:
-            category_id = category['id']
-
         data = {
             'content': content,
-            'author': author,
-            'category_id': category_id
+            'author': author
         }
 
+        if category:
+            if note_label_support:
+                labels = [category]
+                warnings.warn(
+                    'category argument will be removed in an upcoming version, '
+                    'please use labels instead.',
+                    PendingDeprecationWarning
+                )
+            else:
+                data['category_id'] = category['id']
+
         note = self.session.create('Note', data)
         self['notes'].append(note)
 
@@ -65,4 +93,13 @@ class CreateNoteMixin(object):
 
             note['recipients'].append(recipient)
 
+        for label in labels:
+            self.session.create(
+                'NoteLabelLink',
+                {
+                    'label_id': label['id'],
+                    'note_id': note['id']
+                }
+            )
+
         return note
diff --git a/pype/vendor/ftrack_api_old/event/expression.py b/pype/vendor/ftrack_api_old/event/expression.py
index e10cd85844..8de4be0d71 100644
--- a/pype/vendor/ftrack_api_old/event/expression.py
+++ b/pype/vendor/ftrack_api_old/event/expression.py
@@ -3,14 +3,15 @@
 
 from operator import eq, ne, ge, le, gt, lt
 
-from pyparsing import (ParserElement, Group, Word, CaselessKeyword, Forward,
+from pyparsing import (Group, Word, CaselessKeyword, Forward,
                        FollowedBy, Suppress, oneOf, OneOrMore, Optional,
                        alphanums, quotedString, removeQuotes)
 
 import ftrack_api_old.exception
 
-# Optimise parsing using packrat memoisation feature.
-ParserElement.enablePackrat()
+# Do not enable packrat since it is not thread-safe and will result in parsing
+# exceptions in a multi threaded environment.
+# ParserElement.enablePackrat()
 
 
 class Parser(object):
diff --git a/pype/vendor/ftrack_api_old/event/hub.py b/pype/vendor/ftrack_api_old/event/hub.py
index 25410aa1e1..3ffbd38056 100644
--- a/pype/vendor/ftrack_api_old/event/hub.py
+++ b/pype/vendor/ftrack_api_old/event/hub.py
@@ -14,6 +14,7 @@ import operator
 import functools
 import json
 import socket
+import warnings
 
 import requests
 import requests.exceptions
@@ -40,9 +41,20 @@ ServerDetails = collections.namedtuple('ServerDetails', [
 ])
 
 
+
+
 class EventHub(object):
     '''Manage routing of events.'''
 
+    _future_signature_warning = (
+        'When constructing your Session object you did not explicitly define '
+        'auto_connect_event_hub as True even though you appear to be publishing '
+        'and / or subscribing to asynchronous events. In version 2.0 of '
+        'the ftrack-python-api the default behavior will change from True '
+        'to False. Please make sure to update your tools. You can read more at '
+        'http://ftrack-python-api.rtd.ftrack.com/en/stable/release/migration.html'
+    )
+
     def __init__(self, server_url, api_user, api_key):
         '''Initialise hub, connecting to ftrack *server_url*.
 
@@ -76,6 +88,8 @@ class EventHub(object):
         self._auto_reconnect_attempts = 30
         self._auto_reconnect_delay = 10
 
+        self._deprecation_warning_auto_connect = False
+
         # Mapping of Socket.IO codes to meaning.
         self._code_name_mapping = {
             '0': 'disconnect',
@@ -134,6 +148,9 @@ class EventHub(object):
             connected or connection fails.
 
         '''
+
+        self._deprecation_warning_auto_connect = False
+
         if self.connected:
             raise ftrack_api_old.exception.EventHubConnectionError(
                 'Already connected.'
@@ -164,17 +181,26 @@ class EventHub(object):
            # https://docs.python.org/2/library/socket.html#socket.socket.setblocking
             self._connection = websocket.create_connection(url, timeout=60)
 
-        except Exception:
+        except Exception as error:
+            error_message = (
+                'Failed to connect to event server at {server_url} with '
+                'error: "{error}".'
+ ) + + error_details = { + 'error': unicode(error), + 'server_url': self.get_server_url() + } + self.logger.debug( L( - 'Error connecting to event server at {0}.', - self.get_server_url() + error_message, **error_details ), exc_info=1 ) raise ftrack_api_old.exception.EventHubConnectionError( - 'Failed to connect to event server at {0}.' - .format(self.get_server_url()) + error_message, + details=error_details ) # Start background processing thread. @@ -543,6 +569,11 @@ class EventHub(object): event will be caught by this method and ignored. ''' + if self._deprecation_warning_auto_connect and not synchronous: + warnings.warn( + self._future_signature_warning, FutureWarning + ) + try: return self._publish( event, synchronous=synchronous, on_reply=on_reply @@ -700,18 +731,23 @@ class EventHub(object): # Automatically publish a non None response as a reply when not in # synchronous mode. - if not synchronous and response is not None: - - try: - self.publish_reply( - event, data=response, source=subscriber.metadata + if not synchronous: + if self._deprecation_warning_auto_connect: + warnings.warn( + self._future_signature_warning, FutureWarning ) - except Exception: - self.logger.exception(L( - 'Error publishing response {0} from subscriber {1} ' - 'for event {2}.', response, subscriber, event - )) + if response is not None: + try: + self.publish_reply( + event, data=response, source=subscriber.metadata + ) + + except Exception: + self.logger.exception(L( + 'Error publishing response {0} from subscriber {1} ' + 'for event {2}.', response, subscriber, event + )) # Check whether to continue processing topic event. if event.is_stopped(): @@ -881,6 +917,7 @@ class EventHub(object): if code_name == 'connect': self.logger.debug('Connected to event server.') event = ftrack_api_old.event.base.Event('ftrack.meta.connected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'disconnect': @@ -901,6 +938,7 @@ class EventHub(object): if not self.connected: event = ftrack_api_old.event.base.Event('ftrack.meta.disconnected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'heartbeat': diff --git a/pype/vendor/ftrack_api_old/logging.py b/pype/vendor/ftrack_api_old/logging.py index 2b28ce900b..41969c5b2a 100644 --- a/pype/vendor/ftrack_api_old/logging.py +++ b/pype/vendor/ftrack_api_old/logging.py @@ -1,6 +1,23 @@ # :coding: utf-8 # :copyright: Copyright (c) 2016 ftrack +import functools +import warnings + + +def deprecation_warning(message): + def decorator(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + warnings.warn( + message, + PendingDeprecationWarning + ) + return function(*args, **kwargs) + return wrapper + + return decorator + class LazyLogMessage(object): '''A log message that can be evaluated lazily for improved performance. 
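The `deprecation_warning` decorator added above is what drives the
`Session._call`, `Session._entity_reference` and `Session.delayed_job`
deprecation notices later in this patch. A minimal sketch of the pattern it
enables, assuming only the decorator as defined here (`old_helper` is a
made-up example name, not part of the patch):

    import warnings

    from ftrack_api_old.logging import deprecation_warning


    @deprecation_warning('old_helper() is deprecated, use a newer helper.')
    def old_helper(value):
        # Hypothetical legacy alias kept only for backwards compatibility.
        return value * 2


    with warnings.catch_warnings(record=True) as caught:
        # PendingDeprecationWarning is ignored by default, so enable it.
        warnings.simplefilter('always')
        assert old_helper(21) == 42
        assert issubclass(caught[0].category, PendingDeprecationWarning)
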
diff --git a/pype/vendor/ftrack_api_old/session.py b/pype/vendor/ftrack_api_old/session.py index c313203a0c..0986962ca4 100644 --- a/pype/vendor/ftrack_api_old/session.py +++ b/pype/vendor/ftrack_api_old/session.py @@ -16,6 +16,7 @@ import hashlib import tempfile import threading import atexit +import warnings import requests import requests.auth @@ -42,8 +43,14 @@ import ftrack_api_old.structure.origin import ftrack_api_old.structure.entity_id import ftrack_api_old.accessor.server import ftrack_api_old._centralized_storage_scenario +import ftrack_api_old.logging from ftrack_api_old.logging import LazyLogMessage as L +try: + from weakref import WeakMethod +except ImportError: + from ftrack_api_old._weakref import WeakMethod + class SessionAuthentication(requests.auth.AuthBase): '''Attach ftrack session authentication information to requests.''' @@ -69,7 +76,7 @@ class Session(object): def __init__( self, server_url=None, api_key=None, api_user=None, auto_populate=True, plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=True, schema_cache_path=None, + auto_connect_event_hub=None, schema_cache_path=None, plugin_arguments=None ): '''Initialise session. @@ -233,7 +240,8 @@ class Session(object): self._api_key ) - if auto_connect_event_hub: + self._auto_connect_event_hub_thread = None + if auto_connect_event_hub in (None, True): # Connect to event hub in background thread so as not to block main # session usage waiting for event hub connection. self._auto_connect_event_hub_thread = threading.Thread( @@ -242,8 +250,14 @@ class Session(object): self._auto_connect_event_hub_thread.daemon = True self._auto_connect_event_hub_thread.start() + # To help with migration from auto_connect_event_hub default changing + # from True to False. + self._event_hub._deprecation_warning_auto_connect = ( + auto_connect_event_hub is None + ) + # Register to auto-close session on exit. - atexit.register(self.close) + atexit.register(WeakMethod(self.close)) self._plugin_paths = plugin_paths if self._plugin_paths is None: @@ -271,6 +285,15 @@ class Session(object): ftrack_api_old._centralized_storage_scenario.register(self) self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.ready', + data=dict( + session=self + ) + ), + synchronous=True + ) def __enter__(self): '''Return session as context manager.''' @@ -389,7 +412,8 @@ class Session(object): try: self.event_hub.disconnect() - self._auto_connect_event_hub_thread.join() + if self._auto_connect_event_hub_thread: + self._auto_connect_event_hub_thread.join() except ftrack_api_old.exception.EventHubConnectionError: pass @@ -428,6 +452,16 @@ class Session(object): # Re-configure certain session aspects that may be dependant on cache. self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.reset', + data=dict( + session=self + ) + ), + synchronous=True + ) + def auto_populating(self, auto_populate): '''Temporarily set auto populate to *auto_populate*. @@ -508,7 +542,7 @@ class Session(object): 'entity_key': entity.get('id') }) - result = self._call( + result = self.call( [payload] ) @@ -790,12 +824,13 @@ class Session(object): }] # TODO: When should this execute? How to handle background=True? - results = self._call(batch) + results = self.call(batch) # Merge entities into local cache and return merged entities. 
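+        # One `merged` mapping is shared across the whole result set below so
+        # entities that appear multiple times are only merged once by
+        # _merge_recursive.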
         data = []
+        merged = dict()
         for entity in results[0]['data']:
-            data.append(self.merge(entity))
+            data.append(self._merge_recursive(entity, merged))
 
         return data, results[0]['metadata']
 
@@ -856,6 +891,48 @@ class Session(object):
         else:
             return value
 
+    def _merge_recursive(self, entity, merged=None):
+        '''Merge *entity* and all its attributes recursively.'''
+        log_debug = self.logger.isEnabledFor(logging.DEBUG)
+
+        if merged is None:
+            merged = {}
+
+        attached = self.merge(entity, merged)
+
+        for attribute in entity.attributes:
+            # Remote attributes.
+            remote_value = attribute.get_remote_value(entity)
+
+            if isinstance(
+                remote_value,
+                (
+                    ftrack_api_old.entity.base.Entity,
+                    ftrack_api_old.collection.Collection,
+                    ftrack_api_old.collection.MappedCollectionProxy
+                )
+            ):
+                log_debug and self.logger.debug(
+                    'Merging remote value for attribute {0}.'.format(attribute)
+                )
+
+                if isinstance(remote_value, ftrack_api_old.entity.base.Entity):
+                    self._merge_recursive(remote_value, merged=merged)
+
+                elif isinstance(
+                    remote_value, ftrack_api_old.collection.Collection
+                ):
+                    for entry in remote_value:
+                        self._merge_recursive(entry, merged=merged)
+
+                elif isinstance(
+                    remote_value, ftrack_api_old.collection.MappedCollectionProxy
+                ):
+                    for entry in remote_value.collection:
+                        self._merge_recursive(entry, merged=merged)
+
+        return attached
+
     def _merge_entity(self, entity, merged=None):
         '''Merge *entity* into session returning merged entity.
 
@@ -1185,7 +1262,7 @@ class Session(object):
 
         # Process batch.
         if batch:
-            result = self._call(batch)
+            result = self.call(batch)
 
             # Clear recorded operations.
             self.recorded_operations.clear()
@@ -1260,7 +1337,7 @@ class Session(object):
 
     def _fetch_server_information(self):
         '''Return server information.'''
-        result = self._call([{'action': 'query_server_information'}])
+        result = self.call([{'action': 'query_server_information'}])
         return result[0]
 
     def _discover_plugins(self, plugin_arguments=None):
@@ -1362,7 +1439,7 @@ class Session(object):
                 'Loading schemas from server due to hash not matching.'
                 'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash
             ))
-            schemas = self._call([{'action': 'query_schemas'}])[0]
+            schemas = self.call([{'action': 'query_schemas'}])[0]
 
             if schema_cache_path:
                 try:
@@ -1525,8 +1602,24 @@ class Session(object):
             synchronous=True
         )
 
+    @ftrack_api_old.logging.deprecation_warning(
+        'Session._call is now available as public method Session.call. The '
+        'private method will be removed in version 2.0.'
+    )
     def _call(self, data):
-        '''Make request to server with *data*.'''
+        '''Make request to server with *data* batch describing the actions.
+
+        .. note::
+
+            This private method is now available as public method
+            :meth:`call`. This alias remains for backwards
+            compatibility, but will be removed in version 2.0.
+
+        '''
+        return self.call(data)
+
+    def call(self, data):
+        '''Make request to server with *data* batch describing the actions.'''
         url = self._server_url + '/api'
         headers = {
             'content-type': 'application/json',
@@ -1553,7 +1646,7 @@ class Session(object):
                 'Server reported error in unexpected format. 
Raw error was: {0}' .format(response.text) ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) else: @@ -1562,7 +1655,7 @@ class Session(object): error_message = 'Server reported error: {0}({1})'.format( result['exception'], result['content'] ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) return result @@ -1620,12 +1713,12 @@ class Session(object): if "entity_data" in data: for key, value in data["entity_data"].items(): if isinstance(value, ftrack_api_old.entity.base.Entity): - data["entity_data"][key] = self._entity_reference(value) + data["entity_data"][key] = self.entity_reference(value) return data if isinstance(item, ftrack_api_old.entity.base.Entity): - data = self._entity_reference(item) + data = self.entity_reference(item) with self.auto_populating(True): @@ -1646,14 +1739,15 @@ class Session(object): value = attribute.get_local_value(item) elif entity_attribute_strategy == 'persisted_only': - value = attribute.get_remote_value(item) + if not attribute.computed: + value = attribute.get_remote_value(item) if value is not ftrack_api_old.symbol.NOT_SET: if isinstance( attribute, ftrack_api_old.attribute.ReferenceAttribute ): if isinstance(value, ftrack_api_old.entity.base.Entity): - value = self._entity_reference(value) + value = self.entity_reference(value) data[attribute.name] = value @@ -1668,14 +1762,14 @@ class Session(object): if isinstance(item, ftrack_api_old.collection.Collection): data = [] for entity in item: - data.append(self._entity_reference(entity)) + data.append(self.entity_reference(entity)) return data raise TypeError('{0!r} is not JSON serializable'.format(item)) - def _entity_reference(self, entity): - '''Return reference to *entity*. + def entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. Return a mapping containing the __entity_type__ of the entity along with the key, value pairs that make up it's primary key. @@ -1689,6 +1783,26 @@ class Session(object): return reference + @ftrack_api_old.logging.deprecation_warning( + 'Session._entity_reference is now available as public method ' + 'Session.entity_reference. The private method will be removed ' + 'in version 2.0.' + ) + def _entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. + + Return a mapping containing the __entity_type__ of the entity along + with the key, value pairs that make up it's primary key. + + .. note:: + + This private method is now available as public method + :meth:`entity_reference`. This alias remains for backwards + compatibility, but will be removed in version 2.0. + + ''' + return self.entity_reference(entity) + def decode(self, string): '''Return decoded JSON *string* as Python object.''' with self.operation_recording(False): @@ -2016,6 +2130,10 @@ class Session(object): return availabilities + @ftrack_api_old.logging.deprecation_warning( + 'Session.delayed_job has been deprecated in favour of session.call. ' + 'Please refer to the release notes for more information.' + ) def delayed_job(self, job_type): '''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned. 
@@ -2033,7 +2151,7 @@ class Session(object): } try: - result = self._call( + result = self.call( [operation] )[0] @@ -2070,7 +2188,7 @@ class Session(object): ) try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2172,7 +2290,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2212,7 +2330,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2258,7 +2376,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2306,7 +2424,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. if 'Invalid action u\'send_review_session_invite\'' in error.message: diff --git a/pype/vendor/ftrack_api_old/symbol.py b/pype/vendor/ftrack_api_old/symbol.py index 10b3f55bd5..f46760f634 100644 --- a/pype/vendor/ftrack_api_old/symbol.py +++ b/pype/vendor/ftrack_api_old/symbol.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2014 ftrack +import os + class Symbol(object): '''A constant symbol.''' @@ -68,8 +70,8 @@ CONNECT_LOCATION_ID = '07b82a97-8cf9-11e3-9383-20c9d081909b' #: Identifier of builtin server location. SERVER_LOCATION_ID = '3a372bde-05bc-11e4-8908-20c9d081909b' -#: Chunk size used when working with data. -CHUNK_SIZE = 8192 +#: Chunk size used when working with data, default to 1Mb. 
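+#: Can be overridden with the FTRACK_API_FILE_CHUNK_SIZE environment variable;
+#: an unset or zero value falls back to 1024*1024 bytes.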
+CHUNK_SIZE = int(os.getenv('FTRACK_API_FILE_CHUNK_SIZE', 0)) or 1024*1024 #: Symbol representing syncing users with ldap JOB_SYNC_USERS_LDAP = Symbol('SYNC_USERS_LDAP') From b69e839cfc4e7842e04e7ed477e88ebe68aebde6 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 25 Jan 2020 14:18:39 +0100 Subject: [PATCH 032/434] feat(global): adding review to sequence functionality --- pype/plugins/global/publish/extract_review.py | 41 ++++++++++++++++--- 1 file changed, 35 insertions(+), 6 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index a11f681e61..2e79d86c38 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -53,10 +53,21 @@ class ExtractReview(pyblish.api.InstancePlugin): if "review" in tags: staging_dir = repre["stagingDir"] + + # iterating preset output profiles for name, profile in output_profiles.items(): + repre_new = repre.copy() + ext = profile.get("ext", None) + p_tags = profile.get('tags', []) + self.log.info("p_tags: `{}`".format(p_tags)) + + # adding control for presets to be sequence + # or single file + is_sequence = ("sequence" in p_tags) and (ext in ( + "png", "jpg", "jpeg")) + self.log.debug("Profile name: {}".format(name)) - ext = profile.get("ext", None) if not ext: ext = "mov" self.log.warning( @@ -88,18 +99,22 @@ class ExtractReview(pyblish.api.InstancePlugin): filename = repre["files"].split(".")[0] repr_file = filename + "_{0}.{1}".format(name, ext) - full_output_path = os.path.join( staging_dir, repr_file) + if is_sequence: + filename_base = filename + "_{0}".format(name) + repr_file = filename_base + ".%08d.{0}".format( + ext) + repre_new["sequence_file"] = repr_file + full_output_path = os.path.join( + staging_dir, filename_base, repr_file) + self.log.info("input {}".format(full_input_path)) self.log.info("output {}".format(full_output_path)) - repre_new = repre.copy() - new_tags = [x for x in tags if x != "delete"] - p_tags = profile.get('tags', []) - self.log.info("p_tags: `{}`".format(p_tags)) + # add families [instance.data["families"].append(t) for t in p_tags @@ -288,6 +303,14 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug( "_ output_args: `{}`".format(output_args)) + if is_sequence: + stg_dir = os.path.dirname(full_output_path) + + if not os.path.exists(stg_dir): + self.log.debug( + "creating dir: {}".format(stg_dir)) + os.mkdir(stg_dir) + mov_args = [ os.path.join( os.environ.get( @@ -315,6 +338,12 @@ class ExtractReview(pyblish.api.InstancePlugin): "resolutionHeight": resolution_height, "resolutionWidth": resolution_width, }) + if is_sequence: + repre_new.update({ + "stagingDir": stg_dir, + "files": os.listdir(stg_dir) + }) + if repre_new.get('preview'): repre_new.pop("preview") if repre_new.get('thumbnail'): From 533037b0c407c6035af97ae6b5d1648a7e971017 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 25 Jan 2020 14:19:21 +0100 Subject: [PATCH 033/434] wip(global): extract burnins to sequence --- pype/plugins/global/publish/extract_burnin.py | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 8f5a4aa000..4988f0d042 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -77,19 +77,31 @@ class ExtractBurnin(pype.api.Extractor): if "burnin" not in repre.get("tags", []): continue + is_sequence = "sequence" in 
repre.get("tags", []) + stagingdir = repre["stagingDir"] filename = "{0}".format(repre["files"]) + if is_sequence: + filename = repre["sequence_file"] + name = "_burnin" ext = os.path.splitext(filename)[1] movieFileBurnin = filename.replace(ext, "") + name + ext + if is_sequence: + fn_splt = filename.split(".") + movieFileBurnin = ".".join( + ((fn_splt[0] + name), fn_splt[-2], fn_splt[-1])) + + self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin)) + full_movie_path = os.path.join( - os.path.normpath(stagingdir), repre["files"] - ) + os.path.normpath(stagingdir), filename) full_burnin_path = os.path.join( - os.path.normpath(stagingdir), movieFileBurnin - ) + os.path.normpath(stagingdir), movieFileBurnin) + + self.log.debug("__ full_movie_path: {}".format(full_movie_path)) self.log.debug("__ full_burnin_path: {}".format(full_burnin_path)) # create copy of prep_data for anatomy formatting From dc459e593446eab6c6818fbf58040ffe28fcbe53 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 27 Jan 2020 12:45:02 +0100 Subject: [PATCH 034/434] hotfix - maya 2020 compatibility --- pype/maya/menu.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pype/maya/menu.py b/pype/maya/menu.py index 5254337f03..806944c117 100644 --- a/pype/maya/menu.py +++ b/pype/maya/menu.py @@ -15,12 +15,13 @@ log = logging.getLogger(__name__) def _get_menu(): """Return the menu instance if it currently exists in Maya""" - app = QtWidgets.QApplication.instance() - widgets = dict((w.objectName(), w) for w in app.allWidgets()) + widgets = dict(( + w.objectName(), w) for w in QtWidgets.QApplication.allWidgets()) menu = widgets.get(self._menu) return menu + def deferred(): log.info("Attempting to install scripts menu..") From 07997219d7920e8530e6200559c78989649ae33e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Mon, 27 Jan 2020 13:31:13 +0000 Subject: [PATCH 035/434] fixing unassigned variable --- pype/plugins/global/publish/collect_filesequences.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 9aa96b0e33..5c7ba41a5b 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -101,6 +101,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): lut_path = None slate_frame = None families_data = None + baked_mov_path = None subset = None version = None frame_start = 0 From 6e1f4de58dc8571f079867fe2ecd60ecc9edb897 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 27 Jan 2020 23:59:36 +0100 Subject: [PATCH 036/434] fix environment filter typo --- pype/plugins/global/publish/submit_publish_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index afb0bcab0c..faf4aaef93 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -162,7 +162,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_KEY", "FTRACK_SERVER", "PYPE_ROOT", - "PYPE_METADATA_FILE" + "PYPE_METADATA_FILE", "PYPE_STUDIO_PROJECTS_PATH", "PYPE_STUDIO_PROJECTS_MOUNT" ] From ee71d2420d7454f272d6ce19e319d80098288829 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 28 Jan 2020 00:00:19 +0100 Subject: [PATCH 037/434] add start and end frame to collection --- pype/plugins/global/publish/collect_filesequences.py | 2 
++ 1 file changed, 2 insertions(+) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index a04de4fdd7..564c5d528f 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -423,6 +423,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "name": ext, "ext": "{}".format(ext), "files": list(collection), + "frameStart": start, + "frameEnd": end, "stagingDir": root, "anatomy_template": "render", "fps": fps, From 5f3cad3dd16401739031fbd57d26bc2744298c4f Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 28 Jan 2020 12:27:37 +0100 Subject: [PATCH 038/434] fix case where there are no render layers yet --- pype/maya/lib.py | 2 +- pype/plugins/maya/create/create_render.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 28c57bbcdb..5f060cef73 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2469,7 +2469,7 @@ class RenderSetupListObserver: if not render_set: return - members = cmds.sets(render_set, query=True) + members = cmds.sets(render_set, query=True) or [] if not "LAYER_{}".format(layer_name) in members: print(" - creating set for {}".format(layer_name)) set = cmds.sets(n="LAYER_{}".format(layer_name)) diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index f847b8add5..e3c97c5c2c 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -78,7 +78,12 @@ class CreateRender(avalon.maya.Creator): self.log.warning("Deadline REST API url not found.") else: argument = "{}/api/pools?NamesOnly=true".format(deadline_url) - response = self._requests_get(argument) + try: + response = self._requests_get(argument) + except requests.exceptions.ConnectionError as e: + msg = 'Cannot connect to deadline web service' + self.log.error(msg) + raise RuntimeError('{} - {}'.format(msg, e)) if not response.ok: self.log.warning("No pools retrieved") else: From 83eabbccd8af0cf5c21b34587d111b007d175c10 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 28 Jan 2020 13:03:35 +0100 Subject: [PATCH 039/434] code cleanup --- .../global/publish/collect_filesequences.py | 1 + pype/plugins/global/publish/integrate_new.py | 2 - pype/plugins/global/publish/update_version.py | 163 ------------------ pype/plugins/maya/create/create_render.py | 18 -- 4 files changed, 1 insertion(+), 183 deletions(-) delete mode 100644 pype/plugins/global/publish/update_version.py diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 5f6bc78664..a04de4fdd7 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -167,6 +167,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): slate_frame = instance.get("slateFrame") version = instance.get("version") + else: # Search in directory data = dict() diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 87e1e50fc4..e577c477c3 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -84,8 +84,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if [ef for ef in self.exclude_families if instance.data["family"] in ef]: return - if "attach-to" in instance.data["families"]: - return self.register(instance) diff --git 
a/pype/plugins/global/publish/update_version.py b/pype/plugins/global/publish/update_version.py deleted file mode 100644 index 771bc04bba..0000000000 --- a/pype/plugins/global/publish/update_version.py +++ /dev/null @@ -1,163 +0,0 @@ -import os -import logging - -import pyblish.api -from avalon import api, io - -log = logging.getLogger(__name__) - - -class UpdateVersion(pyblish.api.InstancePlugin): - """Update existing subset version with new data""" - - label = "Update Subset Version" - order = pyblish.api.IntegratorOrder - families = ["attach-render"] - - def process(self, instance): - # Required environment variables - PROJECT = api.Session["AVALON_PROJECT"] - ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"] - TASK = instance.data.get("task") or api.Session["AVALON_TASK"] - LOCATION = api.Session["AVALON_LOCATION"] - - context = instance.context - - stagingdir = instance.data.get("stagingDir") - if not stagingdir: - self.log.info('''{} is missing reference to staging - directory Will try to get it from - representation'''.format(instance)) - - # extra check if stagingDir actually exists and is available - - self.log.debug("Establishing staging directory @ %s" % stagingdir) - - # Ensure at least one file is set up for transfer in staging dir. - repres = instance.data.get("representations", None) - assert repres, "Instance has no files to transfer" - assert isinstance(repres, (list, tuple)), ( - "Instance 'files' must be a list, got: {0}".format(repres) - ) - - # FIXME: io is not initialized at this point for shell host - io.install() - project = io.find_one({"type": "project"}) - - asset = io.find_one({"type": "asset", - "name": ASSET, - "parent": project["_id"]}) - - assert instance.data.get("attachTo"), "no subset to attach to" - for subset_to_attach in instance.data.get("attachTo"): - - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": subset_to_attach["subset"]}) - - assert all([project, asset]), ("Could not find current project or " - "asset '%s'" % ASSET) - - attach_version = subset_to_attach["version"] - - version_data = self.create_version_data(context, instance) - - version_data_instance = instance.data.get('versionData') - - if version_data_instance: - version_data.update(version_data_instance) - - version = self.create_version(subset=subset, - version_number=attach_version, - locations=[LOCATION], - data=version_data) - - self.log.debug("Creating version ...") - existing_version = io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': attach_version - }) - if existing_version is None: - version_id = io.insert_one(version).inserted_id - else: - io.update_many({ - 'type': 'version', - 'parent': subset["_id"], - 'name': attach_version - }, {'$set': version} - ) - version_id = existing_version['_id'] - instance.data['version'] = version['name'] - - def create_version(self, subset, version_number, locations, data=None): - """ Copy given source to destination - - Args: - subset (dict): the registered subset of the asset - version_number (int): the version number - locations (list): the currently registered locations - - Returns: - dict: collection of data to create a version - """ - # Imprint currently registered location - version_locations = [location for location in locations if - location is not None] - - return {"schema": "pype:version-3.0", - "type": "version", - "parent": subset["_id"], - "name": version_number, - "locations": version_locations, - "data": data} - - def create_version_data(self, context, instance): - 
"""Create the data collection for the version - - Args: - context: the current context - instance: the current instance being published - - Returns: - dict: the required information with instance.data as key - """ - - families = [] - current_families = instance.data.get("families", list()) - instance_family = instance.data.get("family", None) - - if instance_family is not None: - families.append(instance_family) - families += current_families - - self.log.debug("Registered root: {}".format(api.registered_root())) - # create relative source path for DB - try: - source = instance.data['source'] - except KeyError: - source = context.data["currentFile"] - source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), - api.registered_root()) - relative_path = os.path.relpath(source, api.registered_root()) - source = os.path.join("{root}", relative_path).replace("\\", "/") - - self.log.debug("Source: {}".format(source)) - version_data = {"families": families, - "time": context.data["time"], - "author": context.data["user"], - "source": source, - "comment": context.data.get("comment"), - "machine": context.data.get("machine"), - "fps": context.data.get("fps")} - - # Include optional data if present in - optionals = [ - "frameStart", "frameEnd", "step", "handles", - "handleEnd", "handleStart", "sourceHashes" - ] - for key in optionals: - if key in instance.data: - version_data[key] = instance.data[key] - - return version_data diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index e3c97c5c2c..faed231ac5 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -10,18 +10,6 @@ import pype.maya.lib as lib import avalon.maya -class RenderSetupListObserver: - """This will later server as handler to renderSetup changes""" - - def listItemAdded(self, item): - # TODO(antirotor): Implement - self.items.append(item) - print("* added {}".format(item.name())) - - def listItemRemoved(self, item): - print("removed") - - class CreateRender(avalon.maya.Creator): """Create render layer for export""" @@ -51,7 +39,6 @@ class CreateRender(avalon.maya.Creator): instance = super(CreateRender, self).process() cmds.setAttr("{}.machineList".format(instance), lock=True) self._rs = renderSetup.instance() - # self._rs.addListObserver(RenderSetupListObserver) if use_selection: print(">>> processing existing layers") layers = self._rs.getRenderLayers() @@ -116,11 +103,6 @@ class CreateRender(avalon.maya.Creator): self.data["primaryPool"] = pool_names - # We don't need subset or asset attributes - # self.data.pop("subset", None) - # self.data.pop("asset", None) - # self.data.pop("active", None) - self.data["suspendPublishJob"] = False self.data["extendFrames"] = False self.data["overrideExistingFrame"] = True From b9269512deab6e9bff99a13e563baa1a05441b40 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 28 Jan 2020 14:26:14 +0100 Subject: [PATCH 040/434] comment should not be in a query of asset version --- .../ftrack/publish/integrate_ftrack_api.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index c51685f84d..adb22aabba 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -148,6 +148,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): assetversion_cust_attrs = _assetversion_data.pop( "custom_attributes", {} ) + 
            asset_version_comment = _assetversion_data.pop(
+                "comment", None
+            )
             assetversion_data.update(_assetversion_data)
 
             assetversion_entity = session.query(
@@ -185,6 +188,20 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
             existing_assetversion_metadata.update(assetversion_metadata)
             assetversion_entity["metadata"] = existing_assetversion_metadata
 
+            # Add comment
+            if asset_version_comment:
+                assetversion_entity["comment"] = asset_version_comment
+                try:
+                    session.commit()
+                except Exception:
+                    session.rollback()
+                    self.log.warning((
+                        "Comment was not possible to set for AssetVersion "
+                        "\"{0}\". Can't set its value to: \"{1}\""
+                    ).format(
+                        assetversion_entity["id"], str(asset_version_comment)
+                    ))
+
             # Adding Custom Attributes
             for attr, val in assetversion_cust_attrs.items():
                 if attr in assetversion_entity["custom_attributes"]:

From 5d654e8de13e7c83d6190970d30157eacbca79c6 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 28 Jan 2020 14:26:51 +0100
Subject: [PATCH 041/434] sync to avalon action allows to synchronize empty
 projects

---
 pype/ftrack/lib/avalon_sync.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 8cebd12a59..b0482c2ab9 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -1722,7 +1722,11 @@ class SyncEntitiesFactory:
 
         self.avalon_project_id = new_id
         self._avalon_ents_by_id[str(new_id)] = project_item
+        if self._avalon_ents_by_ftrack_id is None:
+            self._avalon_ents_by_ftrack_id = {}
         self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id)
+        if self._avalon_ents_by_name is None:
+            self._avalon_ents_by_name = {}
         self._avalon_ents_by_name[project_item["name"]] = str(new_id)
 
         self.create_list.append(project_item)

From 2487a07e01e1cfb6b46a69b40cf5df16beb44b06 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 28 Jan 2020 14:27:15 +0100
Subject: [PATCH 042/434] action server starts subprocess with same executable
 as tray has

---
 pype/ftrack/ftrack_server/socket_thread.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py
index c688693c77..1bf9d69ad7 100644
--- a/pype/ftrack/ftrack_server/socket_thread.py
+++ b/pype/ftrack/ftrack_server/socket_thread.py
@@ -1,4 +1,5 @@
 import os
+import sys
 import time
 import socket
 import threading
@@ -52,7 +53,7 @@ class SocketThread(threading.Thread):
         )
 
         self.subproc = subprocess.Popen(
-            ["python", self.filepath, "-port", str(self.port)],
+            [sys.executable, self.filepath, "-port", str(self.port)],
             stdout=subprocess.PIPE
         )

From 6c70f3fcbff1d22feded66e65c474e908f88992f Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 28 Jan 2020 15:06:48 +0100
Subject: [PATCH 043/434] fix(nuke): remove annoying message window that
 appeared every time log.error happened

---
 pype/nuke/__init__.py | 69 ++++++++--------
 pype/nuke/lib.py | 78 +++++++++++--------
 pype/nuke/presets.py | 10 ++-
 pype/plugins/nuke/create/create_backdrop.py | 6 +-
 pype/plugins/nuke/create/create_gizmo.py | 12 ++-
 pype/plugins/nuke/create/create_read.py | 8 +-
 pype/plugins/nuke/create/create_write.py | 14 +++-
 pype/plugins/nuke/load/load_backdrop.py | 7 +-
 pype/plugins/nuke/load/load_gizmo_ip.py | 6 +-
 pype/plugins/nuke/load/load_luts_ip.py | 5 +-
 pype/plugins/nuke/load/load_matchmove.py | 5 +-
 .../nuke/publish/validate_rendered_frames.py | 4 +-
 12 files changed, 133 insertions(+), 91 deletions(-)

diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py
index
141cf4c13d..dfd61f4b39 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -33,40 +33,41 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -class NukeHandler(logging.Handler): - ''' - Nuke Handler - emits logs into nuke's script editor. - warning will emit nuke.warning() - critical and fatal would popup msg dialog to alert of the error. - ''' +# class NukeHandler(logging.Handler): +# ''' +# Nuke Handler - emits logs into nuke's script editor. +# warning will emit nuke.warning() +# critical and fatal would popup msg dialog to alert of the error. +# ''' +# +# def __init__(self): +# logging.Handler.__init__(self) +# self.set_name("Pype_Nuke_Handler") +# +# def emit(self, record): +# # Formated message: +# msg = self.format(record) +# +# if record.levelname.lower() in [ +# # "warning", +# "critical", +# "fatal", +# "error" +# ]: +# msg = self.format(record) +# nuke.message(msg) +# +# +# '''Adding Nuke Logging Handler''' +# log.info([handler.get_name() for handler in logging.root.handlers[:]]) +# nuke_handler = NukeHandler() +# if nuke_handler.get_name() \ +# not in [handler.get_name() +# for handler in logging.root.handlers[:]]: +# logging.getLogger().addHandler(nuke_handler) +# logging.getLogger().setLevel(logging.INFO) +# log.info([handler.get_name() for handler in logging.root.handlers[:]]) - def __init__(self): - logging.Handler.__init__(self) - self.set_name("Pype_Nuke_Handler") - - def emit(self, record): - # Formated message: - msg = self.format(record) - - if record.levelname.lower() in [ - # "warning", - "critical", - "fatal", - "error" - ]: - msg = self.format(record) - nuke.message(msg) - - -'''Adding Nuke Logging Handler''' -log.info([handler.get_name() for handler in logging.root.handlers[:]]) -nuke_handler = NukeHandler() -if nuke_handler.get_name() \ - not in [handler.get_name() - for handler in logging.root.handlers[:]]: - logging.getLogger().addHandler(nuke_handler) - logging.getLogger().setLevel(logging.INFO) -log.info([handler.get_name() for handler in logging.root.handlers[:]]) def reload_config(): """Attempt to reload pipeline at run-time. @@ -113,7 +114,7 @@ def install(): family_states = [ "write", "review", - "nukenodes" + "nukenodes" "gizmo" ] diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 7aa0395da5..9282443fcf 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -21,7 +21,6 @@ from .presets import ( from .presets import ( get_anatomy ) -# TODO: remove get_anatomy and import directly Anatomy() here from pypeapp import Logger log = Logger().get_logger(__name__, "nuke") @@ -50,8 +49,6 @@ def checkInventoryVersions(): and check if the node is having actual version. If not then it will color it to red. 
""" - # TODO: make it for all nodes not just Read (Loader - # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): if each.Class() == 'Read': @@ -93,7 +90,6 @@ def checkInventoryVersions(): def writes_version_sync(): ''' Callback synchronizing version of publishable write nodes ''' - # TODO: make it work with new write node group try: rootVersion = pype.get_version_from_path(nuke.root().name()) padding = len(rootVersion) @@ -130,7 +126,8 @@ def writes_version_sync(): os.makedirs(os.path.dirname(node_new_file), 0o766) except Exception as e: log.warning( - "Write node: `{}` has no version in path: {}".format(each.name(), e)) + "Write node: `{}` has no version in path: {}".format( + each.name(), e)) def version_up_script(): @@ -183,9 +180,11 @@ def format_anatomy(data): try: padding = int(anatomy.templates['render']['padding']) except KeyError as e: - log.error("`padding` key is not in `render` " - "Anatomy template. Please, add it there and restart " - "the pipeline (padding: \"4\"): `{}`".format(e)) + msg = "`padding` key is not in `render` " + "Anatomy template. Please, add it there and restart " + "the pipeline (padding: \"4\"): `{}`".format(e) + log.error(msg) + nuke.message(msg) version = data.get("version", None) if not version: @@ -265,7 +264,9 @@ def create_write_node(name, data, input=None, prenodes=None): anatomy_filled = format_anatomy(data) except Exception as e: - log.error("problem with resolving anatomy tepmlate: {}".format(e)) + msg = "problem with resolving anatomy tepmlate: {}".format(e) + log.error(msg) + nuke.message(msg) # build file path to workfiles fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/") @@ -543,8 +544,11 @@ class WorkfileSettings(object): viewer_dict (dict): adjustments from presets ''' - assert isinstance(viewer_dict, dict), log.error( - "set_viewers_colorspace(): argument should be dictionary") + if not isinstance(viewer_dict, dict): + msg = "set_viewers_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) + return filter_knobs = [ "viewerProcess", @@ -592,8 +596,10 @@ class WorkfileSettings(object): root_dict (dict): adjustmensts from presets ''' - assert isinstance(root_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(root_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) log.debug(">> root_dict: {}".format(root_dict)) @@ -640,8 +646,11 @@ class WorkfileSettings(object): ''' # TODO: complete this function so any write node in # scene will have fixed colorspace following presets for the project - assert isinstance(write_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(write_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + nuke.message(msg) + log.error(msg) + return log.debug("__ set_writes_colorspace(): {}".format(write_dict)) @@ -653,25 +662,28 @@ class WorkfileSettings(object): try: self.set_root_colorspace(nuke_colorspace["root"]) except AttributeError: - log.error( - "set_colorspace(): missing `root` settings in template") + msg = "set_colorspace(): missing `root` settings in template" + try: self.set_viewers_colorspace(nuke_colorspace["viewer"]) except AttributeError: - log.error( - "set_colorspace(): missing `viewer` settings in template") + msg = "set_colorspace(): missing `viewer` settings in template" + nuke.message(msg) + log.error(msg) try: 
             self.set_writes_colorspace(nuke_colorspace["write"])
         except AttributeError:
-            log.error(
-                "set_colorspace(): missing `write` settings in template")
+            msg = "set_colorspace(): missing `write` settings in template"
+            nuke.message(msg)
+            log.error(msg)
 
         try:
             for key in nuke_colorspace:
                 log.debug("Preset's colorspace key: {}".format(key))
         except TypeError:
-            log.error("Nuke is not in templates! \n\n\n"
-                      "contact your supervisor!")
+            msg = "Nuke is not in templates! Contact your supervisor!"
+            nuke.message(msg)
+            log.error(msg)
 
     def reset_frame_range_handles(self):
         """Set frame range to current asset"""
@@ -758,13 +770,13 @@ class WorkfileSettings(object):
         }
 
         if any(x for x in data.values() if x is None):
-            log.error(
-                "Missing set shot attributes in DB."
-                "\nContact your supervisor!."
-                "\n\nWidth: `{width}`"
-                "\nHeight: `{height}`"
-                "\nPixel Asspect: `{pixel_aspect}`".format(**data)
-            )
+            msg = ("Missing set shot attributes in DB."
+                   "\nContact your supervisor!"
+                   "\n\nWidth: `{width}`"
+                   "\nHeight: `{height}`"
+                   "\nPixel Aspect: `{pixel_aspect}`".format(**data))
+            log.error(msg)
+            nuke.message(msg)
 
         bbox = self._asset_entity.get('data', {}).get('crop')
 
@@ -781,10 +793,10 @@ class WorkfileSettings(object):
             )
         except Exception as e:
             bbox = None
-            log.error(
-                "{}: {} \nFormat:Crop need to be set with dots, example: "
-                "0.0.1920.1080, /nSetting to default".format(__name__, e)
-            )
+            msg = ("{}: {} \nFormat:Crop need to be set with dots, example: "
+                   "0.0.1920.1080, \nSetting to default".format(__name__, e))
+            log.error(msg)
+            nuke.message(msg)
 
         existing_format = None
         for format in nuke.formats():
diff --git a/pype/nuke/presets.py b/pype/nuke/presets.py
index e0c12e2671..a413ccc878 100644
--- a/pype/nuke/presets.py
+++ b/pype/nuke/presets.py
@@ -1,6 +1,6 @@
 from pype import api as pype
 from pypeapp import Anatomy, config
-
+import nuke
 
 log = pype.Logger().get_logger(__name__, "nuke")
 
@@ -28,7 +28,7 @@ def get_node_dataflow_preset(**kwarg):
     families = kwarg.get("families", [])
     preset = kwarg.get("preset", None)  # omit < 2.0.0v
-    assert any([host, cls]), log.error(
+    assert any([host, cls]), nuke.message(
         "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
 
     nuke_dataflow = get_dataflow_preset().get(str(host), None)
@@ -56,8 +56,10 @@ def get_node_colorspace_preset(**kwarg):
     families = kwarg.get("families", [])
     preset = kwarg.get("preset", None)  # omit < 2.0.0v
-    assert any([host, cls]), log.error(
-        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
+    if not any([host, cls]):
+        msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)
+        log.error(msg)
+        nuke.message(msg)
 
     nuke_colorspace = get_colorspace_preset().get(str(host), None)
     nuke_colorspace_node = nuke_colorspace.get(str(cls), None)
diff --git a/pype/plugins/nuke/create/create_backdrop.py b/pype/plugins/nuke/create/create_backdrop.py
index 767e92b592..2016c66095 100644
--- a/pype/plugins/nuke/create/create_backdrop.py
+++ b/pype/plugins/nuke/create/create_backdrop.py
@@ -35,8 +35,10 @@ class CreateBackdrop(Creator):
 
                 return instance
             else:
-                nuke.message("Please select nodes you "
-                             "wish to add to a container")
+                msg = ("Please select nodes you "
+                       "wish to add to a container")
+                self.log.error(msg)
+                nuke.message(msg)
                 return
         else:
             bckd_node = autoBackdrop()
diff --git a/pype/plugins/nuke/create/create_gizmo.py b/pype/plugins/nuke/create/create_gizmo.py
index 41229862e3..93fbbcf144 100644
--- a/pype/plugins/nuke/create/create_gizmo.py
+++ b/pype/plugins/nuke/create/create_gizmo.py
@@ -36,8 +36,10 @@ class CreateGizmo(Creator):
node["tile_color"].setValue(int(self.node_color, 16)) return anlib.imprint(node, self.data) else: - nuke.message("Please select a group node " - "you wish to publish as the gizmo") + msg = "Please select a group node " + "you wish to publish as the gizmo" + self.log.error(msg) + nuke.message(msg) if len(nodes) >= 2: anlib.select_nodes(nodes) @@ -58,8 +60,10 @@ class CreateGizmo(Creator): return anlib.imprint(gizmo_node, self.data) else: - nuke.message("Please select nodes you " - "wish to add to the gizmo") + msg = "Please select nodes you " + "wish to add to the gizmo" + self.log.error(msg) + nuke.message(msg) return else: with anlib.maintained_selection(): diff --git a/pype/plugins/nuke/create/create_read.py b/pype/plugins/nuke/create/create_read.py index 1aa7e68746..70db580a7e 100644 --- a/pype/plugins/nuke/create/create_read.py +++ b/pype/plugins/nuke/create/create_read.py @@ -34,7 +34,9 @@ class CrateRead(avalon.nuke.Creator): nodes = self.nodes if not nodes or len(nodes) == 0: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) else: count_reads = 0 for node in nodes: @@ -46,7 +48,9 @@ class CrateRead(avalon.nuke.Creator): count_reads += 1 if count_reads < 1: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) return def change_read_node(self, name, node, data): diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py index a85408cab3..c5c7d659e3 100644 --- a/pype/plugins/nuke/create/create_write.py +++ b/pype/plugins/nuke/create/create_write.py @@ -41,9 +41,11 @@ class CreateWriteRender(plugin.PypeCreator): if (self.options or {}).get("useSelection"): nodes = self.nodes - assert len(nodes) < 2, self.log.error( - "Select only one node. The node you want to connect to, " - "or tick off `Use selection`") + if not (len(nodes) < 2): + msg = "Select only one node. The node you want to connect to, " + "or tick off `Use selection`" + log.error(msg) + nuke.message(msg) selected_node = nodes[0] inputs = [selected_node] @@ -134,7 +136,11 @@ class CreateWritePrerender(plugin.PypeCreator): if (self.options or {}).get("useSelection"): nodes = self.nodes - assert len(nodes) < 2, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`") + if not (len(nodes) < 2): + msg = "Select only one node. 
+                       "or tick off `Use selection`")
+                self.log.error(msg)
+                nuke.message(msg)
 
             selected_node = nodes[0]
             inputs = [selected_node]
diff --git a/pype/plugins/nuke/load/load_backdrop.py b/pype/plugins/nuke/load/load_backdrop.py
index 7f58d4e9ec..07a6724771 100644
--- a/pype/plugins/nuke/load/load_backdrop.py
+++ b/pype/plugins/nuke/load/load_backdrop.py
@@ -256,8 +256,11 @@ class LoadBackdropNodes(api.Loader):
         if len(viewer) > 0:
             viewer = viewer[0]
         else:
-            self.log.error("Please create Viewer node before you "
-                           "run this action again")
+            msg = ("Please create Viewer node before you "
+                   "run this action again")
+            self.log.error(msg)
+            nuke.message(msg)
+
             return None
 
         # get coordinates of Viewer1
diff --git a/pype/plugins/nuke/load/load_gizmo_ip.py b/pype/plugins/nuke/load/load_gizmo_ip.py
index 0d78c14214..23d7ef2f4a 100644
--- a/pype/plugins/nuke/load/load_gizmo_ip.py
+++ b/pype/plugins/nuke/load/load_gizmo_ip.py
@@ -176,8 +176,10 @@ class LoadGizmoInputProcess(api.Loader):
         if len(viewer) > 0:
             viewer = viewer[0]
         else:
-            self.log.error("Please create Viewer node before you "
-                           "run this action again")
+            msg = ("Please create Viewer node before you "
+                   "run this action again")
+            self.log.error(msg)
+            nuke.message(msg)
             return None
 
         # get coordinates of Viewer1
diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py
index 5f09adb05f..2b38a9ff08 100644
--- a/pype/plugins/nuke/load/load_luts_ip.py
+++ b/pype/plugins/nuke/load/load_luts_ip.py
@@ -276,7 +276,10 @@ class LoadLutsInputProcess(api.Loader):
         if len(viewer) > 0:
             viewer = viewer[0]
         else:
-            self.log.error("Please create Viewer node before you run this action again")
+            msg = ("Please create Viewer node before you "
+                   "run this action again")
+            self.log.error(msg)
+            nuke.message(msg)
             return None
 
         # get coordinates of Viewer1
diff --git a/pype/plugins/nuke/load/load_matchmove.py b/pype/plugins/nuke/load/load_matchmove.py
index 6a674368fb..60d5dc026f 100644
--- a/pype/plugins/nuke/load/load_matchmove.py
+++ b/pype/plugins/nuke/load/load_matchmove.py
@@ -1,4 +1,5 @@
 from avalon import api
+import nuke
 
 
 class MatchmoveLoader(api.Loader):
@@ -19,6 +20,8 @@ class MatchmoveLoader(api.Loader):
             exec(open(self.fname).read())
 
         else:
-            self.log.error("Unsupported script type")
+            msg = "Unsupported script type"
+            self.log.error(msg)
+            nuke.message(msg)
 
         return True
diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py
index c63c289947..169ea1ecb5 100644
--- a/pype/plugins/nuke/publish/validate_rendered_frames.py
+++ b/pype/plugins/nuke/publish/validate_rendered_frames.py
@@ -41,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
             if not repre.get('files'):
                 msg = ("no frames were collected, "
                        "you need to render them")
-                self.log.warning(msg)
+                self.log.error(msg)
                 raise ValidationException(msg)
 
             collections, remainder = clique.assemble(repre["files"])
@@ -75,7 +75,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
             self.log.info(
                 'len(collection.indexes): {}'.format(collected_frames_len)
             )
-
+
             if "slate" in instance.data["families"]:
                 collected_frames_len -= 1
 

From b0da1c9013079f7cb7c26e63161158edf7ef1c88 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 28 Jan 2020 15:06:48 +0100
Subject: [PATCH 044/434] fix(nuke): remove annoying message window that
 appeared every time log.error happened

---
 pype/nuke/__init__.py | 69 ++++++++--------
 pype/nuke/lib.py | 78 +++++++++++--------
pype/nuke/presets.py | 10 ++- pype/plugins/nuke/create/create_backdrop.py | 6 +- pype/plugins/nuke/create/create_gizmo.py | 12 ++- pype/plugins/nuke/create/create_read.py | 8 +- pype/plugins/nuke/create/create_write.py | 14 +++- pype/plugins/nuke/load/load_backdrop.py | 7 +- pype/plugins/nuke/load/load_gizmo_ip.py | 6 +- pype/plugins/nuke/load/load_luts_ip.py | 5 +- pype/plugins/nuke/load/load_matchmove.py | 5 +- .../nuke/publish/validate_rendered_frames.py | 4 +- 12 files changed, 133 insertions(+), 91 deletions(-) diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index 141cf4c13d..dfd61f4b39 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -33,40 +33,41 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -class NukeHandler(logging.Handler): - ''' - Nuke Handler - emits logs into nuke's script editor. - warning will emit nuke.warning() - critical and fatal would popup msg dialog to alert of the error. - ''' +# class NukeHandler(logging.Handler): +# ''' +# Nuke Handler - emits logs into nuke's script editor. +# warning will emit nuke.warning() +# critical and fatal would popup msg dialog to alert of the error. +# ''' +# +# def __init__(self): +# logging.Handler.__init__(self) +# self.set_name("Pype_Nuke_Handler") +# +# def emit(self, record): +# # Formated message: +# msg = self.format(record) +# +# if record.levelname.lower() in [ +# # "warning", +# "critical", +# "fatal", +# "error" +# ]: +# msg = self.format(record) +# nuke.message(msg) +# +# +# '''Adding Nuke Logging Handler''' +# log.info([handler.get_name() for handler in logging.root.handlers[:]]) +# nuke_handler = NukeHandler() +# if nuke_handler.get_name() \ +# not in [handler.get_name() +# for handler in logging.root.handlers[:]]: +# logging.getLogger().addHandler(nuke_handler) +# logging.getLogger().setLevel(logging.INFO) +# log.info([handler.get_name() for handler in logging.root.handlers[:]]) - def __init__(self): - logging.Handler.__init__(self) - self.set_name("Pype_Nuke_Handler") - - def emit(self, record): - # Formated message: - msg = self.format(record) - - if record.levelname.lower() in [ - # "warning", - "critical", - "fatal", - "error" - ]: - msg = self.format(record) - nuke.message(msg) - - -'''Adding Nuke Logging Handler''' -log.info([handler.get_name() for handler in logging.root.handlers[:]]) -nuke_handler = NukeHandler() -if nuke_handler.get_name() \ - not in [handler.get_name() - for handler in logging.root.handlers[:]]: - logging.getLogger().addHandler(nuke_handler) - logging.getLogger().setLevel(logging.INFO) -log.info([handler.get_name() for handler in logging.root.handlers[:]]) def reload_config(): """Attempt to reload pipeline at run-time. @@ -113,7 +114,7 @@ def install(): family_states = [ "write", "review", - "nukenodes" + "nukenodes" "gizmo" ] diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 7aa0395da5..9282443fcf 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -21,7 +21,6 @@ from .presets import ( from .presets import ( get_anatomy ) -# TODO: remove get_anatomy and import directly Anatomy() here from pypeapp import Logger log = Logger().get_logger(__name__, "nuke") @@ -50,8 +49,6 @@ def checkInventoryVersions(): and check if the node is having actual version. If not then it will color it to red. 
""" - # TODO: make it for all nodes not just Read (Loader - # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): if each.Class() == 'Read': @@ -93,7 +90,6 @@ def checkInventoryVersions(): def writes_version_sync(): ''' Callback synchronizing version of publishable write nodes ''' - # TODO: make it work with new write node group try: rootVersion = pype.get_version_from_path(nuke.root().name()) padding = len(rootVersion) @@ -130,7 +126,8 @@ def writes_version_sync(): os.makedirs(os.path.dirname(node_new_file), 0o766) except Exception as e: log.warning( - "Write node: `{}` has no version in path: {}".format(each.name(), e)) + "Write node: `{}` has no version in path: {}".format( + each.name(), e)) def version_up_script(): @@ -183,9 +180,11 @@ def format_anatomy(data): try: padding = int(anatomy.templates['render']['padding']) except KeyError as e: - log.error("`padding` key is not in `render` " - "Anatomy template. Please, add it there and restart " - "the pipeline (padding: \"4\"): `{}`".format(e)) + msg = "`padding` key is not in `render` " + "Anatomy template. Please, add it there and restart " + "the pipeline (padding: \"4\"): `{}`".format(e) + log.error(msg) + nuke.message(msg) version = data.get("version", None) if not version: @@ -265,7 +264,9 @@ def create_write_node(name, data, input=None, prenodes=None): anatomy_filled = format_anatomy(data) except Exception as e: - log.error("problem with resolving anatomy tepmlate: {}".format(e)) + msg = "problem with resolving anatomy tepmlate: {}".format(e) + log.error(msg) + nuke.message(msg) # build file path to workfiles fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/") @@ -543,8 +544,11 @@ class WorkfileSettings(object): viewer_dict (dict): adjustments from presets ''' - assert isinstance(viewer_dict, dict), log.error( - "set_viewers_colorspace(): argument should be dictionary") + if not isinstance(viewer_dict, dict): + msg = "set_viewers_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) + return filter_knobs = [ "viewerProcess", @@ -592,8 +596,10 @@ class WorkfileSettings(object): root_dict (dict): adjustmensts from presets ''' - assert isinstance(root_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(root_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) log.debug(">> root_dict: {}".format(root_dict)) @@ -640,8 +646,11 @@ class WorkfileSettings(object): ''' # TODO: complete this function so any write node in # scene will have fixed colorspace following presets for the project - assert isinstance(write_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(write_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + nuke.message(msg) + log.error(msg) + return log.debug("__ set_writes_colorspace(): {}".format(write_dict)) @@ -653,25 +662,28 @@ class WorkfileSettings(object): try: self.set_root_colorspace(nuke_colorspace["root"]) except AttributeError: - log.error( - "set_colorspace(): missing `root` settings in template") + msg = "set_colorspace(): missing `root` settings in template" + try: self.set_viewers_colorspace(nuke_colorspace["viewer"]) except AttributeError: - log.error( - "set_colorspace(): missing `viewer` settings in template") + msg = "set_colorspace(): missing `viewer` settings in template" + nuke.message(msg) + log.error(msg) try: 
self.set_writes_colorspace(nuke_colorspace["write"]) except AttributeError: - log.error( - "set_colorspace(): missing `write` settings in template") + msg = "set_colorspace(): missing `write` settings in template" + nuke.message(msg) + log.error(msg) try: for key in nuke_colorspace: log.debug("Preset's colorspace key: {}".format(key)) except TypeError: - log.error("Nuke is not in templates! \n\n\n" - "contact your supervisor!") + msg = "Nuke is not in templates! Contact your supervisor!" + nuke.message(msg) + log.error(msg) def reset_frame_range_handles(self): """Set frame range to current asset""" @@ -758,13 +770,13 @@ class WorkfileSettings(object): } if any(x for x in data.values() if x is None): - log.error( - "Missing set shot attributes in DB." - "\nContact your supervisor!." - "\n\nWidth: `{width}`" - "\nHeight: `{height}`" - "\nPixel Asspect: `{pixel_aspect}`".format(**data) - ) + msg = "Missing set shot attributes in DB." + "\nContact your supervisor!." + "\n\nWidth: `{width}`" + "\nHeight: `{height}`" + "\nPixel Asspect: `{pixel_aspect}`".format(**data) + log.error(msg) + nuke.message(msg) bbox = self._asset_entity.get('data', {}).get('crop') @@ -781,10 +793,10 @@ class WorkfileSettings(object): ) except Exception as e: bbox = None - log.error( - "{}: {} \nFormat:Crop need to be set with dots, example: " + msg = "{}: {} \nFormat:Crop need to be set with dots, example: " "0.0.1920.1080, /nSetting to default".format(__name__, e) - ) + log.error(msg) + nuke.message(msg) existing_format = None for format in nuke.formats(): diff --git a/pype/nuke/presets.py b/pype/nuke/presets.py index e0c12e2671..a413ccc878 100644 --- a/pype/nuke/presets.py +++ b/pype/nuke/presets.py @@ -1,6 +1,6 @@ from pype import api as pype from pypeapp import Anatomy, config - +import nuke log = pype.Logger().get_logger(__name__, "nuke") @@ -28,7 +28,7 @@ def get_node_dataflow_preset(**kwarg): families = kwarg.get("families", []) preset = kwarg.get("preset", None) # omit < 2.0.0v - assert any([host, cls]), log.error( + assert any([host, cls]), nuke.message( "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) nuke_dataflow = get_dataflow_preset().get(str(host), None) @@ -56,8 +56,10 @@ def get_node_colorspace_preset(**kwarg): families = kwarg.get("families", []) preset = kwarg.get("preset", None) # omit < 2.0.0v - assert any([host, cls]), log.error( - "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) + if not any([host, cls]): + msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__) + log.error(msg) + nuke.message(msg) nuke_colorspace = get_colorspace_preset().get(str(host), None) nuke_colorspace_node = nuke_colorspace.get(str(cls), None) diff --git a/pype/plugins/nuke/create/create_backdrop.py b/pype/plugins/nuke/create/create_backdrop.py index 767e92b592..2016c66095 100644 --- a/pype/plugins/nuke/create/create_backdrop.py +++ b/pype/plugins/nuke/create/create_backdrop.py @@ -35,8 +35,10 @@ class CreateBackdrop(Creator): return instance else: - nuke.message("Please select nodes you " - "wish to add to a container") + msg = "Please select nodes you " + "wish to add to a container" + self.log.error(msg) + nuke.message(msg) return else: bckd_node = autoBackdrop() diff --git a/pype/plugins/nuke/create/create_gizmo.py b/pype/plugins/nuke/create/create_gizmo.py index 41229862e3..93fbbcf144 100644 --- a/pype/plugins/nuke/create/create_gizmo.py +++ b/pype/plugins/nuke/create/create_gizmo.py @@ -36,8 +36,10 @@ class CreateGizmo(Creator): 
node["tile_color"].setValue(int(self.node_color, 16)) return anlib.imprint(node, self.data) else: - nuke.message("Please select a group node " - "you wish to publish as the gizmo") + msg = "Please select a group node " + "you wish to publish as the gizmo" + self.log.error(msg) + nuke.message(msg) if len(nodes) >= 2: anlib.select_nodes(nodes) @@ -58,8 +60,10 @@ class CreateGizmo(Creator): return anlib.imprint(gizmo_node, self.data) else: - nuke.message("Please select nodes you " - "wish to add to the gizmo") + msg = "Please select nodes you " + "wish to add to the gizmo" + self.log.error(msg) + nuke.message(msg) return else: with anlib.maintained_selection(): diff --git a/pype/plugins/nuke/create/create_read.py b/pype/plugins/nuke/create/create_read.py index 1aa7e68746..70db580a7e 100644 --- a/pype/plugins/nuke/create/create_read.py +++ b/pype/plugins/nuke/create/create_read.py @@ -34,7 +34,9 @@ class CrateRead(avalon.nuke.Creator): nodes = self.nodes if not nodes or len(nodes) == 0: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) else: count_reads = 0 for node in nodes: @@ -46,7 +48,9 @@ class CrateRead(avalon.nuke.Creator): count_reads += 1 if count_reads < 1: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) return def change_read_node(self, name, node, data): diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py index a85408cab3..c5c7d659e3 100644 --- a/pype/plugins/nuke/create/create_write.py +++ b/pype/plugins/nuke/create/create_write.py @@ -41,9 +41,11 @@ class CreateWriteRender(plugin.PypeCreator): if (self.options or {}).get("useSelection"): nodes = self.nodes - assert len(nodes) < 2, self.log.error( - "Select only one node. The node you want to connect to, " - "or tick off `Use selection`") + if not (len(nodes) < 2): + msg = "Select only one node. The node you want to connect to, " + "or tick off `Use selection`" + log.error(msg) + nuke.message(msg) selected_node = nodes[0] inputs = [selected_node] @@ -134,7 +136,11 @@ class CreateWritePrerender(plugin.PypeCreator): if (self.options or {}).get("useSelection"): nodes = self.nodes - assert len(nodes) < 2, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`") + if not (len(nodes) < 2): + msg = "Select only one node. 
The node you want to connect to, " + "or tick off `Use selection`" + self.log.error(msg) + nuke.message(msg) selected_node = nodes[0] inputs = [selected_node] diff --git a/pype/plugins/nuke/load/load_backdrop.py b/pype/plugins/nuke/load/load_backdrop.py index 7f58d4e9ec..07a6724771 100644 --- a/pype/plugins/nuke/load/load_backdrop.py +++ b/pype/plugins/nuke/load/load_backdrop.py @@ -256,8 +256,11 @@ class LoadBackdropNodes(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you " - "run this action again") + if not (len(nodes) < 2): + msg = "Please create Viewer node before you " + "run this action again" + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git a/pype/plugins/nuke/load/load_gizmo_ip.py b/pype/plugins/nuke/load/load_gizmo_ip.py index 0d78c14214..23d7ef2f4a 100644 --- a/pype/plugins/nuke/load/load_gizmo_ip.py +++ b/pype/plugins/nuke/load/load_gizmo_ip.py @@ -176,8 +176,10 @@ class LoadGizmoInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you " - "run this action again") + msg = "Please create Viewer node before you " + "run this action again" + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py index 5f09adb05f..2b38a9ff08 100644 --- a/pype/plugins/nuke/load/load_luts_ip.py +++ b/pype/plugins/nuke/load/load_luts_ip.py @@ -276,7 +276,10 @@ class LoadLutsInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you run this action again") + msg = "Please create Viewer node before you " + "run this action again" + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git a/pype/plugins/nuke/load/load_matchmove.py b/pype/plugins/nuke/load/load_matchmove.py index 6a674368fb..60d5dc026f 100644 --- a/pype/plugins/nuke/load/load_matchmove.py +++ b/pype/plugins/nuke/load/load_matchmove.py @@ -1,4 +1,5 @@ from avalon import api +import nuke class MatchmoveLoader(api.Loader): @@ -19,6 +20,8 @@ class MatchmoveLoader(api.Loader): exec(open(self.fname).read()) else: - self.log.error("Unsupported script type") + msg = "Unsupported script type" + self.log.error(msg) + nuke.message(msg) return True diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index c63c289947..169ea1ecb5 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -41,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): if not repre.get('files'): msg = ("no frames were collected, " "you need to render them") - self.log.warning(msg) + self.log.error(msg) raise ValidationException(msg) collections, remainder = clique.assemble(repre["files"]) @@ -75,7 +75,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): self.log.info( 'len(collection.indexes): {}'.format(collected_frames_len) ) - + if "slate" in instance.data["families"]: collected_frames_len -= 1 From ad6d5a1d55389a277e9b4fd45c761d372c2a8438 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 28 Jan 2020 23:50:52 +0100 Subject: [PATCH 045/434] fix zerotransform Pivots --- pype/plugins/maya/load/load_reference.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pype/plugins/maya/load/load_reference.py 
b/pype/plugins/maya/load/load_reference.py index 376fcc2c01..e5b0c0e238 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -40,14 +40,11 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): namespace = cmds.referenceQuery(nodes[0], namespace=True) shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) groupNode = pm.PyNode(groupName) roots = set() - print(nodes) for node in newNodes: try: @@ -57,7 +54,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): for root in roots: root.setParent(world=True) - groupNode.root().zeroTransformPivots() + groupNode.zeroTransformPivots() for root in roots: root.setParent(groupNode) From 837807d5357406dcce1e42a2dadbd50e2e609173 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 29 Jan 2020 00:50:58 +0100 Subject: [PATCH 046/434] syntax fixes --- pype/nuke/lib.py | 13 +++++++------ pype/plugins/nuke/create/create_gizmo.py | 8 ++++---- pype/plugins/nuke/create/create_write.py | 8 ++++---- .../plugins/nuke/publish/extract_review_data_lut.py | 2 +- 4 files changed, 16 insertions(+), 15 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 9282443fcf..db1a5919c3 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -180,9 +180,10 @@ def format_anatomy(data): try: padding = int(anatomy.templates['render']['padding']) except KeyError as e: - msg = "`padding` key is not in `render` " + msg = ("`padding` key is not in `render` " "Anatomy template. Please, add it there and restart " - "the pipeline (padding: \"4\"): `{}`".format(e) + "the pipeline (padding: \"4\"): `{}`").format(e) + log.error(msg) nuke.message(msg) @@ -770,11 +771,11 @@ class WorkfileSettings(object): } if any(x for x in data.values() if x is None): - msg = "Missing set shot attributes in DB." + msg = ("Missing set shot attributes in DB." "\nContact your supervisor!." 
"\n\nWidth: `{width}`" "\nHeight: `{height}`" - "\nPixel Asspect: `{pixel_aspect}`".format(**data) + "\nPixel Asspect: `{pixel_aspect}`").format(**data) log.error(msg) nuke.message(msg) @@ -793,8 +794,8 @@ class WorkfileSettings(object): ) except Exception as e: bbox = None - msg = "{}: {} \nFormat:Crop need to be set with dots, example: " - "0.0.1920.1080, /nSetting to default".format(__name__, e) + msg = ("{}:{} \nFormat:Crop need to be set with dots, example: " + "0.0.1920.1080, /nSetting to default").format(__name__, e) log.error(msg) nuke.message(msg) diff --git a/pype/plugins/nuke/create/create_gizmo.py b/pype/plugins/nuke/create/create_gizmo.py index 93fbbcf144..ca199b8800 100644 --- a/pype/plugins/nuke/create/create_gizmo.py +++ b/pype/plugins/nuke/create/create_gizmo.py @@ -36,8 +36,8 @@ class CreateGizmo(Creator): node["tile_color"].setValue(int(self.node_color, 16)) return anlib.imprint(node, self.data) else: - msg = "Please select a group node " - "you wish to publish as the gizmo" + msg = ("Please select a group node " + "you wish to publish as the gizmo") self.log.error(msg) nuke.message(msg) @@ -60,8 +60,8 @@ class CreateGizmo(Creator): return anlib.imprint(gizmo_node, self.data) else: - msg = "Please select nodes you " - "wish to add to the gizmo" + msg = ("Please select nodes you " + "wish to add to the gizmo") self.log.error(msg) nuke.message(msg) return diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py index c5c7d659e3..74e450f267 100644 --- a/pype/plugins/nuke/create/create_write.py +++ b/pype/plugins/nuke/create/create_write.py @@ -42,8 +42,8 @@ class CreateWriteRender(plugin.PypeCreator): nodes = self.nodes if not (len(nodes) < 2): - msg = "Select only one node. The node you want to connect to, " - "or tick off `Use selection`" + msg = ("Select only one node. The node you want to connect to, " + "or tick off `Use selection`") log.error(msg) nuke.message(msg) @@ -137,8 +137,8 @@ class CreateWritePrerender(plugin.PypeCreator): nodes = self.nodes if not (len(nodes) < 2): - msg = "Select only one node. The node you want to connect to, " - "or tick off `Use selection`" + msg = ("Select only one node. 
The node you want to connect to, " + "or tick off `Use selection`") self.log.error(msg) nuke.message(msg) diff --git a/pype/plugins/nuke/publish/extract_review_data_lut.py b/pype/plugins/nuke/publish/extract_review_data_lut.py index 4373309363..90b1fda1ec 100644 --- a/pype/plugins/nuke/publish/extract_review_data_lut.py +++ b/pype/plugins/nuke/publish/extract_review_data_lut.py @@ -41,7 +41,7 @@ class ExtractReviewDataLut(pype.api.Extractor): with anlib.maintained_selection(): exporter = pnlib.ExporterReviewLut( self, instance - ) + ) data = exporter.generate_lut() # assign to representations From b2b6a0e79013eb8de5875a35c69cdb8c20db0b12 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 29 Jan 2020 01:22:49 +0100 Subject: [PATCH 047/434] remove capture_gui dependency --- .../plugins/maya/publish/extract_quicktime.py | 59 +++++++++- .../plugins/maya/publish/extract_thumbnail.py | 104 +++++++++--------- 2 files changed, 107 insertions(+), 56 deletions(-) diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py index 1031955260..94b5a716a2 100644 --- a/pype/plugins/maya/publish/extract_quicktime.py +++ b/pype/plugins/maya/publish/extract_quicktime.py @@ -1,16 +1,14 @@ import os +import glob import contextlib -import capture_gui import clique +import capture # import pype.maya.lib as lib import pype.api # from maya import cmds, mel import pymel.core as pm -# import ffmpeg -# # from pype.scripts import otio_burnin -# reload(ffmpeg) # TODO: move codec settings to presets @@ -93,7 +91,18 @@ class ExtractQuicktime(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + # viewer opening call to allow a signal to trigger between playblast + # and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) self.log.info("file list {}".format(playblast)) @@ -119,6 +128,46 @@ class ExtractQuicktime(pype.api.Extractor): } instance.data["representations"].append(representation) + def _fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. 
+ if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + + @contextlib.contextmanager def maintained_time(): diff --git a/pype/plugins/maya/publish/extract_thumbnail.py b/pype/plugins/maya/publish/extract_thumbnail.py index dc8044cf19..8377af1ac0 100644 --- a/pype/plugins/maya/publish/extract_thumbnail.py +++ b/pype/plugins/maya/publish/extract_thumbnail.py @@ -1,31 +1,14 @@ import os import contextlib -import time -import sys +import glob -import capture_gui -import clique +import capture import pype.maya.lib as lib import pype.api from maya import cmds import pymel.core as pm -# import ffmpeg -# reload(ffmpeg) - -import avalon.maya - -# import maya_utils as mu - -# from tweakHUD import master -# from tweakHUD import draft_hud as dHUD -# from tweakHUD import ftrackStrings as fStrings - -# -# def soundOffsetFunc(oSF, SF, H): -# tmOff = (oSF - H) - SF -# return tmOff class ExtractThumbnail(pype.api.Extractor): @@ -47,39 +30,8 @@ class ExtractThumbnail(pype.api.Extractor): end = cmds.currentTime(query=True) self.log.info("start: {}, end: {}".format(start, end)) - members = instance.data['setMembers'] camera = instance.data['review_camera'] - # project_code = ftrack_data['Project']['code'] - # task_type = ftrack_data['Task']['type'] - # - # # load Preset - # studio_repos = os.path.abspath(os.environ.get('studio_repos')) - # shot_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '_' + asset + '.json')) - # - # task_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '.json')) - # - # project_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '.json')) - # - # default_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # 'default.json') - # - # if os.path.isfile(shot_preset_path): - # preset_to_use = shot_preset_path - # elif os.path.isfile(task_preset_path): - # preset_to_use = task_preset_path - # elif os.path.isfile(project_preset_path): - # preset_to_use = project_preset_path - # else: - # preset_to_use = default_preset_path - capture_preset = "" capture_preset = instance.context.data['presets']['maya']['capture'] try: @@ -126,7 +78,18 @@ class ExtractThumbnail(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + # viewer opening call to allow a signal to trigger between + # playblast and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) @@ -144,6 +107,45 @@ class ExtractThumbnail(pype.api.Extractor): } instance.data["representations"].append(representation) + def 
_fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. + if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + @contextlib.contextmanager def maintained_time(): From 3e4fa756568037848c6ad78d24ba4fd71d1c979b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 29 Jan 2020 13:46:42 +0100 Subject: [PATCH 048/434] fix(global): integrate new was mixing padding number src to dst --- pype/plugins/global/publish/integrate_new.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index e577c477c3..1be712c14a 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -339,10 +339,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): for i in src_collection.indexes: src_padding = src_padding_exp % i - # for adding first frame into db - if not dst_start_frame: - dst_start_frame = src_padding - src_file_name = "{0}{1}{2}".format( src_head, src_padding, src_tail) @@ -364,6 +360,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("source: {}".format(src)) instance.data["transfers"].append([src, dst]) + # for adding first frame into db + if not dst_start_frame: + dst_start_frame = dst_padding + + dst = "{0}{1}{2}".format( dst_head, dst_start_frame, From 153dcba79cf8a0aba9869e84a0a61c3a9be255f8 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 29 Jan 2020 13:53:36 +0100 Subject: [PATCH 049/434] feat(scripts): otio_burnin is able to render image sequence --- pype/scripts/otio_burnin.py | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index d5bc2594a4..f128352974 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -5,6 +5,7 @@ import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins from pypeapp.lib import config from pype import api as pype +from subprocess import Popen, PIPE # FFmpeg in PATH is required @@ -21,6 +22,7 @@ else: FFMPEG = ( '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s' ).format(os.path.normpath(ffmpeg_path + "ffmpeg")) + FFPROBE = ( '{} -v quiet -print_format json -show_format -show_streams %(source)s' 
).format(os.path.normpath(ffmpeg_path + "ffprobe")) @@ -248,6 +250,33 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): 'filters': filters }).strip() + def render(self, output, args=None, overwrite=False, **kwargs): + """ + Render the media to a specified destination. + + :param str output: output file + :param str args: additional FFMPEG arguments + :param bool overwrite: overwrite the output if it exists + """ + if not overwrite and os.path.exists(output): + raise RuntimeError("Destination '%s' exists, please " + "use overwrite" % output) + + is_sequence = "%" in output + + command = self.command(output=output, + args=args, + overwrite=overwrite) + proc = Popen(command, shell=True) + proc.communicate() + if proc.returncode != 0: + raise RuntimeError("Failed to render '%s': %s'" + % (output, command)) + if is_sequence: + output = output % kwargs.get("duration") + if not os.path.exists(output): + raise RuntimeError("Failed to generate this fucking file '%s'" % output) + def example(input_path, output_path): options_init = { @@ -349,7 +378,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) frame_start = data.get("frame_start") frame_start_tc = data.get('frame_start_tc', frame_start) - + stream = burnin._streams[0] if "resolution_width" not in data: data["resolution_width"] = stream.get("width", "Unknown") @@ -436,7 +465,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) if codec_data is not []: codec_args = " ".join(codec_data) - burnin.render(output_path, args=codec_args, overwrite=overwrite) + burnin.render(output_path, args=codec_args, overwrite=overwrite, **data) if __name__ == '__main__': From 862faa8325446ba8d734832be8b0deee1d236624 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 29 Jan 2020 13:54:27 +0100 Subject: [PATCH 050/434] feat(global): burnin extracting into image sequence --- pype/plugins/global/publish/extract_burnin.py | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 4988f0d042..26f6d34e91 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -156,15 +156,35 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug("Output: {}".format(output)) repre_update = { + "anatomy_template": "render", "files": movieFileBurnin, "name": repre["name"], "tags": [x for x in repre["tags"] if x != "delete"] } + + if is_sequence: + burnin_seq_files = list() + for frame_index in range(_prep_data["duration"] + 1): + if frame_index == 0: + continue + burnin_seq_files.append(movieFileBurnin % frame_index) + repre_update.update({ + "files": burnin_seq_files + }) + instance.data["representations"][i].update(repre_update) # removing the source mov file - os.remove(full_movie_path) - self.log.debug("Removed: `{}`".format(full_movie_path)) + if is_sequence: + for frame_index in range(_prep_data["duration"] + 1): + if frame_index == 0: + continue + rm_file = full_movie_path % frame_index + os.remove(rm_file) + self.log.debug("Removed: `{}`".format(rm_file)) + else: + os.remove(full_movie_path) + self.log.debug("Removed: `{}`".format(full_movie_path)) # Remove any representations tagged for deletion. 
for repre in instance.data["representations"]: From 17483ed05fab449bf9ce92eae2750d897f87af5b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 29 Jan 2020 17:20:01 +0100 Subject: [PATCH 051/434] fix(nuke): removing subsetgroups from publish plugins --- pype/plugins/nuke/publish/collect_workfile.py | 3 +-- pype/plugins/nuke/publish/collect_writes.py | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/pype/plugins/nuke/publish/collect_workfile.py b/pype/plugins/nuke/publish/collect_workfile.py index 4fff9f46ed..9c01a3ec97 100644 --- a/pype/plugins/nuke/publish/collect_workfile.py +++ b/pype/plugins/nuke/publish/collect_workfile.py @@ -72,8 +72,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "publish": root.knob('publish').value(), "family": family, "families": [family], - "representations": list(), - "subsetGroup": "workfiles" + "representations": list() }) # adding basic script data diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 37c86978b6..3eff527d47 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -127,8 +127,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "families": families, "colorspace": node["colorspace"].value(), "deadlineChunkSize": deadlineChunkSize, - "deadlinePriority": deadlinePriority, - "subsetGroup": "renders" + "deadlinePriority": deadlinePriority }) self.log.debug("instance.data: {}".format(instance.data)) From d9c59dced9926def2c238b09a71175e0f4a1a8e7 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 29 Jan 2020 17:24:04 +0100 Subject: [PATCH 052/434] feat(nk): adding png to loader plugin sequence --- pype/plugins/nuke/load/load_sequence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 8f01d4511b..76599c3351 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -73,7 +73,7 @@ class LoadSequence(api.Loader): """Load image sequence into Nuke""" families = ["write", "source", "plate", "render"] - representations = ["exr", "dpx", "jpg", "jpeg"] + representations = ["exr", "dpx", "jpg", "jpeg", "png"] label = "Load sequence" order = -10 From fc28acb88fead4178480e8873f645c35415db114 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 29 Jan 2020 18:14:25 +0100 Subject: [PATCH 053/434] updated event_user_assignment and extract_burnin where format_all is used --- pype/ftrack/events/event_user_assigment.py | 7 ++++++- pype/plugins/global/publish/extract_burnin.py | 11 ++++++++--- 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py index 87994d34b2..efdfb7665d 100644 --- a/pype/ftrack/events/event_user_assigment.py +++ b/pype/ftrack/events/event_user_assigment.py @@ -207,7 +207,12 @@ class UserAssigmentEvent(BaseEvent): # formatting work dir is easiest part as we can use whole path work_dir = anatomy.format(data)['avalon']['work'] # we also need publish but not whole - publish = anatomy.format_all(data)['partial']['avalon']['publish'] + filled_all = anatomy.format_all(data) + if "partial" not in filled_all: + publish = filled_all['avalon']['publish'] + else: + # Backwards compatibility + publish = filled_all["partial"]['avalon']['publish'] # now find path to {asset} m = re.search("(^.+?{})".format(data['asset']), publish) diff --git 
a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index 8f5a4aa000..a3df47518c 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -95,9 +95,14 @@ class ExtractBurnin(pype.api.Extractor):
             # create copy of prep_data for anatomy formatting
             _prep_data = copy.deepcopy(prep_data)
             _prep_data["representation"] = repre["name"]
-            _prep_data["anatomy"] = (
-                anatomy.format_all(_prep_data).get("solved") or {}
-            )
+            filled_anatomy = anatomy.format_all(_prep_data)
+            if hasattr(filled_anatomy, "get_solved"):
+                _filled_anatomy = filled_anatomy.get_solved()
+            else:
+                # Backwards compatibility
+                _filled_anatomy = filled_anatomy.get("solved")
+            _prep_data["anatomy"] = _filled_anatomy or {}
+
             burnin_data = {
                 "input": full_movie_path.replace("\\", "/"),
                 "codec": repre.get("codec", []),

From 3aaa524f79c340b7ec47734fd26e4779b3b5a2c8 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Wed, 29 Jan 2020 18:14:56 +0100
Subject: [PATCH 054/434] updated action_delivery, this change is NOT
 backwards compatible

---
 pype/ftrack/actions/action_delivery.py | 44 ++++++++++----------------
 1 file changed, 17 insertions(+), 27 deletions(-)

diff --git a/pype/ftrack/actions/action_delivery.py b/pype/ftrack/actions/action_delivery.py
index afd20d12d1..29fdfe39ae 100644
--- a/pype/ftrack/actions/action_delivery.py
+++ b/pype/ftrack/actions/action_delivery.py
@@ -312,42 +312,32 @@ class Delivery(BaseAction):
             anatomy_data = copy.deepcopy(repre["context"])
             anatomy_data["root"] = location_path
-            anatomy_filled = anatomy.format(anatomy_data)
-            test_path = (
-                anatomy_filled
-                .get("delivery", {})
-                .get(anatomy_name)
-            )
+            anatomy_filled = anatomy.format_all(anatomy_data)
+            test_path = anatomy_filled["delivery"][anatomy_name]
-            if not test_path:
+            if not test_path.solved:
                 msg = (
                     "Missing keys in Representation's context"
                     " for anatomy template \"{}\"."
                 ).format(anatomy_name)
-                all_anatomies = anatomy.format_all(anatomy_data)
-                result = None
-                for anatomies in all_anatomies.values():
-                    for key, temp in anatomies.get("delivery", {}).items():
-                        if key != anatomy_name:
-                            continue
+                if test_path.missing_keys:
+                    keys = ", ".join(test_path.missing_keys)
+                    sub_msg = (
+                        "Representation: {}<br>- Missing keys: \"{}\"<br>"
+                    ).format(str(repre["_id"]), keys)
+
+                if test_path.invalid_types:
+                    items = []
+                    for key, value in test_path.invalid_types.items():
+                        items.append("\"{}\" {}".format(key, str(value)))
+
+                    keys = ", ".join(items)
+                    sub_msg = (
+                        "Representation: {}<br>"
+                        "- Invalid value DataType: \"{}\"<br>"
+                    ).format(str(repre["_id"]), keys)
+
-                sub_msg = (
-                    "Representation: {}<br>- Missing keys: \"{}\"<br>
" - ).format(str(repre["_id"]), keys) self.report_items[msg].append(sub_msg) self.log.warning( "{} Representation: \"{}\" Filled: <{}>".format( From 2d1bd6227fe1e1a00529dee4100d973addc7ae75 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 29 Jan 2020 18:34:29 +0100 Subject: [PATCH 055/434] fix(nks): handles was taken from wrong attribute --- pype/plugins/nukestudio/publish/collect_clips.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 0729f20957..3759d50f6a 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -106,8 +106,8 @@ class CollectClips(api.ContextPlugin): "family": "clip", "families": [], "handles": 0, - "handleStart": projectdata.get("handles", 0), - "handleEnd": projectdata.get("handles", 0), + "handleStart": projectdata.get("handleStart", 0), + "handleEnd": projectdata.get("handleEnd", 0), "version": int(version)}) instance = context.create_instance(**data) From 8f2f88aeae9f4a91ef77b3edfff4e3731a1d4c03 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Jan 2020 11:27:09 +0100 Subject: [PATCH 056/434] user server is not laucnhed with stdout override to be able get output --- pype/ftrack/ftrack_server/socket_thread.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py index 1bf9d69ad7..8e217870ba 100644 --- a/pype/ftrack/ftrack_server/socket_thread.py +++ b/pype/ftrack/ftrack_server/socket_thread.py @@ -53,8 +53,7 @@ class SocketThread(threading.Thread): ) self.subproc = subprocess.Popen( - [sys.executable, self.filepath, "-port", str(self.port)], - stdout=subprocess.PIPE + [sys.executable, self.filepath, "-port", str(self.port)] ) # Listen for incoming connections @@ -116,11 +115,6 @@ class SocketThread(threading.Thread): if self.subproc.poll() is None: self.subproc.terminate() - lines = self.subproc.stdout.readlines() - if lines: - print("*** Socked Thread stdout ***") - for line in lines: - os.write(1, line) self.finished = True def get_data_from_con(self, connection): From 2f992a6ea4b1f97e6bc87f40f90265753503577f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Jan 2020 11:28:30 +0100 Subject: [PATCH 057/434] sub_user_server print out exception on crash --- pype/ftrack/ftrack_server/sub_user_server.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/ftrack/ftrack_server/sub_user_server.py b/pype/ftrack/ftrack_server/sub_user_server.py index 8b2a9277cf..f0d39447a8 100644 --- a/pype/ftrack/ftrack_server/sub_user_server.py +++ b/pype/ftrack/ftrack_server/sub_user_server.py @@ -2,6 +2,8 @@ import sys import signal import socket +import traceback + from ftrack_server import FtrackServer from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub @@ -31,6 +33,8 @@ def main(args): server = FtrackServer("action") log.debug("Launched User Ftrack Server") server.run_server(session=session) + except Exception: + traceback.print_exception(*sys.exc_info()) finally: log.debug("Closing socket") From cc35ed7ea9b606f7ded44d1d722f1d7e32853621 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Jan 2020 11:29:17 +0100 Subject: [PATCH 058/434] thumbid key is ignored in event_sync_to_avalon --- pype/ftrack/events/event_sync_to_avalon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py 
b/pype/ftrack/events/event_sync_to_avalon.py index 23284a2ae6..f74abaf8cb 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -31,7 +31,7 @@ class SyncToAvalonEvent(BaseEvent): "timelog", "auth_userrole", "appointment" ] ignore_ent_types = ["Milestone"] - ignore_keys = ["statusid"] + ignore_keys = ["statusid", "thumbid"] project_query = ( "select full_name, name, custom_attributes" From 8eb14bade235f4c063f533410bf34069294d1130 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Jan 2020 11:37:15 +0100 Subject: [PATCH 059/434] added warning message for cases when entityId is set to list (happened in client) --- pype/ftrack/events/event_sync_to_avalon.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index f74abaf8cb..67e0bee9d7 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -486,6 +486,14 @@ class SyncToAvalonEvent(BaseEvent): action = ent_info["action"] ftrack_id = ent_info["entityId"] + if isinstance(ftrack_id, list): + self.log.warning(( + "BUG REPORT: Entity info has `entityId` as `list` \"{}\"" + ).format(ent_info)) + if len(ftrack_id) == 0: + continue + ftrack_id = ftrack_id[0] + if action == "move": ent_keys = ent_info["keys"] # Seprate update info from move action From b715990e1e081741408358f572055bfa4e8a10a5 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 30 Jan 2020 14:10:36 +0100 Subject: [PATCH 060/434] fix(nuke): created too many backdrops --- pype/nuke/lib.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index db1a5919c3..01b63392cd 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1103,9 +1103,9 @@ class BuildWorkfile(WorkfileSettings): # move position self.position_right() - bdn = self.create_backdrop(label="Loaded Reads", - color='0x2d7702ff', layer=-1, - nodes=nodes_backdrop) + self.create_backdrop(label="Loaded Reads", + color='0x2d7702ff', layer=-1, + nodes=nodes_backdrop) def read_loader(self, representation): """ From a46773450f9a618ad757b233d1ab48fe107a39f2 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 30 Jan 2020 14:11:00 +0100 Subject: [PATCH 061/434] feat(nuke): added support for `png` --- pype/nuke/lib.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 01b63392cd..64ac83ba81 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1013,7 +1013,8 @@ class BuildWorkfile(WorkfileSettings): def process(self, regex_filter=None, version=None, - representations=["exr", "dpx", "lutJson", "mov", "preview"]): + representations=["exr", "dpx", "lutJson", "mov", + "preview", "png"]): """ A short description. 
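The next two patches (062 and 063) apply the same idea to the mov and image-sequence loaders: the `nuke.read` section of the colorspace presets maps regular-expression keys to colorspace names, `re.search` tests each key against the loaded file path, and the first match is written to the Read node's `colorspace` knob. A minimal, self-contained sketch of that lookup follows; the preset dictionary is hypothetical (real values come from `presets.get_colorspace_preset()`), and only the matching logic mirrors the patches:

    import re

    # Hypothetical stand-in for presets.get_colorspace_preset()["nuke"]["read"]
    read_clrs_presets = {
        r".*_plate_.*": "AlexaV3LogC",
        r".*\.png$": "sRGB",
    }

    def match_read_colorspace(file_path):
        # First preset whose regex key matches the path wins, None otherwise,
        # same shape as the patches' next(...) expression.
        return next(
            (colorspace
             for pattern, colorspace in read_clrs_presets.items()
             if re.search(pattern, file_path)),
            None
        )

    assert match_read_colorspace("/shots/sh010_plate_v001.mov") == "AlexaV3LogC"
    assert match_read_colorspace("/shots/sh010_beauty.exr") is None

Because a plain dict drives the scan, overlapping patterns resolve by dict iteration order, so preset keys should be written to be mutually exclusive.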
From 34515cf14965b96a8658d90e3316f474de769bab Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 30 Jan 2020 14:11:36 +0100
Subject: [PATCH 062/434] feat(nuke): reads mov are now in colorspace presets

---
 pype/plugins/nuke/load/load_mov.py | 25 +++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py
index e598839405..d4a3a7f6b9 100644
--- a/pype/plugins/nuke/load/load_mov.py
+++ b/pype/plugins/nuke/load/load_mov.py
@@ -1,8 +1,9 @@
+import re
+import nuke
 import contextlib
 from avalon import api, io
-
-import nuke
+from pype.nuke import presets
 from pype.api import Logger

 log = Logger().get_logger(__name__, "nuke")
@@ -24,7 +25,7 @@ def preserve_trim(node):
     offset_frame = None
     if node['frame_mode'].value() == "start at":
         start_at_frame = node['frame'].value()
-    if node['frame_mode'].value() is "offset":
+    if node['frame_mode'].value() == "offset":
         offset_frame = node['frame'].value()

     try:
@@ -122,7 +123,6 @@ class LoadMov(api.Loader):
             repr_cont["subset"],
             repr_cont["representation"])

-        # Create the Loader with the filename path set
         with viewer_update_and_undo_stop():
             # TODO: it might be universal read to img/geo/camera
@@ -139,7 +139,20 @@ class LoadMov(api.Loader):
                 read_node["last"].setValue(last)
                 read_node["frame_mode"].setValue("start at")
                 read_node["frame"].setValue(str(offset_frame))
-            # add additional metadata from the version to imprint to Avalon knob
+
+            # load nuke presets for Read's colorspace
+            read_clrs_presets = presets.get_colorspace_preset().get(
+                "nuke",
{}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + r["colorspace"].setValue(str(preset_clrsp)) + loader_shift(r, first, relative=True) r["origfirst"].setValue(int(first)) r["first"].setValue(int(first)) r["origlast"].setValue(int(last)) r["last"].setValue(int(last)) - # add additional metadata from the version to imprint to Avalon knob + # add additional metadata from the version to imprint Avalon knob add_keys = ["frameStart", "frameEnd", "source", "colorspace", "author", "fps", "version", "handleStart", "handleEnd"] data_imprint = {} for k in add_keys: - if k is 'version': + if k == 'version': data_imprint.update({k: context["version"]['name']}) else: data_imprint.update( @@ -179,7 +193,7 @@ class LoadSequence(api.Loader): rtn["after"].setValue("continue") rtn["input.first_lock"].setValue(True) rtn["input.first"].setValue( - self.handle_start + self.first_frame + self.handle_start + self.first_frame ) if time_warp_nodes != []: From be7dbb115172357fdfcdef6587429eff8a948bff Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 30 Jan 2020 18:18:01 +0100 Subject: [PATCH 064/434] fix(nuke): printing objects and docstring --- pype/nuke/lib.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 64ac83ba81..fdbd578a76 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1055,9 +1055,10 @@ class BuildWorkfile(WorkfileSettings): wn["render"].setValue(True) vn.setInput(0, wn) - bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT", - color='0xcc1102ff', layer=-1, - nodes=[wn]) + # adding backdrop under write + self.create_backdrop(label="Render write \n\n\n\nOUTPUT", + color='0xcc1102ff', layer=-1, + nodes=[wn]) # move position self.position_up(4) @@ -1071,10 +1072,12 @@ class BuildWorkfile(WorkfileSettings): version=version, representations=representations) - log.info("__ subsets: `{}`".format(subsets)) + for name, subset in subsets.items(): + log.debug("___________________") + log.debug(name) + log.debug(subset["version"]) nodes_backdrop = list() - for name, subset in subsets.items(): if "lut" in name: continue @@ -1104,6 +1107,7 @@ class BuildWorkfile(WorkfileSettings): # move position self.position_right() + # adding backdrop under all read nodes self.create_backdrop(label="Loaded Reads", color='0x2d7702ff', layer=-1, nodes=nodes_backdrop) From 0f6c79967a0fb8a3fd7fa4d9b3fd08b663fc7b11 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Jan 2020 18:48:58 +0100 Subject: [PATCH 065/434] added warning messages for cases when entity does not have custom attributes --- pype/ftrack/events/event_sync_to_avalon.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index 67e0bee9d7..8d25b5b801 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -1828,6 +1828,13 @@ class SyncToAvalonEvent(BaseEvent): obj_type_id = ent_info["objectTypeId"] ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id) + if ent_cust_attrs is None: + self.log.warning(( + "BUG REPORT: Entity has ent type without" + " custom attributes <{}> \"{}\"" + ).format(entType, ent_info)) + continue + for key, values in ent_info["changes"].items(): if key in hier_attrs_keys: self.hier_cust_attrs_changes[key].append(ftrack_id) From 
a51604bf6f98f39119a9bb9b40f73cea2162896d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Jan 2020 19:19:52 +0100 Subject: [PATCH 066/434] store asset version objects to instance data after ftrack integration --- pype/plugins/ftrack/publish/integrate_ftrack_api.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index adb22aabba..9dd803aafd 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -77,6 +77,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): info_msg = "Created new {entity_type} with data: {data}" info_msg += ", metadata: {metadata}." + used_asset_versions = [] # Iterate over components and publish for data in instance.data.get("ftrackComponentsList", []): @@ -386,3 +387,14 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): tp, value, tb = sys.exc_info() session.rollback() six.reraise(tp, value, tb) + + if assetversion_entity not in used_asset_versions: + used_asset_versions.append(assettype_entity) + + asset_versions_key = "ftrackIntegratedAssetVersions" + if asset_versions_key not in instance.context.data: + instance.context.data[asset_versions_key] = [] + + for asset_version in used_asset_versions: + if asset_version not in instance.context.data[asset_versions_key]: + instance.context.data[asset_versions_key].append(asset_version) From a3922a3b8193b4e9654e9f2fed074bc89738c9b4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 30 Jan 2020 19:27:51 +0100 Subject: [PATCH 067/434] added integrate ftrack note plugin --- .../ftrack/publish/integrate_ftrack_note.py | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 pype/plugins/ftrack/publish/integrate_ftrack_note.py diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py new file mode 100644 index 0000000000..e24c839be2 --- /dev/null +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -0,0 +1,35 @@ +import sys +import pyblish.api +import six + + +class IntegrateFtrackNote(pyblish.api.InstancePlugin): + """Create comments in Ftrack.""" + + order = pyblish.api.IntegratorOrder + label = "Integrate Comments to Ftrack." 
+    families = ["ftrack"]
+    optional = True
+
+    def process(self, instance):
+        comment = (instance.context.data.get("comment") or "").strip()
+        if not comment:
+            return
+
+        asset_versions_key = "ftrackIntegratedAssetVersions"
+        asset_versions = instance.data.get(asset_versions_key)
+        if not asset_versions:
+            return
+
+        session = context.data["ftrackSession"]
+
+        note = session.create("Note", {"content": comment})
+        for asset_version in asset_versions:
+            asset_version["notes"].extend(note)
+
+        try:
+            session.commit()
+        except Exception:
+            tp, value, tb = sys.exc_info()
+            session.rollback()
+            six.reraise(tp, value, tb)

From e4c09abeb1fd20e3f7b75383ee481628724abea5 Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Fri, 31 Jan 2020 00:40:18 +0100
Subject: [PATCH 068/434] wip on collecting assumed render files

---
 pype/plugins/maya/create/create_render.py | 4 +-
 pype/plugins/maya/publish/collect_render.py | 226 +++++++++++++++++++-
 2 files changed, 225 insertions(+), 5 deletions(-)

diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py
index faed231ac5..668c6412eb 100644
--- a/pype/plugins/maya/create/create_render.py
+++ b/pype/plugins/maya/create/create_render.py
@@ -45,8 +45,8 @@ class CreateRender(avalon.maya.Creator):
         sets = []
         for layer in layers:
             print(" - creating set for {}".format(layer.name()))
-            set = cmds.sets(n="LAYER_{}".format(layer.name()))
-            sets.append(set)
+            render_set = cmds.sets(n="LAYER_{}".format(layer.name()))
+            sets.append(render_set)
         cmds.sets(sets, forceElement=instance)

     def _create_render_settings(self):
diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index fe2ba31c2b..3f288dc0c7 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -1,9 +1,11 @@
 import re
+import os
+import types

 from maya import cmds
 from maya import OpenMaya as om
-from pprint import pprint
-
+import maya.aovs as aovs
+import pymel.core as pm
 import maya.app.renderSetup.model.renderSetup as renderSetup

 import pyblish.api
@@ -12,6 +14,47 @@ from avalon import maya, api
 import pype.maya.lib as lib


+R_SINGLE_FRAME = re.compile(r'^(-?)\d+$')
+R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$')
+R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
+R_LAYER_TOKEN = re.compile(
+    r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
+R_AOV_TOKEN = re.compile(r'.*%l.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
+R_SUBSTITUTE_LAYER_TOKEN = re.compile(
+    r'%l|<layer>|<renderlayer>', re.IGNORECASE)
+R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
+R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
+
+RENDERER_NAMES = {
+    'mentalray': 'MentalRay',
+    'vray': 'V-Ray',
+    'arnold': 'Arnold',
+    'renderman': 'Renderman',
+    'redshift': 'Redshift'
+}
+
+# not sure about the renderman image prefix
+ImagePrefixes = {
+    'mentalray': 'defaultRenderGlobals.imageFilePrefix',
+    'vray': 'vraySettings.fileNamePrefix',
+    'arnold': 'defaultRenderGlobals.imageFilePrefix',
+    'renderman': 'defaultRenderGlobals.imageFilePrefix',
+    'redshift': 'defaultRenderGlobals.imageFilePrefix'
+}
+
+# Arnold AOV driver extension mapping
+# Is there a better way?
+aiDriverExtension = { + 'jpeg': 'jpg', + 'exr': 'exr', + 'deepexr': 'exr', + 'png': 'png', + 'tiff': 'tif', + 'mtoa_shaders': 'ass', # TODO: research what those last two should be + 'maya': '' +} + + class CollectMayaRender(pyblish.api.ContextPlugin): """Gather all publishable render layers from renderSetup""" @@ -20,6 +63,132 @@ class CollectMayaRender(pyblish.api.ContextPlugin): label = "Collect Render Layers" families = ["render"] + def _get_expected_files(self, layer): + # ______________________________________________ + # ____________________/ ____________________________________________/ + # 1 - get scene name /__________________/ + # ____________________/ + scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True)) + scene_name, _ = os.path.splitext(scene_basename) + + # ______________________________________________ + # ____________________/ ____________________________________________/ + # 2 - detect renderer /__________________/ + # ____________________/ + renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() + if renderer.startswith('renderman'): + renderer = 'renderman' + + # ________________________________________________ + # __________________/ ______________________________________________/ + # 3 - image prefix /__________________/ + # __________________/ + try: + file_prefix = cmds.getAttr(ImagePrefixes[renderer]) + except KeyError: + raise RuntimeError("Unsupported renderer {}".format(renderer)) + + # ________________________________________________ + # __________________/ ______________________________________________/ + # 4 - get renderabe cameras_____________/ + # __________________/ + cam_parents = [cmds.listRelatives(x, ap=True)[-1] + for x in cmds.ls(cameras=True)] + + self.log.info("cameras in scene: %s" % ", ".join(cam_parents)) + + renderable_cameras = [] + for cam in cam_parents: + renderable = False + if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))): + renderable = True + + for override in self.get_layer_overrides( + '{}.renderable'.format(cam), 'rs_{}'.format(layer)): + renderable = self.maya_is_true(override) + + if renderable: + renderable_cameras.append(cam) + + self.log.info("renderable cameras: %s" % ", ".join(renderable_cameras)) + + # ________________________________________________ + # __________________/ ______________________________________________/ + # 5 - get AOVs /_____________/ + # __________________/ + + enabled_aovs = [] + + if renderer == "arnold": + + if (cmds.getAttr('defaultArnoldRenderOptions.aovMode') and + not cmds.getAttr('defaultArnoldDriver.mergeAOVs')): + # AOVs are set to be rendered separately. We should expect + # token in path. 
+ mergeAOVs = False + else: + mergeAOVs = True + + if not mergeAOVs: + ai_aovs = [n for n in cmds.ls(type='aiAOV')] + + for aov in ai_aovs: + enabled = self.maya_is_true( + cmds.getAttr('{}.enabled'.format(aov))) + ai_driver = cmds.listConnections( + '{}.outputs'.format(aov))[0] + ai_translator = cmds.getAttr( + '{}.aiTranslator'.format(ai_driver)) + try: + aov_ext = aiDriverExtension[ai_translator] + except KeyError: + msg = ('Unrecognized arnold ' + 'drive format for AOV - {}').format( + cmds.getAttr('{}.name'.format(aov)) + ) + self.log.error(msg) + raise RuntimeError(msg) + + for override in self.get_layer_overrides( + '{}.enabled'.format(aov), 'rs_{}'.format(layer)): + enabled = self.maya_is_true(override) + if enabled: + enabled_aovs.append((aov, aov_ext)) + + self.log.info("enabled aovs: %s" % ", ".join( + [cmds.getAttr('%s.name' % (n,)) for n in enabled_aovs])) + + elif renderer == "vray": + # todo: implement vray aovs + pass + + elif renderer == "redshift": + # todo: implement redshift aovs + pass + + elif renderer == "mentalray": + # todo: implement mentalray aovs + pass + + elif renderer == "renderman": + # todo: implement renderman aovs + pass + + mappings = ( + (R_SUBSTITUTE_SCENE_TOKEN, scene_name), + (R_SUBSTITUTE_LAYER_TOKEN, layer), + (R_SUBSTITUTE_CAMERA_TOKEN, camera), + ) + + # if we have token in prefix path we'll expect output for + # every renderable camera in layer. + + + + for regex, value in mappings: + file_prefix = re.sub(regex, value, file_prefix) + + def process(self, context): render_instance = None for instance in context: @@ -79,6 +248,32 @@ class CollectMayaRender(pyblish.api.ContextPlugin): layer_name = "rs_{}".format(expected_layer_name) + # collect all frames we are expecting to be rendered + files = cmds.renderSettings(fp=True, fin=True, lin=True, + lut=True, lyr=expected_layer_name) + + if len(files) == 1: + # if last file is not specified, maya is not set for animation + pass + else: + # get frame position and padding + + # get extension + re.search(r'\.(\w+)$', files[0]) + + # find token. If no AOVs are specified, assume + # is 'beauty' + render_passes = ['beauty'] + if pm.getAttr('defaultRenderGlobals.currentRenderer') == 'arnold': # noqa: E501 + # arnold is our renderer + for node in cmd.ls(type="aiAOV"): + render_pass = node.split('_')[1] + + + + + + # Get layer specific settings, might be overrides data = { "subset": expected_layer_name, @@ -120,7 +315,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): data[attr] = value # Include (optional) global settings - # TODO(marcus): Take into account layer overrides # Get global overrides and translate to Deadline values overrides = self.parse_options(str(render_globals)) data.update(**overrides) @@ -225,3 +419,29 @@ class CollectMayaRender(pyblish.api.ContextPlugin): def get_render_attribute(self, attr, layer): return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), layer=layer) + + def _get_layer_overrides(self, attr, layer): + connections = cmds.listConnections(attr, plugs=True) + if connections: + for connection in connections: + if connection: + node_name = connection.split('.')[0] + if cmds.nodeType(node_name) == 'renderLayer': + attr_name = '%s.value' % '.'.join( + connection.split('.')[:-1]) + if node_name == layer: + yield cmds.getAttr(attr_name) + + def _maya_is_true(self, attr_val): + """ + Whether a Maya attr evaluates to True. 
+ When querying an attribute value from an ambiguous object the + Maya API will return a list of values, which need to be properly + handled to evaluate properly. + """ + if isinstance(attr_val, types.BooleanType): + return attr_val + elif isinstance(attr_val, (types.ListType, types.GeneratorType)): + return any(attr_val) + else: + return bool(attr_val) From 8f8bfeb14937697b03cb632dc1015f7fac4a2f74 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 11:33:44 +0100 Subject: [PATCH 069/434] remove maya deprecated loaders --- pype/plugins/maya/load/load_reference.py | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index e5b0c0e238..b2544222c0 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -91,19 +91,3 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): def switch(self, container, representation): self.update(container, representation) - - -# for backwards compatibility -class AbcLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["pointcache", "animation"] - representations = ["abc"] - tool_names = [] - - -# for backwards compatibility -class ModelLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["model", "pointcache"] - representations = ["abc"] - tool_names = [] From 0e807f05dac35655dd1793523f8674edd3b7a74c Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 12:05:07 +0100 Subject: [PATCH 070/434] remove obsolete backwards compatibility --- pype/ftrack/events/event_user_assigment.py | 7 ++----- pype/plugins/global/publish/extract_burnin.py | 7 +------ 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py index efdfb7665d..eaacfd959a 100644 --- a/pype/ftrack/events/event_user_assigment.py +++ b/pype/ftrack/events/event_user_assigment.py @@ -208,11 +208,8 @@ class UserAssigmentEvent(BaseEvent): work_dir = anatomy.format(data)['avalon']['work'] # we also need publish but not whole filled_all = anatomy.format_all(data) - if "partial" not in filled_all: - publish = filled_all['avalon']['publish'] - else: - # Backwards compatibility - publish = filled_all["partial"]['avalon']['publish'] + publish = filled_all['avalon']['publish'] + # now find path to {asset} m = re.search("(^.+?{})".format(data['asset']), publish) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index a3df47518c..8a96e66d27 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -96,12 +96,7 @@ class ExtractBurnin(pype.api.Extractor): _prep_data = copy.deepcopy(prep_data) _prep_data["representation"] = repre["name"] filled_anatomy = anatomy.format_all(_prep_data) - if hasattr(filled_anatomy, "get_solved"): - _filled_anatomy = filled_anatomy.get_solved() - else: - # Backwards compatibility - _filled_anatomy = filled_anatomy.get("solved") - _prep_data["anatomy"] = _filled_anatomy or {} + _prep_data["anatomy"] = filled_anatomy.get_solved() burnin_data = { "input": full_movie_path.replace("\\", "/"), From 45c76919c0df080af5905b2a5c7288298a6135cc Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 31 Jan 2020 12:09:08 +0100 Subject: [PATCH 071/434] store asset versions to instance.data instead of context.data --- pype/plugins/ftrack/publish/integrate_ftrack_api.py | 8 
++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index 9dd803aafd..922c8e119f 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -392,9 +392,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): used_asset_versions.append(assettype_entity) asset_versions_key = "ftrackIntegratedAssetVersions" - if asset_versions_key not in instance.context.data: - instance.context.data[asset_versions_key] = [] + if asset_versions_key not in instance.data: + instance.data[asset_versions_key] = [] for asset_version in used_asset_versions: - if asset_version not in instance.context.data[asset_versions_key]: - instance.context.data[asset_versions_key].append(asset_version) + if asset_version not in instance.data[asset_versions_key]: + instance.data[asset_versions_key].append(asset_version) From eb50cd369d6dd31f914bd16820f4a3c7078d6f28 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 31 Jan 2020 12:09:17 +0100 Subject: [PATCH 072/434] store assetversion instead of assettype --- pype/plugins/ftrack/publish/integrate_ftrack_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index 922c8e119f..cd94b2a150 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -389,7 +389,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): six.reraise(tp, value, tb) if assetversion_entity not in used_asset_versions: - used_asset_versions.append(assettype_entity) + used_asset_versions.append(assetversion_entity) asset_versions_key = "ftrackIntegratedAssetVersions" if asset_versions_key not in instance.data: From e4ba53ac15358dbc648bfa6f8e4a507dfe513072 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 31 Jan 2020 12:13:20 +0100 Subject: [PATCH 073/434] integrate ftrack note add notes to each integrated asset version --- .../ftrack/publish/integrate_ftrack_note.py | 26 +++++++++++++++---- 1 file changed, 21 insertions(+), 5 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py index e24c839be2..f7fb5addbb 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -6,29 +6,45 @@ import six class IntegrateFtrackNote(pyblish.api.InstancePlugin): """Create comments in Ftrack.""" - order = pyblish.api.IntegratorOrder - label = "Integrate Comments to Ftrack." 
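+    # Notes are attached to the AssetVersions that IntegrateFtrackApi
+    # stores in instance.data["ftrackIntegratedAssetVersions"].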
+    # Must be after integrate asset new
+    order = pyblish.api.IntegratorOrder + 0.4999
+    label = "Integrate Ftrack note"
     families = ["ftrack"]
     optional = True
 
     def process(self, instance):
         comment = (instance.context.data.get("comment") or "").strip()
         if not comment:
+            self.log.info("Comment is not set.")
             return
 
+        self.log.debug("Comment is set to {}".format(comment))
+
         asset_versions_key = "ftrackIntegratedAssetVersions"
         asset_versions = instance.data.get(asset_versions_key)
         if not asset_versions:
+            self.log.info("There are no integrated AssetVersions")
             return
 
-        session = context.data["ftrackSession"]
+        session = instance.context.data["ftrackSession"]
+        user = session.query(
+            "User where username is \"{}\"".format(session.api_user)
+        ).first()
+        if not user:
+            self.log.warning(
+                "Was not able to query current User {}".format(
+                    session.api_user
+                )
+            )
 
-        note = session.create("Note", {"content": comment})
         for asset_version in asset_versions:
-            asset_version["notes"].extend(note)
+            asset_version.create_note(comment, author=user)
 
         try:
             session.commit()
+            self.log.debug("Note added to AssetVersion \"{}\"".format(
+                str(asset_version)
+            ))
         except Exception:
             tp, value, tb = sys.exc_info()
             session.rollback()

From 1afd2b40cef4446dc0fcfcab98aea0e732f970b4 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 31 Jan 2020 12:13:39 +0100
Subject: [PATCH 074/434] comment is not overridden with empty string if it is
 already set

---
 pype/plugins/global/publish/collect_comment.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/collect_comment.py b/pype/plugins/global/publish/collect_comment.py
index 22970665a1..062142ace9 100644
--- a/pype/plugins/global/publish/collect_comment.py
+++ b/pype/plugins/global/publish/collect_comment.py
@@ -15,4 +15,5 @@ class CollectComment(pyblish.api.ContextPlugin):
     order = pyblish.api.CollectorOrder
 
     def process(self, context):
-        context.data["comment"] = ""
+        comment = (context.data.get("comment") or "").strip()
+        context.data["comment"] = comment

From 607ede0c0752fd6761401a08dba75c070c3a7875 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 31 Jan 2020 12:13:52 +0100
Subject: [PATCH 075/434] collect matchmove family filtering was fixed

---
 pype/plugins/standalonepublisher/publish/collect_matchmove.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/standalonepublisher/publish/collect_matchmove.py b/pype/plugins/standalonepublisher/publish/collect_matchmove.py
index b46efc1cf3..5d9e8ddfb4 100644
--- a/pype/plugins/standalonepublisher/publish/collect_matchmove.py
+++ b/pype/plugins/standalonepublisher/publish/collect_matchmove.py
@@ -21,7 +21,7 @@ class CollectMatchmovePublish(pyblish.api.InstancePlugin):
 
     label = "Collect Matchmove - SA Publish"
     order = pyblish.api.CollectorOrder
-    family = ["matchmove"]
+    families = ["matchmove"]
     hosts = ["standalonepublisher"]
 
     def process(self, instance):

From 35a7040930ee9a5013ffefb82f5165e5615a53da Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 31 Jan 2020 14:43:02 +0100
Subject: [PATCH 076/434] fix(nuke): new way of imprinting data

---
 pype/nuke/lib.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index db1a5919c3..1c5601f34f 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -374,7 +374,7 @@ def create_write_node(name, data, input=None, prenodes=None):
             now_node.setInput(0, prev_node)
 
     # imprinting group node
-    GN = avalon.nuke.imprint(GN, data["avalon"])
+    avalon.nuke.imprint(GN,
data["avalon"], tab="Pype") divider = nuke.Text_Knob('') GN.addKnob(divider) From e05356bb904e2cef66acbb9726461d714a4fc420 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 15:05:49 +0100 Subject: [PATCH 077/434] gracefully skip missing thumbnail path --- pype/plugins/global/publish/extract_jpeg.py | 10 +++++----- pype/plugins/global/publish/integrate_thumbnail.py | 6 ++++++ 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 00e8a6fedf..4978649ba2 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -6,7 +6,7 @@ import pype.api class ExtractJpegEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issies + """Resolve any dependency issues This plug-in resolves any paths which, if not updated might break the published file. @@ -55,8 +55,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): filename = os.path.splitext(input_file)[0] if not filename.endswith('.'): filename += "." - jpegFile = filename + "jpg" - full_output_path = os.path.join(stagingdir, jpegFile) + jpeg_file = filename + "jpg" + full_output_path = os.path.join(stagingdir, jpeg_file) self.log.info("output {}".format(full_output_path)) @@ -87,9 +87,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): instance.data["representations"] = [] representation = { - 'name': 'jpg', + 'name': 'thumbnail', 'ext': 'jpg', - 'files': jpegFile, + 'files': jpeg_file, "stagingDir": stagingdir, "thumbnail": True, "tags": ['thumbnail'] diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py index bf6c62155f..1c4399b386 100644 --- a/pype/plugins/global/publish/integrate_thumbnail.py +++ b/pype/plugins/global/publish/integrate_thumbnail.py @@ -19,6 +19,12 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin): families = ["review"] def process(self, instance): + + if not os.environ.get("AVALON_THUMBNAIL_ROOT"): + self.log.info("AVALON_THUMBNAIL_ROOT is not set." 
+ " Skipping thumbnail integration.") + return + published_repres = instance.data.get("published_representations") if not published_repres: self.log.debug( From 65fd98b1fad6a67a94d2393fa09487d02e9159fd Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 17:39:16 +0100 Subject: [PATCH 078/434] unify loaders to a single reference loader --- pype/maya/__init__.py | 1 + pype/maya/lib.py | 17 +++++ pype/plugins/maya/load/load_camera.py | 62 ---------------- pype/plugins/maya/load/load_fbx.py | 54 -------------- pype/plugins/maya/load/load_mayaascii.py | 66 ---------------- pype/plugins/maya/load/load_reference.py | 42 ++++++++++- pype/plugins/maya/load/load_rig.py | 95 ------------------------ pype/plugins/maya/load/load_vrayproxy.py | 2 +- 8 files changed, 58 insertions(+), 281 deletions(-) delete mode 100644 pype/plugins/maya/load/load_camera.py delete mode 100644 pype/plugins/maya/load/load_fbx.py delete mode 100644 pype/plugins/maya/load/load_mayaascii.py delete mode 100644 pype/plugins/maya/load/load_rig.py diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index b4dbc52bc8..f027893a0e 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -162,6 +162,7 @@ def on_open(_): # Validate FPS after update_task_from_path to # ensure it is using correct FPS for the asset lib.validate_fps() + lib.fix_incompatible_containers() if any_outdated(): log.warning("Scene has outdated content.") diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 0890d3863e..e1a72b5929 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2318,6 +2318,23 @@ def get_attr_in_layer(attr, layer): return cmds.getAttr(attr) +def fix_incompatible_containers(): + """Return whether the current scene has any outdated content""" + + host = avalon.api.registered_host() + for container in host.ls(): + loader = container['loader'] + + print(container['loader']) + + if loader in ["MayaAsciiLoader", + "AbcLoader", + "ModelLoader", + "CameraLoader"]: + cmds.setAttr(container["objectName"] + ".loader", + "ReferenceLoader", type="string") + + def _null(*args): pass diff --git a/pype/plugins/maya/load/load_camera.py b/pype/plugins/maya/load/load_camera.py deleted file mode 100644 index e9bf265b98..0000000000 --- a/pype/plugins/maya/load/load_camera.py +++ /dev/null @@ -1,62 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - -class CameraLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader of Alembic for the pype.camera family""" - - families = ["camera"] - label = "Reference camera" - representations = ["abc", "ma"] - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - # Get family type from the context - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "camera" - - cmds.loadPlugin("AbcImport.mll", quiet=True) - groupName = "{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - sharedReferenceFile=False, - groupReference=True, - groupName="{}:{}".format(namespace, name), - reference=True, - returnNewNodes=True) - - cameras = cmds.ls(nodes, type="camera") - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - # Check the Maya version, lockTransform has been 
introduced since - # Maya 2016.5 Ext 2 - version = int(cmds.about(version=True)) - if version >= 2016: - for camera in cameras: - cmds.camera(camera, edit=True, lockTransform=True) - else: - self.log.warning("This version of Maya does not support locking of" - " transforms of cameras.") - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_fbx.py b/pype/plugins/maya/load/load_fbx.py deleted file mode 100644 index 14df300c3c..0000000000 --- a/pype/plugins/maya/load/load_fbx.py +++ /dev/null @@ -1,54 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - -class FBXLoader(pype.maya.plugin.ReferenceLoader): - """Load the FBX""" - - families = ["fbx"] - representations = ["fbx"] - - label = "Reference FBX" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "fbx" - - # Ensure FBX plug-in is loaded - cmds.loadPlugin("fbxmaya", quiet=True) - - with maya.maintained_selection(): - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) - - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_mayaascii.py b/pype/plugins/maya/load/load_mayaascii.py deleted file mode 100644 index ab7b2daffb..0000000000 --- a/pype/plugins/maya/load/load_mayaascii.py +++ /dev/null @@ -1,66 +0,0 @@ -import pype.maya.plugin -from pypeapp import config -import os - - -class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader): - """Load the model""" - - families = [] - representations = ["ma"] - - label = "Reference Maya Ascii" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "model" - - with maya.maintained_selection(): - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) - - self[:] = nodes - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - cmds.setAttr(groupName + ".displayHandle", 1) - # get bounding box - bbox = cmds.exactWorldBoundingBox(groupName) - # get pivot position on world space - pivot = cmds.xform(groupName, q=True, sp=True, ws=True) - # center of bounding box - cx = (bbox[0] + bbox[3]) / 2 - cy = (bbox[1] + bbox[4]) / 2 - cz = (bbox[2] + bbox[5]) / 2 - # add pivot position to calculate offset - cx = cx + 
pivot[0] - cy = cy + pivot[1] - cz = cz + pivot[2] - # set selection handle offset to center of bounding box - cmds.setAttr(groupName + ".selectHandleX", cx) - cmds.setAttr(groupName + ".selectHandleY", cy) - cmds.setAttr(groupName + ".selectHandleZ", cz) - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index 91f946b728..5104ac26f8 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -1,4 +1,6 @@ import pype.maya.plugin +from avalon import api, maya +from maya import cmds import os from pypeapp import config @@ -11,8 +13,10 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): "animation", "mayaAscii", "setdress", - "layout"] - representations = ["ma", "abc"] + "layout", + "camera", + "rig"] + representations = ["ma", "abc", "fbx"] tool_names = ["loader"] label = "Reference" @@ -42,7 +46,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): reference=True, returnNewNodes=True) - namespace = cmds.referenceQuery(nodes[0], namespace=True) + # namespace = cmds.referenceQuery(nodes[0], namespace=True) shapes = cmds.ls(nodes, shapes=True, long=True) @@ -92,7 +96,39 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) + if data.get("post_process", True): + if family == "rig": + self._post_process_rig(name, namespace, context, data) + return newNodes def switch(self, container, representation): self.update(container, representation) + + def _post_process_rig(self, name, namespace, context, data): + + output = next((node for node in self if + node.endswith("out_SET")), None) + controls = next((node for node in self if + node.endswith("controls_SET")), None) + + assert output, "No out_SET in rig, this is a bug." + assert controls, "No controls_SET in rig, this is a bug." + + # Find the roots amongst the loaded nodes + roots = cmds.ls(self[:], assemblies=True, long=True) + assert roots, "No root nodes in rig, this is a bug." + + asset = api.Session["AVALON_ASSET"] + dependency = str(context["representation"]["_id"]) + + self.log.info("Creating subset: {}".format(namespace)) + + # Create the animation instance + with maya.maintained_selection(): + cmds.select([output, controls] + roots, noExpand=True) + api.create(name=namespace, + asset=asset, + family="animation", + options={"useSelection": True}, + data={"dependencies": dependency}) diff --git a/pype/plugins/maya/load/load_rig.py b/pype/plugins/maya/load/load_rig.py deleted file mode 100644 index fc6e666ac6..0000000000 --- a/pype/plugins/maya/load/load_rig.py +++ /dev/null @@ -1,95 +0,0 @@ -from maya import cmds - -import pype.maya.plugin -from avalon import api, maya -import os -from pypeapp import config - - -class RigLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader for rigs - - This automatically creates an instance for animators upon load. 
- - """ - - families = ["rig"] - representations = ["ma"] - - label = "Reference rig" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "rig" - - groupName = "{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName=groupName) - - cmds.xform(groupName, pivots=(0, 0, 0)) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) - - newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) - - # Store for post-process - self[:] = newNodes - if data.get("post_process", True): - self._post_process(name, namespace, context, data) - - return newNodes - - def _post_process(self, name, namespace, context, data): - - # TODO(marcus): We are hardcoding the name "out_SET" here. - # Better register this keyword, so that it can be used - # elsewhere, such as in the Integrator plug-in, - # without duplication. - - output = next((node for node in self if - node.endswith("out_SET")), None) - controls = next((node for node in self if - node.endswith("controls_SET")), None) - - assert output, "No out_SET in rig, this is a bug." - assert controls, "No controls_SET in rig, this is a bug." - - # Find the roots amongst the loaded nodes - roots = cmds.ls(self[:], assemblies=True, long=True) - assert roots, "No root nodes in rig, this is a bug." 
- - asset = api.Session["AVALON_ASSET"] - dependency = str(context["representation"]["_id"]) - - # Create the animation instance - with maya.maintained_selection(): - cmds.select([output, controls] + roots, noExpand=True) - api.create(name=namespace, - asset=asset, - family="animation", - options={"useSelection": True}, - data={"dependencies": dependency}) - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_vrayproxy.py b/pype/plugins/maya/load/load_vrayproxy.py index 9b07dc7e30..35d93676a0 100644 --- a/pype/plugins/maya/load/load_vrayproxy.py +++ b/pype/plugins/maya/load/load_vrayproxy.py @@ -117,7 +117,7 @@ class VRayProxyLoader(api.Loader): vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name)) mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True, - name="{}_VRMM".format(name)) + name="{}_VRMM".format(name)) vray_mat_sg = cmds.sets(name="{}_VRSG".format(name), empty=True, renderable=True, From 2e7d4a94670b8eac96c2889ff5f4112da5dc0d37 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 17:45:31 +0100 Subject: [PATCH 079/434] more families in automatic scene upgrade --- pype/maya/lib.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index e1a72b5929..ec39b3556e 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2330,7 +2330,9 @@ def fix_incompatible_containers(): if loader in ["MayaAsciiLoader", "AbcLoader", "ModelLoader", - "CameraLoader"]: + "CameraLoader", + "RigLoader", + "FBXLoader"]: cmds.setAttr(container["objectName"] + ".loader", "ReferenceLoader", type="string") From 27774a0791cae0a8d9f82fdc69036b63770c8495 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 17:48:36 +0100 Subject: [PATCH 080/434] pep8 --- pype/plugins/maya/load/actions.py | 8 ++++---- pype/plugins/maya/load/load_reference.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pype/plugins/maya/load/actions.py b/pype/plugins/maya/load/actions.py index 9f6a5c4d34..77d18b0ee3 100644 --- a/pype/plugins/maya/load/actions.py +++ b/pype/plugins/maya/load/actions.py @@ -140,9 +140,9 @@ class ImportMayaLoader(api.Loader): message = "Are you sure you want import this" state = QtWidgets.QMessageBox.warning(None, - "Are you sure?", - message, - buttons=buttons, - defaultButton=accept) + "Are you sure?", + message, + buttons=buttons, + defaultButton=accept) return state == accept diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index 5104ac26f8..3de35451a6 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -58,7 +58,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): for node in newNodes: try: roots.add(pm.PyNode(node).getAllParents()[-2]) - except: + except: # noqa: E722 pass for root in roots: root.setParent(world=True) From 68f28ce4574c5234451e945d844fb84b042914d7 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 31 Jan 2020 18:57:16 +0100 Subject: [PATCH 081/434] fix(nuke): refactoring loaders --- pype/plugins/nuke/load/load_mov.py | 67 +++++++++++++------------ pype/plugins/nuke/load/load_sequence.py | 53 +++++++++---------- 2 files changed, 62 insertions(+), 58 deletions(-) diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index e598839405..917abdf098 100644 --- 
a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -24,7 +24,7 @@ def preserve_trim(node): offset_frame = None if node['frame_mode'].value() == "start at": start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": + if node['frame_mode'].value() == "offset": offset_frame = node['frame'].value() try: @@ -85,30 +85,26 @@ class LoadMov(api.Loader): containerise, viewer_update_and_undo_stop ) - version = context['version'] version_data = version.get("data", {}) - orig_first = version_data.get("frameStart", None) - orig_last = version_data.get("frameEnd", None) + orig_first = version_data.get("frameStart") + orig_last = version_data.get("frameEnd") diff = orig_first - 1 - # set first to 1 + first = orig_first - diff last = orig_last - diff - handles = version_data.get("handles", None) - handle_start = version_data.get("handleStart", None) - handle_end = version_data.get("handleEnd", None) - repr_cont = context["representation"]["context"] - # fix handle start and end if none are available - if not handle_start and not handle_end: - handle_start = handles - handle_end = handles + handle_start = version_data.get("handleStart") + handle_end = version_data.get("handleEnd") + + colorspace = version_data.get("colorspace") + repr_cont = context["representation"]["context"] # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range - offset_frame = orig_first + handle_start + offset_frame = orig_first - handle_start # Fallback to asset name when namespace is None if namespace is None: @@ -122,10 +118,8 @@ class LoadMov(api.Loader): repr_cont["subset"], repr_cont["representation"]) - # Create the Loader with the filename path set with viewer_update_and_undo_stop(): - # TODO: it might be universal read to img/geo/camera read_node = nuke.createNode( "Read", "name {}".format(read_name) @@ -139,7 +133,11 @@ class LoadMov(api.Loader): read_node["last"].setValue(last) read_node["frame_mode"].setValue("start at") read_node["frame"].setValue(str(offset_frame)) - # add additional metadata from the version to imprint to Avalon knob + + if colorspace: + read_node["colorspace"].setValue(str(colorspace)) + + # add additional metadata from the version to imprint Avalon knob add_keys = [ "frameStart", "frameEnd", "handles", "source", "author", "fps", "version", "handleStart", "handleEnd" @@ -147,7 +145,7 @@ class LoadMov(api.Loader): data_imprint = {} for key in add_keys: - if key is 'version': + if key == 'version': data_imprint.update({ key: context["version"]['name'] }) @@ -186,10 +184,10 @@ class LoadMov(api.Loader): ) node = nuke.toNode(container['objectName']) - # TODO: prepare also for other Read img/geo/camera + assert node.Class() == "Read", "Must be Read" - file = api.get_representation_path(representation) + file = self.fname.replace("\\", "/") # Get start frame from version data version = io.find_one({ @@ -207,15 +205,17 @@ class LoadMov(api.Loader): version_data = version.get("data", {}) - orig_first = version_data.get("frameStart", None) - orig_last = version_data.get("frameEnd", None) + orig_first = version_data.get("frameStart") + orig_last = version_data.get("frameEnd") diff = orig_first - 1 + # set first to 1 first = orig_first - diff last = orig_last - diff handles = version_data.get("handles", 0) handle_start = version_data.get("handleStart", 0) handle_end = version_data.get("handleEnd", 0) + colorspace = version_data.get("colorspace") if first is None: 
log.warning("Missing start frame for updated version" @@ -231,11 +231,11 @@ class LoadMov(api.Loader): # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range - offset_frame = orig_first + handle_start + offset_frame = orig_first - handle_start # Update the loader's path whilst preserving some values with preserve_trim(node): - node["file"].setValue(file["path"]) + node["file"].setValue(file) log.info("__ node['file']: {}".format(node["file"].value())) # Set the global in to the start frame of the sequence @@ -247,19 +247,22 @@ class LoadMov(api.Loader): node["frame_mode"].setValue("start at") node["frame"].setValue(str(offset_frame)) + if colorspace: + node["colorspace"].setValue(str(colorspace)) + updated_dict = {} updated_dict.update({ "representation": str(representation["_id"]), - "frameStart": version_data.get("frameStart"), - "frameEnd": version_data.get("frameEnd"), - "version": version.get("name"), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), + "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), - "handles": version_data.get("handles"), - "handleStart": version_data.get("handleStart"), - "handleEnd": version_data.get("handleEnd"), - "fps": version_data.get("fps"), + "handleStart": str(handle_start), + "handleEnd": str(handle_end), + "fps": str(version_data.get("fps")), "author": version_data.get("author"), - "outputDir": version_data.get("outputDir"), + "outputDir": version_data.get("outputDir") }) # change color of node diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 76599c3351..67d1f75dc8 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -24,7 +24,7 @@ def preserve_trim(node): offset_frame = None if node['frame_mode'].value() == "start at": start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": + if node['frame_mode'].value() == "offset": offset_frame = node['frame'].value() try: @@ -93,7 +93,6 @@ class LoadSequence(api.Loader): self.first_frame = int(nuke.root()["first_frame"].getValue()) self.handle_start = version_data.get("handleStart", 0) - self.handle_start = version_data.get("handleStart", 0) self.handle_end = version_data.get("handleEnd", 0) first = version_data.get("frameStart", None) @@ -108,7 +107,10 @@ class LoadSequence(api.Loader): file = self.fname.replace("\\", "/") - log.info("file: {}\n".format(self.fname)) + if "#" not in file: + frame = repr_cont.get("frame") + padding = len(frame) + file = file.replace(frame, "#"*padding) repr_cont = context["representation"]["context"] read_name = "Read_{0}_{1}_{2}".format( @@ -116,11 +118,6 @@ class LoadSequence(api.Loader): repr_cont["subset"], repr_cont["representation"]) - if "#" not in file: - frame = repr_cont.get("frame") - padding = len(frame) - file = file.replace(frame, "#"*padding) - # Create the Loader with the filename path set with viewer_update_and_undo_stop(): # TODO: it might be universal read to img/geo/camera @@ -130,8 +127,8 @@ class LoadSequence(api.Loader): r["file"].setValue(file) # Set colorspace defined in version data - colorspace = context["version"]["data"].get("colorspace", None) - if colorspace is not None: + colorspace = context["version"]["data"].get("colorspace") + if colorspace: r["colorspace"].setValue(str(colorspace)) loader_shift(r, first, relative=True) @@ -140,14 +137,14 @@ class 
LoadSequence(api.Loader): r["origlast"].setValue(int(last)) r["last"].setValue(int(last)) - # add additional metadata from the version to imprint to Avalon knob + # add additional metadata from the version to imprint Avalon knob add_keys = ["frameStart", "frameEnd", "source", "colorspace", "author", "fps", "version", "handleStart", "handleEnd"] data_imprint = {} for k in add_keys: - if k is 'version': + if k == 'version': data_imprint.update({k: context["version"]['name']}) else: data_imprint.update( @@ -179,7 +176,7 @@ class LoadSequence(api.Loader): rtn["after"].setValue("continue") rtn["input.first_lock"].setValue(True) rtn["input.first"].setValue( - self.handle_start + self.first_frame + self.handle_start + self.first_frame ) if time_warp_nodes != []: @@ -210,16 +207,20 @@ class LoadSequence(api.Loader): """ from avalon.nuke import ( - ls_img_sequence, update_container ) node = nuke.toNode(container['objectName']) - # TODO: prepare also for other Read img/geo/camera + assert node.Class() == "Read", "Must be Read" - path = api.get_representation_path(representation) - file = ls_img_sequence(path) + repr_cont = representation["context"] + file = self.fname.replace("\\", "/") + + if "#" not in file: + frame = repr_cont.get("frame") + padding = len(frame) + file = file.replace(frame, "#"*padding) # Get start frame from version data version = io.find_one({ @@ -241,8 +242,8 @@ class LoadSequence(api.Loader): self.handle_start = version_data.get("handleStart", 0) self.handle_end = version_data.get("handleEnd", 0) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) + first = version_data.get("frameStart") + last = version_data.get("frameEnd") if first is None: log.warning("Missing start frame for updated version" @@ -255,7 +256,7 @@ class LoadSequence(api.Loader): # Update the loader's path whilst preserving some values with preserve_trim(node): - node["file"].setValue(file["path"]) + node["file"].setValue(file) log.info("__ node['file']: {}".format(node["file"].value())) # Set the global in to the start frame of the sequence @@ -268,14 +269,14 @@ class LoadSequence(api.Loader): updated_dict = {} updated_dict.update({ "representation": str(representation["_id"]), - "frameStart": version_data.get("frameStart"), - "frameEnd": version_data.get("frameEnd"), - "version": version.get("name"), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), - "handleStart": version_data.get("handleStart"), - "handleEnd": version_data.get("handleEnd"), - "fps": version_data.get("fps"), + "handleStart": str(self.handle_start), + "handleEnd": str(self.handle_end), + "fps": str(version_data.get("fps")), "author": version_data.get("author"), "outputDir": version_data.get("outputDir"), }) From c501ce323085989958368ef310d588c4c0647d33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Fri, 31 Jan 2020 19:11:52 +0100 Subject: [PATCH 082/434] refactored collecting assumed render files --- pype/plugins/maya/publish/collect_render.py | 484 +++++++++++++------- 1 file changed, 317 insertions(+), 167 deletions(-) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 3f288dc0c7..375b237583 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -1,11 +1,9 @@ import re import os import types +from abc import ABC, abstractmethod from maya 
import cmds
-from maya import OpenMaya as om
-import maya.aovs as aovs
-import pymel.core as pm
 import maya.app.renderSetup.model.renderSetup as renderSetup
 
 import pyblish.api
@@ -19,7 +17,8 @@ R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$')
 R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
 R_LAYER_TOKEN = re.compile(
     r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
-R_AOV_TOKEN = re.compile(r'.*%l.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
+R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
+R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
 R_SUBSTITUTE_LAYER_TOKEN = re.compile(
     r'%l|<layer>|<renderlayer>', re.IGNORECASE)
 R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
 R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
@@ -42,18 +41,6 @@ ImagePrefixes = {
     'redshift': 'defaultRenderGlobals.imageFilePrefix'
 }
 
-# Arnold AOV driver extension mapping
-# Is there a better way?
-aiDriverExtension = {
-    'jpeg': 'jpg',
-    'exr': 'exr',
-    'deepexr': 'exr',
-    'png': 'png',
-    'tiff': 'tif',
-    'mtoa_shaders': 'ass',  # TODO: research what those last two should be
-    'maya': ''
-}
-
 
 class CollectMayaRender(pyblish.api.ContextPlugin):
     """Gather all publishable render layers from renderSetup"""
 
@@ -63,132 +50,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
     label = "Collect Render Layers"
     families = ["render"]
 
-    def _get_expected_files(self, layer):
-        # ______________________________________________
-        # ____________________/ ____________________________________________/
-        # 1 - get scene name /__________________/
-        # ____________________/
-        scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
-        scene_name, _ = os.path.splitext(scene_basename)
-
-        # ______________________________________________
-        # ____________________/ ____________________________________________/
-        # 2 - detect renderer /__________________/
-        # ____________________/
-        renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower()
-        if renderer.startswith('renderman'):
-            renderer = 'renderman'
-
-        # ________________________________________________
-        # __________________/ ______________________________________________/
-        # 3 - image prefix /__________________/
-        # __________________/
-        try:
-            file_prefix = cmds.getAttr(ImagePrefixes[renderer])
-        except KeyError:
-            raise RuntimeError("Unsupported renderer {}".format(renderer))
-
-        # ________________________________________________
-        # __________________/ ______________________________________________/
-        # 4 - get renderabe cameras_____________/
-        # __________________/
-        cam_parents = [cmds.listRelatives(x, ap=True)[-1]
-                       for x in cmds.ls(cameras=True)]
-
-        self.log.info("cameras in scene: %s" % ", ".join(cam_parents))
-
-        renderable_cameras = []
-        for cam in cam_parents:
-            renderable = False
-            if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))):
-                renderable = True
-
-            for override in self.get_layer_overrides(
-                    '{}.renderable'.format(cam), 'rs_{}'.format(layer)):
-                renderable = self.maya_is_true(override)
-
-            if renderable:
-                renderable_cameras.append(cam)
-
-        self.log.info("renderable cameras: %s" % ", ".join(renderable_cameras))
-
-        # ________________________________________________
-        # __________________/ ______________________________________________/
-        # 5 - get AOVs /_____________/
-        # __________________/
-
-        enabled_aovs = []
-
-        if renderer == "arnold":
-
-            if (cmds.getAttr('defaultArnoldRenderOptions.aovMode') and
-                not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
-                # AOVs are set to be rendered separately. We should expect
-                # token in path.
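-                # (merged AOVs would land in a single multi-channel file)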
- mergeAOVs = False - else: - mergeAOVs = True - - if not mergeAOVs: - ai_aovs = [n for n in cmds.ls(type='aiAOV')] - - for aov in ai_aovs: - enabled = self.maya_is_true( - cmds.getAttr('{}.enabled'.format(aov))) - ai_driver = cmds.listConnections( - '{}.outputs'.format(aov))[0] - ai_translator = cmds.getAttr( - '{}.aiTranslator'.format(ai_driver)) - try: - aov_ext = aiDriverExtension[ai_translator] - except KeyError: - msg = ('Unrecognized arnold ' - 'drive format for AOV - {}').format( - cmds.getAttr('{}.name'.format(aov)) - ) - self.log.error(msg) - raise RuntimeError(msg) - - for override in self.get_layer_overrides( - '{}.enabled'.format(aov), 'rs_{}'.format(layer)): - enabled = self.maya_is_true(override) - if enabled: - enabled_aovs.append((aov, aov_ext)) - - self.log.info("enabled aovs: %s" % ", ".join( - [cmds.getAttr('%s.name' % (n,)) for n in enabled_aovs])) - - elif renderer == "vray": - # todo: implement vray aovs - pass - - elif renderer == "redshift": - # todo: implement redshift aovs - pass - - elif renderer == "mentalray": - # todo: implement mentalray aovs - pass - - elif renderer == "renderman": - # todo: implement renderman aovs - pass - - mappings = ( - (R_SUBSTITUTE_SCENE_TOKEN, scene_name), - (R_SUBSTITUTE_LAYER_TOKEN, layer), - (R_SUBSTITUTE_CAMERA_TOKEN, camera), - ) - - # if we have token in prefix path we'll expect output for - # every renderable camera in layer. - - - - for regex, value in mappings: - file_prefix = re.sub(regex, value, file_prefix) - - def process(self, context): render_instance = None for instance in context: @@ -249,30 +110,15 @@ class CollectMayaRender(pyblish.api.ContextPlugin): layer_name = "rs_{}".format(expected_layer_name) # collect all frames we are expecting to be rendered - files = cmds.renderSettings(fp=True, fin=True, lin=True, - lut=True, lyr=expected_layer_name) - - if len(files) == 1: - # if last file is not specified, maya is not set for animation - pass - else: - # get frame position and padding - - # get extension - re.search(r'\.(\w+)$', files[0]) - - # find token. 
If no AOVs are specified, assume
-            # is 'beauty'
-            render_passes = ['beauty']
-            if pm.getAttr('defaultRenderGlobals.currentRenderer') == 'arnold':  # noqa: E501
-                # arnold is our renderer
-                for node in cmd.ls(type="aiAOV"):
-                    render_pass = node.split('_')[1]
-
-
-
-
-
+        renderer = cmds.getAttr(
+            'defaultRenderGlobals.currentRenderer').lower()
+        # handle various renderman names
+        if renderer.startswith('renderman'):
+            renderer = 'renderman'
+        # return all expected files for all cameras and aovs in given
+        # frame range
+        exp_files = ExpectedFiles().get(renderer, layer_name)
 
         # Get layer specific settings, might be overrides
         data = {
             "subset": expected_layer_name,
@@ -298,7 +144,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
 
             # Add source to allow tracing back to the scene from
             # which was submitted originally
-            "source": filepath
+            "source": filepath,
+            "expectedFiles": exp_files
         }
 
         # Apply each user defined attribute as data
@@ -432,... class CollectMayaRender(pyblish.api.ContextPlugin):
                 if node_name == layer:
                     yield cmds.getAttr(attr_name)
+
+
+class ExpectedFiles:
+
+    def get(self, renderer, layer):
+        if renderer.lower() == 'arnold':
+            return ExpectedFilesArnold(layer).get_files()
+        elif renderer.lower() == 'vray':
+            return ExpectedFilesVray(layer).get_files()
+        elif renderer.lower() == 'redshift':
+            return ExpectedFilesRedshift(layer).get_files()
+        elif renderer.lower() == 'mentalray':
+            return ExpectedFilesMentalray(layer).get_files()
+        elif renderer.lower() == 'renderman':
+            return ExpectedFilesRenderman(layer).get_files()
+        else:
+            raise UnsupportedRendererException(
+                "unsupported {}".format(renderer))
+
+
+class AExpectedFiles(ABC):
+
+    renderer = None
+    layer = None
+
+    def __init__(self, layer):
+        self.layer = layer
+
+    @abstractmethod
+    def _get_aovs(self):
+        pass
+
+    def get_files(self):
+        # ______________________________________________
+        # ____________________/ ____________________________________________/
+        # 1 - get scene name /__________________/
+        # ____________________/
+        scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
+        scene_name, _ = os.path.splitext(scene_basename)
+
+        # ______________________________________________
+        # ____________________/ ____________________________________________/
+        # 2 - detect renderer /__________________/
+        # ____________________/
+        renderer = self.renderer
+
+        # ________________________________________________
+        # __________________/ ______________________________________________/
+        # 3 - image prefix /__________________/
+        # __________________/
+        try:
+            file_prefix = cmds.getAttr(ImagePrefixes[renderer])
+        except KeyError:
+            raise UnsupportedRendererException(
+                "Unsupported renderer {}".format(renderer))
+
+        if not file_prefix:
+            raise RuntimeError("Image prefix not set")
+
+        default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
+
+        # ________________________________________________
+        # __________________/ ______________________________________________/
+        # 4 - get renderable cameras_____________/
+        # __________________/
+
+        renderable_cameras = self.get_renderable_cameras()
+        # ________________________________________________
+        # __________________/ ______________________________________________/
+        # 5 - get AOVs /____________________/
+        # __________________/
+
+        enabled_aovs = self._get_aovs()
+
+        # if we have <Camera> token in prefix path we'll expect output for
+        # every renderable camera in layer.
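+        # (each expected name below is "prefix.frame.ext", with the frame
+        # number zero-padded to the render settings' extension padding)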
+
+        expected_files = []
+        start_frame = int(self.get_render_attribute('startFrame'))
+        end_frame = int(self.get_render_attribute('endFrame'))
+        frame_step = int(self.get_render_attribute('byFrameStep'))
+        padding = int(self.get_render_attribute('extensionPadding'))
+
+        resolved_path = file_prefix
+        for cam in renderable_cameras:
+            if enabled_aovs:
+                for aov in enabled_aovs:
+
+                    mappings = (
+                        (R_SUBSTITUTE_SCENE_TOKEN, scene_name),
+                        (R_SUBSTITUTE_LAYER_TOKEN, self.layer),
+                        (R_SUBSTITUTE_CAMERA_TOKEN, cam),
+                        (R_SUBSTITUTE_AOV_TOKEN, aov[0])
+                    )
+
+                    for regex, value in mappings:
+                        file_prefix = re.sub(regex, value, file_prefix)
+
+                    for frame in range(
+                            int(start_frame), int(end_frame), int(frame_step)):
+                        expected_files.append(
+                            '{}.{}.{}'.format(file_prefix,
+                                              str(frame).rjust(padding, "0"),
+                                              aov[1]))
+                    file_prefix = resolved_path
+            else:
+                mappings = (
+                    (R_SUBSTITUTE_SCENE_TOKEN, scene_name),
+                    (R_SUBSTITUTE_LAYER_TOKEN, self.layer),
+                    (R_SUBSTITUTE_CAMERA_TOKEN, cam)
+                )
+
+                for regex, value in mappings:
+                    file_prefix = re.sub(regex, value, file_prefix)
+
+                for frame in range(
+                        int(start_frame), int(end_frame), int(frame_step)):
+                    expected_files.append(
+                        '{}.{}.{}'.format(file_prefix,
+                                          str(frame).rjust(padding, "0"),
+                                          default_ext))
+
+        return expected_files
+
+    def get_renderable_cameras(self):
+        cam_parents = [cmds.listRelatives(x, ap=True)[-1]
+                       for x in cmds.ls(cameras=True)]
+
+        renderable_cameras = []
+        for cam in cam_parents:
+            renderable = False
+            if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))):
+                renderable = True
+
+            for override in self.get_layer_overrides(
+                    '{}.renderable'.format(cam), self.layer):
+                renderable = self.maya_is_true(override)
+
+            if renderable:
+                renderable_cameras.append(cam)
+
+        return renderable_cameras
+
+    def maya_is_true(self, attr_val):
+        """
+        Whether a Maya attr evaluates to True.
+        When querying an attribute value from an ambiguous object the
+        Maya API will return a list of values, which need to be properly
+        handled to evaluate properly.
+        """
+        if isinstance(attr_val, types.BooleanType):
+            return attr_val
+        elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
+            return any(attr_val)
+        else:
+            return bool(attr_val)
+
+
+class ExpectedFilesArnold(AExpectedFiles):
+
+    # Arnold AOV driver extension mapping
+    # Is there a better way?
+    aiDriverExtension = {
+        'jpeg': 'jpg',
+        'exr': 'exr',
+        'deepexr': 'exr',
+        'png': 'png',
+        'tiff': 'tif',
+        'mtoa_shaders': 'ass',  # TODO: research what those last two should be
+        'maya': ''
+    }
+
+    def __init__(self, layer):
+        super(ExpectedFilesArnold, self).__init__(layer)
+        self.renderer = 'arnold'
+
+    def _get_aovs(self):
+        enabled_aovs = []
+        if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
+                and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
+            # AOVs are merged in multi-channel file
+            return enabled_aovs
+
+        # AOVs are set to be rendered separately. We should expect
+        # <aov> token in path.
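+        # every aiAOV node is checked below; the aiTranslator of its
+        # driver tells us the file extension that AOV is written with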
+
+        ai_aovs = [n for n in cmds.ls(type='aiAOV')]
+
+        for aov in ai_aovs:
+            enabled = self.maya_is_true(
+                cmds.getAttr('{}.enabled'.format(aov)))
+            ai_driver = cmds.listConnections(
+                '{}.outputs'.format(aov))[0]
+            ai_translator = cmds.getAttr(
+                '{}.aiTranslator'.format(ai_driver))
+            try:
+                aov_ext = self.aiDriverExtension[ai_translator]
+            except KeyError:
+                msg = ('Unrecognized arnold '
+                       'driver format for AOV - {}').format(
+                    cmds.getAttr('{}.name'.format(aov))
+                )
+                raise AOVError(msg)
+
+            for override in self.get_layer_overrides(
+                    '{}.enabled'.format(aov), self.layer):
+                enabled = self.maya_is_true(override)
+            if enabled:
+                enabled_aovs.append(
+                    (
+                        cmds.getAttr('%s.name' % aov),
+                        aov_ext
+                    )
+                )
+        return enabled_aovs
+
+
+class ExpectedFilesVray(AExpectedFiles):
+
+    def __init__(self, layer):
+        super(ExpectedFilesVray, self).__init__(layer)
+        self.renderer = 'vray'
+
+    def _get_aovs(self):
+
+        default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
+        enabled_aovs = []
+        vr_aovs = [n for n in cmds.ls(
+            type=["VRayRenderElement", "VRayRenderElementSet"])]
+
+        # todo: find out how to detect multichannel exr for vray
+        for aov in vr_aovs:
+            enabled = self.maya_is_true(
+                cmds.getAttr('{}.enabled'.format(aov)))
+            for override in self.get_layer_overrides(
+                    '{}.enabled'.format(aov), self.layer):
+                enabled = self.maya_is_true(override)
+
+            if enabled:
+                # todo: find how vray set format for AOVs
+                enabled_aovs.append(
+                    (
+                        self._get_vray_aov_name(aov),
+                        default_ext)
+                )
+        return enabled_aovs
+
+    def _get_vray_aov_name(self, node):
+
+        # Get render element pass type
+        vray_node_attr = next(attr for attr in cmds.listAttr(node)
+                              if attr.startswith("vray_name"))
+        pass_type = vray_node_attr.rsplit("_", 1)[-1]
+
+        # Support V-Ray extratex explicit name (if set by user)
+        if pass_type == "extratex":
+            explicit_attr = "{}.vray_explicit_name_extratex".format(node)
+            explicit_name = cmds.getAttr(explicit_attr)
+            if explicit_name:
+                return explicit_name
+
+        # Node type is in the attribute name but we need to check the value
+        # of the attribute as it can be changed
+        return cmds.getAttr("{}.{}".format(node, vray_node_attr))
+
+
+class ExpectedFilesRedshift(AExpectedFiles):
+
+    def __init__(self, layer):
+        super(ExpectedFilesRedshift, self).__init__(layer)
+        self.renderer = 'redshift'
+
+    def _get_aovs(self):
+        enabled_aovs = []
+        default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
+        rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]
+
+        # todo: find out how to detect multichannel exr for redshift
+        for aov in rs_aovs:
+            enabled = self.maya_is_true(
+                cmds.getAttr('{}.enabled'.format(aov)))
+            for override in self.get_layer_overrides(
+                    '{}.enabled'.format(aov), self.layer):
+                enabled = self.maya_is_true(override)
+
+            if enabled:
+                # todo: find how redshift set format for AOVs
+                enabled_aovs.append(
+                    (
+                        cmds.getAttr('%s.name' % aov),
+                        default_ext
+                    )
+                )
+
+        return enabled_aovs
+
+
+class ExpectedFilesRenderman(AExpectedFiles):
+
+    def __init__(self, layer):
+        raise UnimplementedRendererException('Renderman not implemented')
+
+
+class ExpectedFilesMentalray(AExpectedFiles):
+
+    def __init__(self, layer):
+        raise UnimplementedRendererException('Mentalray not implemented')
+
+
+class AOVError(Exception):
+    pass
+
+
+class UnsupportedRendererException(Exception):
+    pass
+
+
+class UnimplementedRendererException(Exception):
+    pass

From 539d772788feaf9a8808e2ae5a2e6bcf222b6351 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 31 Jan 2020 19:52:27 +0100
Subject: [PATCH 083/434] feat(global): add instance version validator

---
 .../plugins/global/publish/validate_version.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 pype/plugins/global/publish/validate_version.py

diff --git a/pype/plugins/global/publish/validate_version.py b/pype/plugins/global/publish/validate_version.py
new file mode 100644
index 0000000000..c484c517bb
--- /dev/null
+++ b/pype/plugins/global/publish/validate_version.py
@@ -0,0 +1,18 @@
+import pyblish.api
+
+
+class ValidateVersion(pyblish.api.InstancePlugin):
+    """Validate instance version.
+
+    Pype does not allow overwriting previously published versions.
+    """
+
+    order = pyblish.api.ValidatorOrder
+
+    label = "Validate Version"
+
+    def process(self, instance):
+        version = int(instance.data.get("version"))
+        last_version = int(instance.data.get("lastVersion"))
+
+        assert (version != last_version), "This workfile version is already published: database `{0}`, workfile `{1}`".format(last_version, version)

From 45c76919c0df080af5905b2a5c7288298a6135cc Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Fri, 31 Jan 2020 19:54:05 +0100
Subject: [PATCH 084/434] add schemas

---
 schema/application-1.0.json    |  68 ++++++++++++++++
 schema/asset-1.0.json          |  35 ++++++++
 schema/asset-2.0.json          |  55 +++++++++++++
 schema/asset-3.0.json          |  55 +++++++++++++
 schema/config-1.0.json         |  86 ++++++++++++++++++++
 schema/container-1.0.json      | 100 +++++++++++++++++++++++
 schema/container-2.0.json      |  59 ++++++++++++++
 schema/inventory-1.0.json      |  10 +++
 schema/project-2.0.json        |  86 ++++++++++++++++++++
 schema/representation-1.0.json |  28 +++++++
 schema/representation-2.0.json |  78 ++++++++++++++++++
 schema/session-1.0.json        | 143 +++++++++++++++++++++++++++++++++
 schema/session-2.0.json        | 142 ++++++++++++++++++++++++++++++++
 schema/shaders-1.0.json        |  32 ++++++++
 schema/subset-1.0.json         |  35 ++++++++
 schema/subset-2.0.json         |  51 ++++++++++++
 schema/subset-3.0.json         |  62 ++++++++++++++
 schema/thumbnail-1.0.json      |  42 ++++++++++
 schema/version-1.0.json        |  50 ++++++++++++
 schema/version-2.0.json        |  92 +++++++++++++++++++++
 schema/version-3.0.json        |  84 +++++++++++++++++++
 21 files changed, 1393 insertions(+)
 create mode 100644 schema/application-1.0.json
 create mode 100644 schema/asset-1.0.json
 create mode 100644 schema/asset-2.0.json
 create mode 100644 schema/asset-3.0.json
 create mode 100644 schema/config-1.0.json
 create mode 100644 schema/container-1.0.json
 create mode 100644 schema/container-2.0.json
 create mode 100644 schema/inventory-1.0.json
 create mode 100644 schema/project-2.0.json
 create mode 100644 schema/representation-1.0.json
 create mode 100644 schema/representation-2.0.json
 create mode 100644 schema/session-1.0.json
 create mode 100644 schema/session-2.0.json
 create mode 100644 schema/shaders-1.0.json
 create mode 100644 schema/subset-1.0.json
 create mode 100644 schema/subset-2.0.json
 create mode 100644 schema/subset-3.0.json
 create mode 100644 schema/thumbnail-1.0.json
 create mode 100644 schema/version-1.0.json
 create mode 100644 schema/version-2.0.json
 create mode 100644 schema/version-3.0.json

diff --git a/schema/application-1.0.json b/schema/application-1.0.json
new file mode 100644
index 0000000000..e2418037c6
--- /dev/null
+++ b/schema/application-1.0.json
@@ -0,0 +1,68 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+
+    "title": "pype:application-1.0",
+    "description": "An application definition.",
+
+    "type": "object",
+
+    "additionalProperties": true,
+
+    "required": [
+        "schema",
+        "label",
+        "application_dir",
+        "executable"
+    ],
+
+    
"properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "label": { + "description": "Nice name of application.", + "type": "string" + }, + "application_dir": { + "description": "Name of directory used for application resources.", + "type": "string" + }, + "executable": { + "description": "Name of callable executable, this is called to launch the application", + "type": "string" + }, + "description": { + "description": "Description of application.", + "type": "string" + }, + "environment": { + "description": "Key/value pairs for environment variables related to this application. Supports lists for paths, such as PYTHONPATH.", + "type": "object", + "items": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + } + }, + "default_dirs": { + "type": "array", + "items": { + "type": "string" + } + }, + "copy": { + "type": "object", + "patternProperties": { + "^.*$": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + } +} diff --git a/schema/asset-1.0.json b/schema/asset-1.0.json new file mode 100644 index 0000000000..6f3665c628 --- /dev/null +++ b/schema/asset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-1.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "subsets" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "subsets": { + "type": "array", + "items": { + "$ref": "subset.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/asset-2.0.json b/schema/asset-2.0.json new file mode 100644 index 0000000000..066cb33498 --- /dev/null +++ b/schema/asset-2.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-2.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "silo", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:asset-2.0"], + "example": "avalon-core:asset-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/asset-3.0.json b/schema/asset-3.0.json new file mode 100644 index 0000000000..a3a22e917b --- /dev/null +++ b/schema/asset-3.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-3.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": 
["avalon-core:asset-3.0", "pype:asset-3.0"], + "example": "avalon-core:asset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/config-1.0.json b/schema/config-1.0.json new file mode 100644 index 0000000000..b3c4362f41 --- /dev/null +++ b/schema/config-1.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": false, + "required": [ + "template", + "tasks", + "apps" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "template": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.*$": { + "type": "string" + } + } + }, + "tasks": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "apps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "families": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "label": {"type": "string"}, + "hideFilter": {"type": "boolean"} + }, + "required": ["name"] + } + }, + "groups": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "color": {"type": "string"}, + "order": {"type": ["integer", "number"]} + }, + "required": ["name"] + } + }, + "copy": { + "type": "object" + } + } +} diff --git a/schema/container-1.0.json b/schema/container-1.0.json new file mode 100644 index 0000000000..d9e4e39f7f --- /dev/null +++ b/schema/container-1.0.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:container-1.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "id", + "objectName", + "name", + "author", + "loader", + "families", + "time", + "subset", + "asset", + "representation", + "version", + "silo", + "path", + "source" + ], + "properties": { + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": ["pyblish.mindbender.container"], + "example": "pyblish.mindbender.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "name": { + "description": "Full name of application object", + "type": "string", + "example": "modelDefault" + }, + "author": { + "description": "Name of the author of the published version", + "type": "string", + 
"example": "Marcus Ottosson" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "families": { + "description": "Families associated with the this subset", + "type": "string", + "example": "mindbender.model" + }, + "time": { + "description": "File-system safe, formatted time", + "type": "string", + "example": "20170329T131545Z" + }, + "subset": { + "description": "Name of source subset", + "type": "string", + "example": "modelDefault" + }, + "asset": { + "description": "Name of source asset", + "type": "string" , + "example": "Bruce" + }, + "representation": { + "description": "Name of source representation", + "type": "string" , + "example": ".ma" + }, + "version": { + "description": "Version number", + "type": "number", + "example": 12 + }, + "silo": { + "description": "Silo of parent asset", + "type": "string", + "example": "assets" + }, + "path": { + "description": "Absolute path on disk", + "type": "string", + "example": "{root}/assets/Bruce/publish/rigDefault/v002" + }, + "source": { + "description": "Absolute path to file from which this version was published", + "type": "string", + "example": "{root}/assets/Bruce/work/rigging/maya/scenes/rig_v001.ma" + } + } +} diff --git a/schema/container-2.0.json b/schema/container-2.0.json new file mode 100644 index 0000000000..7b84209ea0 --- /dev/null +++ b/schema/container-2.0.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:container-2.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "id", + "objectName", + "name", + "namespace", + "loader", + "representation" + ], + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:container-2.0", "pype:container-2.0"], + "example": "pype:container-2.0" + }, + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": ["pyblish.avalon.container"], + "example": "pyblish.avalon.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "name": { + "description": "Internal object name of container in application", + "type": "string", + "example": "modelDefault_01" + }, + "namespace": { + "description": "Internal namespace of container in application", + "type": "string", + "example": "Bruce_" + }, + "representation": { + "description": "Unique id of representation in database", + "type": "string", + "example": "59523f355f8c1b5f6c5e8348" + } + } +} \ No newline at end of file diff --git a/schema/inventory-1.0.json b/schema/inventory-1.0.json new file mode 100644 index 0000000000..888ba7945a --- /dev/null +++ b/schema/inventory-1.0.json @@ -0,0 +1,10 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": true +} diff --git a/schema/project-2.0.json b/schema/project-2.0.json new file mode 100644 index 0000000000..ad0e460f4d --- /dev/null +++ b/schema/project-2.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:project-2.0", + "description": "A unit of 
data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data", + "config" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:project-2.0", "pype:project-2.0"], + "example": "avalon-core:project-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["project"], + "example": "project" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "hulk" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "fps": 24, + "width": 1920, + "height": 1080 + } + }, + "config": { + "type": "object", + "description": "Document metadata", + "example": { + "schema": "pype:config-1.0", + "apps": [ + { + "name": "maya2016", + "label": "Autodesk Maya 2016" + }, + { + "name": "nuke10", + "label": "The Foundry Nuke 10.0" + } + ], + "tasks": [ + {"name": "model"}, + {"name": "render"}, + {"name": "animate"}, + {"name": "rig"}, + {"name": "lookdev"}, + {"name": "layout"} + ], + "template": { + "work": + "{root}/{project}/{silo}/{asset}/work/{task}/{app}", + "publish": + "{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/{subset}.{representation}" + } + }, + "$ref": "config-1.0.json" + } + }, + + "definitions": {} +} diff --git a/schema/representation-1.0.json b/schema/representation-1.0.json new file mode 100644 index 0000000000..10ae72928e --- /dev/null +++ b/schema/representation-1.0.json @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:representation-1.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "format", + "path" + ], + + "properties": { + "schema": {"type": "string"}, + "format": { + "description": "File extension, including '.'", + "type": "string" + }, + "path": { + "description": "Unformatted path to version.", + "type": "string" + } + } +} diff --git a/schema/representation-2.0.json b/schema/representation-2.0.json new file mode 100644 index 0000000000..e12dea8564 --- /dev/null +++ b/schema/representation-2.0.json @@ -0,0 +1,78 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:representation-2.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:representation-2.0", "pype:representation-2.0"], + "example": "pype:representation-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["representation"], + "example": "representation" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of representation", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "abc" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "label": "Alembic" + } + }, + "dependencies": { + "description": "Other representation that this representation depends on", + "type": "array", + "items": {"type": "string"}, + 
"example": [ + "592d547a5f8c1b388093c145" + ] + }, + "context": { + "description": "Summary of the context to which this representation belong.", + "type": "object", + "properties": { + "project": {"type": "object"}, + "asset": {"type": "string"}, + "silo": {"type": ["string", "null"]}, + "subset": {"type": "string"}, + "version": {"type": "number"}, + "representation": {"type": "string"} + }, + "example": { + "project": "hulk", + "asset": "Bruce", + "silo": "assets", + "subset": "rigDefault", + "version": 12, + "representation": "ma" + } + } + } +} diff --git a/schema/session-1.0.json b/schema/session-1.0.json new file mode 100644 index 0000000000..2b201f9c61 --- /dev/null +++ b/schema/session-1.0.json @@ -0,0 +1,143 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-1.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_SILO", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": "Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. 
graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} \ No newline at end of file diff --git a/schema/session-2.0.json b/schema/session-2.0.json new file mode 100644 index 0000000000..006a9e2dbf --- /dev/null +++ b/schema/session-2.0.json @@ -0,0 +1,142 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-2.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": 
"Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} diff --git a/schema/shaders-1.0.json b/schema/shaders-1.0.json new file mode 100644 index 0000000000..e66cc735e8 --- /dev/null +++ b/schema/shaders-1.0.json @@ -0,0 +1,32 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:shaders-1.0", + "description": "Relationships between shaders and Avalon IDs", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "shader" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "shader": { + "description": "Name of directory", + "type": "array", + "items": { + "type": "str", + "description": "Avalon ID and optional face indexes, e.g. 
'f9520572-ac1d-11e6-b39e-3085a99791c9.f[5002:5185]'" + } + } + }, + + "definitions": {} +} diff --git a/schema/subset-1.0.json b/schema/subset-1.0.json new file mode 100644 index 0000000000..90ae0349fa --- /dev/null +++ b/schema/subset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-1.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "versions" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "$ref": "version.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/subset-2.0.json b/schema/subset-2.0.json new file mode 100644 index 0000000000..98f39c4f3e --- /dev/null +++ b/schema/subset-2.0.json @@ -0,0 +1,51 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-2.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:subset-2.0"], + "example": "pype:subset-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "type": "object", + "description": "Document metadata", + "example": { + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/subset-3.0.json b/schema/subset-3.0.json new file mode 100644 index 0000000000..a0af9d340f --- /dev/null +++ b/schema/subset-3.0.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-3.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:subset-3.0", "pype:subset-3.0"], + "example": "pype:subset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families"], + "properties": { + "families": { + "type": "array", + "items": {"type": "string"}, + "description": "One or more families associated with this subset" + } + }, + "example": { + "families" : [ + "avalon.camera" + ], + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/thumbnail-1.0.json b/schema/thumbnail-1.0.json new file mode 100644 index 0000000000..96b540ab7e --- /dev/null +++ b/schema/thumbnail-1.0.json @@ -0,0 +1,42 @@ +{ + "$schema": 
"http://json-schema.org/draft-04/schema#", + + "title": "pype:thumbnail-1.0", + "description": "Entity with thumbnail data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:thumbnail-1.0"], + "example": "pype:thumbnail-1.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["thumbnail"], + "example": "thumbnail" + }, + "data": { + "description": "Thumbnail data", + "type": "object", + "example": { + "binary_data": "Binary({byte data of image})", + "template": "{thumbnail_root}/{project[name]}/{_id}{ext}}", + "template_data": { + "ext": ".jpg" + } + } + } + } +} diff --git a/schema/version-1.0.json b/schema/version-1.0.json new file mode 100644 index 0000000000..c784a25175 --- /dev/null +++ b/schema/version-1.0.json @@ -0,0 +1,50 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-1.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "version", + "path", + "time", + "author", + "source", + "representations" + ], + + "properties": { + "schema": {"type": "string"}, + "representations": { + "type": "array", + "items": { + "$ref": "representation.json" + } + }, + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + } +} diff --git a/schema/version-2.0.json b/schema/version-2.0.json new file mode 100644 index 0000000000..5bb4a56f96 --- /dev/null +++ b/schema/version-2.0.json @@ -0,0 +1,92 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-2.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:version-2.0"], + "example": "pype:version-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families", "author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + "description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of 
this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + }, + "families": { + "type": "array", + "items": {"type": "string"}, + "description": "One or more families associated with this version" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "families" : [ + "avalon.model" + ], + "time" : "20170510T090203Z" + } + } + } +} diff --git a/schema/version-3.0.json b/schema/version-3.0.json new file mode 100644 index 0000000000..808650da0d --- /dev/null +++ b/schema/version-3.0.json @@ -0,0 +1,84 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-3.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:version-3.0", "pype:version-3.0"], + "example": "pype:version-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + "description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. 
'{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "time" : "20170510T090203Z" + } + } + } +} From bcd8bd742d631a207c8d2ddb475af3f9e5902f1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Fri, 31 Jan 2020 20:02:42 +0100 Subject: [PATCH 085/434] wip json file assumed files --- .../global/publish/collect_filesequences.py | 17 +++----- .../global/publish/submit_publish_job.py | 42 +++++++++++++++++++ 2 files changed, 47 insertions(+), 12 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 6c06229304..305604ae00 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -72,9 +72,9 @@ def collect(root, class CollectRenderedFrames(pyblish.api.ContextPlugin): """Gather file sequences from working directory - When "FILESEQUENCE" environment variable is set these paths (folders or - .json files) are parsed for image sequences. Otherwise the current - working directory is searched for file sequences. + When "PYPE_PUBLISH_PATHS" environment variable is set these paths + (folders or .json files) are parsed for image sequences. + Otherwise the current working directory is searched for file sequences. The json configuration may have the optional keys: asset (str): The asset to publish to. If not provided fall back to @@ -101,7 +101,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): lut_path = None slate_frame = None families_data = None - baked_mov_path = None subset = None version = None frame_start = 0 @@ -167,8 +166,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): families_data = instance.get("families") slate_frame = instance.get("slateFrame") version = instance.get("version") - - else: # Search in directory data = dict() @@ -199,10 +196,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): fps = data.get("fps", 25) - # adding publish comment and intent to context - context.data["comment"] = data.get("comment", "") - context.data["intent"] = data.get("intent", "") - if data.get("user"): context.data["user"] = data["user"] @@ -266,6 +259,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "anatomy_template": "render", "fps": fps, "tags": ["review"], + "frameStart": frame_start, + "frameEnd": frame_end } instance.data["representations"].append( representation) @@ -428,8 +423,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "name": ext, "ext": "{}".format(ext), "files": list(collection), - "frameStart": start, - "frameEnd": end, "stagingDir": root, "anatomy_template": "render", "fps": fps, diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 0008123509..48efbcde7a 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -253,7 +253,49 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): :param instance: Instance data :type instance: dict + + Data needed for collect_filesequences: + + root + asset * + source * + frameStart + frameEnd + subset + ftrack + fps + user + version * + attachTo *: + subset + version + regex ! + exclude_regex ! 
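+
+        An illustrative payload carrying the keys listed above (all values
+        are made-up examples for this docstring, not a fixed schema; the
+        `*` and `!` marks are draft annotations, not part of the data):
+
+            {
+                "asset": "Bruce",
+                "source": "{root}/assets/Bruce/work/render_v001.ma",
+                "frameStart": 1001,
+                "frameEnd": 1100,
+                "subset": "renderMain",
+                "fps": 25,
+                "version": 1
+            }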
+ + metadata: + session * + instance *: + family + pixelAspect * + resolutionWidth + resolutionHeight + lutPath * + bakeRenderPath + families + slateFrame + version + representations: + name + ext + files": []] + frameStart + frameEnd + stagingDir + anatomy_template + fps + tags """ + # Get a submission job data = instance.data.copy() render_job = data.pop("deadlineSubmissionJob", None) From ae59f724a1a429e72ee3c6a3238e6a320f780815 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 31 Jan 2020 20:07:40 +0100 Subject: [PATCH 086/434] clean(nk): removing unneeded files --- .../_load_unused/extract_write_next_render.py | 24 ---- pype/plugins/nuke/_load_unused/load_backdrop | 0 .../_publish_unused/collect_active_viewer.py | 14 --- .../nuke/_publish_unused/extract_frames.py | 22 ---- .../_publish_unused/extract_nuke_write.py | 116 ------------------ .../nuke/_publish_unused/extract_script.py | 40 ------ .../_publish_unused/integrate_staging_dir.py | 27 ---- .../publish_image_sequences.py | 98 --------------- .../_publish_unused/validate_active_viewer.py | 24 ---- .../_publish_unused/validate_version_match.py | 36 ------ .../validate_write_families.py | 59 --------- .../nukestudio/publish/validate_version.py | 79 ------------ 12 files changed, 539 deletions(-) delete mode 100644 pype/plugins/nuke/_load_unused/extract_write_next_render.py delete mode 100644 pype/plugins/nuke/_load_unused/load_backdrop delete mode 100644 pype/plugins/nuke/_publish_unused/collect_active_viewer.py delete mode 100644 pype/plugins/nuke/_publish_unused/extract_frames.py delete mode 100644 pype/plugins/nuke/_publish_unused/extract_nuke_write.py delete mode 100644 pype/plugins/nuke/_publish_unused/extract_script.py delete mode 100644 pype/plugins/nuke/_publish_unused/integrate_staging_dir.py delete mode 100644 pype/plugins/nuke/_publish_unused/publish_image_sequences.py delete mode 100644 pype/plugins/nuke/_publish_unused/validate_active_viewer.py delete mode 100644 pype/plugins/nuke/_publish_unused/validate_version_match.py delete mode 100644 pype/plugins/nuke/_publish_unused/validate_write_families.py delete mode 100644 pype/plugins/nukestudio/publish/validate_version.py diff --git a/pype/plugins/nuke/_load_unused/extract_write_next_render.py b/pype/plugins/nuke/_load_unused/extract_write_next_render.py deleted file mode 100644 index 40bfe59ec2..0000000000 --- a/pype/plugins/nuke/_load_unused/extract_write_next_render.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api - - -class WriteToRender(pyblish.api.InstancePlugin): - """Swith Render knob on write instance to on, - so next time publish will be set to render - """ - - order = pyblish.api.ExtractorOrder + 0.1 - label = "Write to render next" - optional = True - hosts = ["nuke", "nukeassist"] - families = ["write"] - - def process(self, instance): - return - if [f for f in instance.data["families"] - if ".frames" in f]: - instance[0]["render"].setValue(True) - self.log.info("Swith write node render to `on`") - else: - # swith to - instance[0]["render"].setValue(False) - self.log.info("Swith write node render to `Off`") diff --git a/pype/plugins/nuke/_load_unused/load_backdrop b/pype/plugins/nuke/_load_unused/load_backdrop deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_publish_unused/collect_active_viewer.py b/pype/plugins/nuke/_publish_unused/collect_active_viewer.py deleted file mode 100644 index 5a6cc02b88..0000000000 --- a/pype/plugins/nuke/_publish_unused/collect_active_viewer.py +++ /dev/null @@ -1,14 +0,0 @@ -import 
pyblish.api
-import nuke
-
-
-class CollectActiveViewer(pyblish.api.ContextPlugin):
-    """Collect any active viewer from nodes
-    """
-
-    order = pyblish.api.CollectorOrder + 0.3
-    label = "Collect Active Viewer"
-    hosts = ["nuke"]
-
-    def process(self, context):
-        context.data["ActiveViewer"] = nuke.activeViewer()
diff --git a/pype/plugins/nuke/_publish_unused/extract_frames.py b/pype/plugins/nuke/_publish_unused/extract_frames.py
deleted file mode 100644
index b75f893802..0000000000
--- a/pype/plugins/nuke/_publish_unused/extract_frames.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import pyblish
-
-
-class ExtractFramesToIntegrate(pyblish.api.InstancePlugin):
-    """Extract rendered frames for integrator
-    """
-
-    order = pyblish.api.ExtractorOrder
-    label = "Extract rendered frames"
-    hosts = ["nuke"]
-    families = ["render"]
-
-    def process(self, instance):
-        return
-
-        # staging_dir = instance.data.get('stagingDir', None)
-        # output_dir = instance.data.get('outputDir', None)
-        #
-        # if not staging_dir:
-        #     staging_dir = output_dir
-        #     instance.data['stagingDir'] = staging_dir
-        #     # instance.data['transfer'] = False
diff --git a/pype/plugins/nuke/_publish_unused/extract_nuke_write.py b/pype/plugins/nuke/_publish_unused/extract_nuke_write.py
deleted file mode 100644
index 155b5cf56d..0000000000
--- a/pype/plugins/nuke/_publish_unused/extract_nuke_write.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import os
-
-import nuke
-import pyblish.api
-
-
-class Extract(pyblish.api.InstancePlugin):
-    """Super class for write and writegeo extractors."""
-
-    order = pyblish.api.ExtractorOrder
-    optional = True
-    label = "Extract Nuke [super]"
-    hosts = ["nuke"]
-    match = pyblish.api.Subset
-
-    # targets = ["process.local"]
-
-    def execute(self, instance):
-        # Get frame range
-        node = instance[0]
-        first_frame = nuke.root()["first_frame"].value()
-        last_frame = nuke.root()["last_frame"].value()
-
-        if node["use_limit"].value():
-            first_frame = node["first"].value()
-            last_frame = node["last"].value()
-
-        # Render frames
-        nuke.execute(node.name(), int(first_frame), int(last_frame))
-
-
-class ExtractNukeWrite(Extract):
-    """ Extract output from write nodes. 
""" - - families = ["write", "local"] - label = "Extract Write" - - def process(self, instance): - - self.execute(instance) - - # Validate output - for filename in list(instance.data["collection"]): - if not os.path.exists(filename): - instance.data["collection"].remove(filename) - self.log.warning("\"{0}\" didn't render.".format(filename)) - - -class ExtractNukeCache(Extract): - - label = "Cache" - families = ["cache", "local"] - - def process(self, instance): - - self.execute(instance) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg - - -class ExtractNukeCamera(Extract): - - label = "Camera" - families = ["camera", "local"] - - def process(self, instance): - - node = instance[0] - node["writeGeometries"].setValue(False) - node["writePointClouds"].setValue(False) - node["writeAxes"].setValue(False) - - file_path = node["file"].getValue() - node["file"].setValue(instance.data["output_path"]) - - self.execute(instance) - - node["writeGeometries"].setValue(True) - node["writePointClouds"].setValue(True) - node["writeAxes"].setValue(True) - - node["file"].setValue(file_path) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg - - -class ExtractNukeGeometry(Extract): - - label = "Geometry" - families = ["geometry", "local"] - - def process(self, instance): - - node = instance[0] - node["writeCameras"].setValue(False) - node["writePointClouds"].setValue(False) - node["writeAxes"].setValue(False) - - file_path = node["file"].getValue() - node["file"].setValue(instance.data["output_path"]) - - self.execute(instance) - - node["writeCameras"].setValue(True) - node["writePointClouds"].setValue(True) - node["writeAxes"].setValue(True) - - node["file"].setValue(file_path) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg diff --git a/pype/plugins/nuke/_publish_unused/extract_script.py b/pype/plugins/nuke/_publish_unused/extract_script.py deleted file mode 100644 index 7d55ea0da4..0000000000 --- a/pype/plugins/nuke/_publish_unused/extract_script.py +++ /dev/null @@ -1,40 +0,0 @@ - -import pyblish.api -import os -import pype -import shutil - - -class ExtractScript(pype.api.Extractor): - """Publish script - """ - label = 'Extract Script' - order = pyblish.api.ExtractorOrder - 0.05 - optional = True - hosts = ['nuke'] - families = ["workfile"] - - def process(self, instance): - self.log.debug("instance extracting: {}".format(instance.data)) - current_script = instance.context.data["currentFile"] - - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}".format(instance.data["name"]) - path = os.path.join(stagingdir, filename) - - self.log.info("Performing extraction..") - shutil.copy(current_script, path) - - if "representations" not in instance.data: - instance.data["representations"] = list() - - representation = { - 'name': 'nk', - 'ext': '.nk', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py b/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py deleted file mode 100644 index e05c42ae50..0000000000 --- 
a/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py +++ /dev/null @@ -1,27 +0,0 @@ -import pyblish.api -import shutil -import os - - -class CopyStagingDir(pyblish.api.InstancePlugin): - """Copy data rendered into temp local directory - """ - - order = pyblish.api.IntegratorOrder - 2 - label = "Copy data from temp dir" - hosts = ["nuke", "nukeassist"] - families = ["render.local"] - - def process(self, instance): - temp_dir = instance.data.get("stagingDir") - output_dir = instance.data.get("outputDir") - - # copy data to correct dir - if not os.path.exists(output_dir): - os.makedirs(output_dir) - self.log.info("output dir has been created") - - for f in os.listdir(temp_dir): - self.log.info("copy file to correct destination: {}".format(f)) - shutil.copy(os.path.join(temp_dir, os.path.basename(f)), - os.path.join(output_dir, os.path.basename(f))) diff --git a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py b/pype/plugins/nuke/_publish_unused/publish_image_sequences.py deleted file mode 100644 index 34634dcc6b..0000000000 --- a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py +++ /dev/null @@ -1,98 +0,0 @@ -import re -import os -import json -import subprocess - -import pyblish.api - -from pype.action import get_errored_plugins_from_data - - -def _get_script(): - """Get path to the image sequence script""" - - # todo: use a more elegant way to get the python script - - try: - from pype.fusion.scripts import publish_filesequence - except Exception: - raise RuntimeError("Expected module 'publish_imagesequence'" - "to be available") - - module_path = publish_filesequence.__file__ - if module_path.endswith(".pyc"): - module_path = module_path[:-len(".pyc")] + ".py" - - return module_path - - -class PublishImageSequence(pyblish.api.InstancePlugin): - """Publish the generated local image sequences.""" - - order = pyblish.api.IntegratorOrder - label = "Publish Rendered Image Sequence(s)" - hosts = ["fusion"] - families = ["saver.renderlocal"] - - def process(self, instance): - - # Skip this plug-in if the ExtractImageSequence failed - errored_plugins = get_errored_plugins_from_data(instance.context) - if any(plugin.__name__ == "FusionRenderLocal" for plugin in - errored_plugins): - raise RuntimeError("Fusion local render failed, " - "publishing images skipped.") - - subset = instance.data["subset"] - ext = instance.data["ext"] - - # Regex to match resulting renders - regex = "^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset), - ext=re.escape(ext)) - - # The instance has most of the information already stored - metadata = { - "regex": regex, - "frameStart": instance.context.data["frameStart"], - "frameEnd": instance.context.data["frameEnd"], - "families": ["imagesequence"], - } - - # Write metadata and store the path in the instance - output_directory = instance.data["outputDir"] - path = os.path.join(output_directory, - "{}_metadata.json".format(subset)) - with open(path, "w") as f: - json.dump(metadata, f) - - assert os.path.isfile(path), ("Stored path is not a file for %s" - % instance.data["name"]) - - # Suppress any subprocess console - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - startupinfo.wShowWindow = subprocess.SW_HIDE - - process = subprocess.Popen(["python", _get_script(), - "--paths", path], - bufsize=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - startupinfo=startupinfo) - - while True: - output = process.stdout.readline() - # Break when there is no output or a return code has been 
given - if output == '' and process.poll() is not None: - process.stdout.close() - break - if output: - line = output.strip() - if line.startswith("ERROR"): - self.log.error(line) - else: - self.log.info(line) - - if process.returncode != 0: - raise RuntimeError("Process quit with non-zero " - "return code: {}".format(process.returncode)) diff --git a/pype/plugins/nuke/_publish_unused/validate_active_viewer.py b/pype/plugins/nuke/_publish_unused/validate_active_viewer.py deleted file mode 100644 index 618a7f1502..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_active_viewer.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api -import nuke - - -class ValidateActiveViewer(pyblish.api.ContextPlugin): - """Validate presentse of the active viewer from nodes - """ - - order = pyblish.api.ValidatorOrder - label = "Validate Active Viewer" - hosts = ["nuke"] - - def process(self, context): - viewer_process_node = context.data.get("ViewerProcess") - - assert viewer_process_node, ( - "Missing active viewer process! Please click on output write node and push key number 1-9" - ) - active_viewer = context.data["ActiveViewer"] - active_input = active_viewer.activeInput() - - assert active_input is not None, ( - "Missing active viewer input! Please click on output write node and push key number 1-9" - ) diff --git a/pype/plugins/nuke/_publish_unused/validate_version_match.py b/pype/plugins/nuke/_publish_unused/validate_version_match.py deleted file mode 100644 index 1358d9a7b3..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_version_match.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import pyblish.api -import pype.utils - - - -@pyblish.api.log -class RepairNukeWriteNodeVersionAction(pyblish.api.Action): - label = "Repair" - on = "failed" - icon = "wrench" - - def process(self, context, plugin): - import pype.nuke.lib as nukelib - instances = pype.utils.filter_instances(context, plugin) - - for instance in instances: - node = instance[0] - render_path = nukelib.get_render_path(node) - self.log.info("render_path: {}".format(render_path)) - node['file'].setValue(render_path.replace("\\", "/")) - - -class ValidateVersionMatch(pyblish.api.InstancePlugin): - """Checks if write version matches workfile version""" - - label = "Validate Version Match" - order = pyblish.api.ValidatorOrder - actions = [RepairNukeWriteNodeVersionAction] - hosts = ["nuke"] - families = ['write'] - - def process(self, instance): - - assert instance.data['version'] == instance.context.data['version'], "\ - Version in write doesn't match version of the workfile" diff --git a/pype/plugins/nuke/_publish_unused/validate_write_families.py b/pype/plugins/nuke/_publish_unused/validate_write_families.py deleted file mode 100644 index 73f710867d..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_write_families.py +++ /dev/null @@ -1,59 +0,0 @@ - -import pyblish.api -import pype.api -import pype.nuke.actions - - -class RepairWriteFamiliesAction(pyblish.api.Action): - label = "Fix Write's render attributes" - on = "failed" - icon = "wrench" - - def process(self, instance, plugin): - self.log.info("instance {}".format(instance)) - instance["render"].setValue(True) - self.log.info("Rendering toggled ON") - - -@pyblish.api.log -class ValidateWriteFamilies(pyblish.api.InstancePlugin): - """ Validates write families. 
""" - - order = pyblish.api.ValidatorOrder - label = "Valitade writes families" - hosts = ["nuke"] - families = ["write"] - actions = [pype.nuke.actions.SelectInvalidAction, pype.api.RepairAction] - - @staticmethod - def get_invalid(self, instance): - if not [f for f in instance.data["families"] - if ".frames" in f]: - return - - if not instance.data.get('files'): - return (instance) - - def process(self, instance): - self.log.debug('instance.data["files"]: {}'.format(instance.data['files'])) - - invalid = self.get_invalid(self, instance) - - if invalid: - raise ValueError(str("`{}`: Switch `Render` on! " - "> {}".format(__name__, invalid))) - - # if any(".frames" in f for f in instance.data["families"]): - # if not instance.data["files"]: - # raise ValueError("instance {} is set to publish frames\ - # but no files were collected, render the frames first or\ - # check 'render' checkbox onthe no to 'ON'".format(instance))) - # - # - # self.log.info("Checked correct writes families") - - @classmethod - def repair(cls, instance): - cls.log.info("instance {}".format(instance)) - instance[0]["render"].setValue(True) - cls.log.info("Rendering toggled ON") diff --git a/pype/plugins/nukestudio/publish/validate_version.py b/pype/plugins/nukestudio/publish/validate_version.py deleted file mode 100644 index ebb8f357f8..0000000000 --- a/pype/plugins/nukestudio/publish/validate_version.py +++ /dev/null @@ -1,79 +0,0 @@ -import pyblish -from avalon import io -from pype.action import get_errored_instances_from_context -import pype.api as pype - - -@pyblish.api.log -class RepairNukestudioVersionUp(pyblish.api.Action): - label = "Version Up Workfile" - on = "failed" - icon = "wrench" - - def process(self, context, plugin): - - errored_instances = get_errored_instances_from_context(context) - - # Apply pyblish logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(errored_instances, plugin) - - if instances: - project = context.data["activeProject"] - path = context.data.get("currentFile") - - new_path = pype.version_up(path) - - if project: - project.saveAs(new_path) - - self.log.info("Project workfile version was fixed") - - -class ValidateVersion(pyblish.api.InstancePlugin): - """Validate clip's versions. - - """ - - order = pyblish.api.ValidatorOrder - families = ["plate"] - label = "Validate Version" - actions = [RepairNukestudioVersionUp] - hosts = ["nukestudio"] - - def process(self, instance): - version = int(instance.data.get("version", 0)) - asset_name = instance.data.get("asset", None) - subset_name = instance.data.get("subset", None) - - assert version, "The file is missing version string! example: filename_v001.hrox `{}`" - - self.log.debug("Collected version: `{0}`".format(version)) - - found_v = 0 - try: - io.install() - project = io.find_one({"type": "project"}) - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - subset = io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": subset_name - }) - - version_db = io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': version - }) or {} - found_v = version_db.get("name", 0) - self.log.debug("Found version: `{0}`".format(found_v)) - except Exception as e: - self.log.debug("Problem to get data from database for asset `{0}` subset `{1}`. 
Error: `{2}`".format(asset_name, subset_name, e)) - - assert (found_v != version), "Version must not be the same as in database `{0}`, Versions file: `{1}`, db: `{2}`".format(asset_name, version, found_v) From fb710bbef45ebebfcac50b9cf9dc69651e0ab9e1 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 31 Jan 2020 18:58:51 +0100 Subject: [PATCH 087/434] fix(nuke): remove commented code --- pype/nuke/__init__.py | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index dfd61f4b39..f1f87e40c8 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -33,42 +33,6 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -# class NukeHandler(logging.Handler): -# ''' -# Nuke Handler - emits logs into nuke's script editor. -# warning will emit nuke.warning() -# critical and fatal would popup msg dialog to alert of the error. -# ''' -# -# def __init__(self): -# logging.Handler.__init__(self) -# self.set_name("Pype_Nuke_Handler") -# -# def emit(self, record): -# # Formated message: -# msg = self.format(record) -# -# if record.levelname.lower() in [ -# # "warning", -# "critical", -# "fatal", -# "error" -# ]: -# msg = self.format(record) -# nuke.message(msg) -# -# -# '''Adding Nuke Logging Handler''' -# log.info([handler.get_name() for handler in logging.root.handlers[:]]) -# nuke_handler = NukeHandler() -# if nuke_handler.get_name() \ -# not in [handler.get_name() -# for handler in logging.root.handlers[:]]: -# logging.getLogger().addHandler(nuke_handler) -# logging.getLogger().setLevel(logging.INFO) -# log.info([handler.get_name() for handler in logging.root.handlers[:]]) - - def reload_config(): """Attempt to reload pipeline at run-time. 
From 8e9b44817359f02f30eecbd775bc500e3f492cbe Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 21:06:16 +0100 Subject: [PATCH 088/434] get padding from anatomy --- pype/plugins/global/publish/integrate_new.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 2b8aa5b0fc..739cbc30ad 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -326,8 +326,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = None if repre.get("frameStart"): - frame_start_padding = len(str( - repre.get("frameEnd"))) + frame_start_padding = anatomy.templates["render"]["padding"] index_frame_start = int(repre.get("frameStart")) # exception for slate workflow From 5bceb794413381d7c1a1378de87cb9efaade4041 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 31 Jan 2020 22:48:29 +0100 Subject: [PATCH 089/434] hotfix submit publish job --- pype/plugins/global/publish/submit_publish_job.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index faf4aaef93..a9fa8febd4 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -256,6 +256,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ # Get a submission job data = instance.data.copy() + if hasattr(instance, "_log"): + data['_log'] = instance._log render_job = data.pop("deadlineSubmissionJob", None) submission_type = "deadline" From dc497be92313401bc243127d6cf47c7c230b58a8 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 00:16:20 +0100 Subject: [PATCH 090/434] feat(nuke): anatomy templates and version data family to render --- pype/nuke/lib.py | 2 +- pype/plugins/nuke/publish/collect_writes.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index db1a5919c3..3b3586cfe6 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1269,7 +1269,7 @@ class ExporterReview: 'ext': self.ext, 'files': self.file, "stagingDir": self.staging_dir, - "anatomy_template": "publish", + "anatomy_template": "render", "tags": [self.name.replace("_", "-")] + add_tags } diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 3eff527d47..8e86e12c2a 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -97,7 +97,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "frameEnd": last_frame - handle_end, "version": int(instance.data['version']), "colorspace": node["colorspace"].value(), - "families": [instance.data["family"]], + "families": ["render"], "subset": instance.data["subset"], "fps": instance.context.data["fps"] } From de93ef648ea3d99f0a390113da7f59ba317b73a9 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 00:17:03 +0100 Subject: [PATCH 091/434] feat(nuke): adding render2d for review --- pype/plugins/nuke/publish/collect_review.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/collect_review.py b/pype/plugins/nuke/publish/collect_review.py index 7e7cbedd6c..e41b55bbbd 100644 --- a/pype/plugins/nuke/publish/collect_review.py +++ b/pype/plugins/nuke/publish/collect_review.py @@ -1,12 +1,12 @@ import pyblish.api import nuke + class 
CollectReview(pyblish.api.InstancePlugin): """Collect review instance from rendered frames """ order = pyblish.api.CollectorOrder + 0.3 - family = "review" label = "Collect Review" hosts = ["nuke"] families = ["render", "render.local", "render.farm"] @@ -25,4 +25,7 @@ class CollectReview(pyblish.api.InstancePlugin): instance.data["families"].append("review") instance.data['families'].append('ftrack') + instance.data["families"].append("render2d") + self.log.info("Review collected: `{}`".format(instance)) + self.log.debug("__ instance.data: `{}`".format(instance.data)) From 3a09ff2059e5fbc08f1b04a3777badffcf2c2590 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 00:17:38 +0100 Subject: [PATCH 092/434] feat(global): rename burnin plugin --- pype/plugins/global/publish/extract_burnin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index a3df47518c..f0e4b70d19 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -16,7 +16,7 @@ class ExtractBurnin(pype.api.Extractor): `tags` including `burnin` """ - label = "Quicktime with burnins" + label = "Extract burnins" order = pyblish.api.ExtractorOrder + 0.03 families = ["review", "burnin"] hosts = ["nuke", "maya", "shell"] From c25a70d72ddcc785beaeb10de1ca0167193bd3a2 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 00:18:13 +0100 Subject: [PATCH 093/434] fix(global): ftrack attributes validator failing --- .../plugins/global/publish/validate_custom_ftrack_attributes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/validate_custom_ftrack_attributes.py b/pype/plugins/global/publish/validate_custom_ftrack_attributes.py index 2386b359e4..1e8b239b33 100644 --- a/pype/plugins/global/publish/validate_custom_ftrack_attributes.py +++ b/pype/plugins/global/publish/validate_custom_ftrack_attributes.py @@ -47,7 +47,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin): host = pyblish.api.current_host() to_check = context.data["presets"].get( - host, {}).get("ftrack_attributes") + host, {}).get("ftrack_custom_attributes") if not to_check: self.log.warning("ftrack_attributes preset not found") return From e0d288cdef324078efa8a19a221f664ffb61736b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 00:18:58 +0100 Subject: [PATCH 094/434] fix(nuke): some nodes are failing due to disable knob --- pype/plugins/nuke/publish/collect_instances.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py index 5b123ed7b9..cbbef70e4a 100644 --- a/pype/plugins/nuke/publish/collect_instances.py +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -28,12 +28,15 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes())) for node in nuke.allNodes(): + if node.Class() in ["Viewer", "Dot"]: + continue + try: if node["disable"].value(): continue except Exception as E: self.log.warning(E) - + # get data from avalon knob self.log.debug("node[name]: {}".format(node['name'].value())) From f3bc7258df212d34d7882bdb6bf2ad662c87739d Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 00:19:43 +0100 Subject: [PATCH 095/434] clean(nuke): commented code --- pype/plugins/nuke/publish/validate_script.py | 6 ------ 1 file changed, 6
deletions(-) diff --git a/pype/plugins/nuke/publish/validate_script.py b/pype/plugins/nuke/publish/validate_script.py index 307e3ade59..f7dd84d714 100644 --- a/pype/plugins/nuke/publish/validate_script.py +++ b/pype/plugins/nuke/publish/validate_script.py @@ -15,12 +15,6 @@ class ValidateScript(pyblish.api.InstancePlugin): def process(self, instance): ctx_data = instance.context.data asset_name = ctx_data["asset"] - - # asset = io.find_one({ - # "type": "asset", - # "name": asset_name - # }) - asset = lib.get_asset(asset_name) asset_data = asset["data"] From f177185a73b0d260098932e0547843460bd6504f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 00:58:26 +0100 Subject: [PATCH 096/434] fix(nuke): moving `render2d` to more global level --- pype/plugins/nuke/publish/collect_review.py | 1 - pype/plugins/nuke/publish/collect_writes.py | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/collect_review.py b/pype/plugins/nuke/publish/collect_review.py index e41b55bbbd..c95c94541d 100644 --- a/pype/plugins/nuke/publish/collect_review.py +++ b/pype/plugins/nuke/publish/collect_review.py @@ -25,7 +25,6 @@ class CollectReview(pyblish.api.InstancePlugin): instance.data["families"].append("review") instance.data['families'].append('ftrack') - instance.data["families"].append("render2d") self.log.info("Review collected: `{}`".format(instance)) self.log.debug("__ instance.data: `{}`".format(instance.data)) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 8e86e12c2a..bf1c6a4b66 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -14,6 +14,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): families = ["write"] def process(self, instance): + # adding 2d focused rendering + instance.data["families"].append("render2d") node = None for x in instance: From ac53d4345f7ed5e7ef1aa26fd9910c6b4ffae901 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sat, 1 Feb 2020 01:02:14 +0100 Subject: [PATCH 097/434] fix(nuke): loader failing --- pype/plugins/nuke/load/load_sequence.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 22caa9d6b0..9f3d09186c 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -109,12 +109,12 @@ class LoadSequence(api.Loader): file = self.fname.replace("\\", "/") + repr_cont = context["representation"]["context"] if "#" not in file: frame = repr_cont.get("frame") padding = len(frame) file = file.replace(frame, "#"*padding) - repr_cont = context["representation"]["context"] read_name = "Read_{0}_{1}_{2}".format( repr_cont["asset"], repr_cont["subset"], From 1f6d63d6a540409d57326bf09df7ed4ecef7c2d2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:07:08 +0100 Subject: [PATCH 098/434] added collect datetime plugin --- .../global/publish/collect_datetime_data.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 pype/plugins/global/publish/collect_datetime_data.py diff --git a/pype/plugins/global/publish/collect_datetime_data.py b/pype/plugins/global/publish/collect_datetime_data.py new file mode 100644 index 0000000000..f04f924e18 --- /dev/null +++ b/pype/plugins/global/publish/collect_datetime_data.py @@ -0,0 +1,18 @@ +"""These data *must* be collected only once during publishing process. 
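# Note on the load_sequence fix above (PATCH 097): `repr_cont` has to be read
# before the `"#" not in file` branch, because that branch uses it to swap an
# explicit frame number for a padded hash token. A rough sketch with a
# hypothetical path; `frame` comes from the representation context:
#
#     file = "/publish/plateMain_v001.1001.exr"
#     frame = "1001"
#     padding = len(frame)                       # 4
#     file = file.replace(frame, "#" * padding)  # .../plateMain_v001.####.exr
#
# Nuke's Read node then expands the "####" token back into per-frame paths.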
+ +Provides: + context -> datetimeData +""" + +import pyblish.api +from pypeapp import config + + +class CollectDateTimeData(pyblish.api.ContextPlugin): + order = pyblish.api.CollectorOrder + label = "Collect DateTime data" + + def process(self, context): + key = "datetimeData" + if key not in context.data: + context.data[key] = config.get_datetime_data() From 7bdb43852253826dc6b8c52e1fae88321740b574 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:08:09 +0100 Subject: [PATCH 099/434] extract burnin uses datetime data from context --- pype/plugins/global/publish/extract_burnin.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 4efe02ca3a..85757c101b 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -45,7 +45,8 @@ class ExtractBurnin(pype.api.Extractor): } # Add datetime data to preparation data - prep_data.update(config.get_datetime_data()) + datetime_data = isntance.context.data.get("datetimeData") or {} + prep_data.update(datetime_data) slate_frame_start = frame_start slate_frame_end = frame_end From eacc1ff0bee5d71d8c48c01eca917f5609f82a90 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:21:34 +0100 Subject: [PATCH 100/434] integrate new uses anatomy's used_values --- pype/plugins/global/publish/integrate_new.py | 37 ++++++++++---------- 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 9729716a50..81b37d0555 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -80,6 +80,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "assembly" ] exclude_families = ["clip"] + repre_context_stable_keys = [ + "project", "asset", "task", "subset", "version", "representation", + "family", "hierarchy", "task", "username" + ] def process(self, instance): @@ -288,7 +292,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): anatomy.templates[template_name]["path"]) sequence_repre = isinstance(files, list) - + repre_context = None if sequence_repre: src_collections, remainder = clique.assemble(files) self.log.debug( @@ -311,10 +315,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_data["representation"] = repre['ext'] template_data["frame"] = src_padding_exp % i anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + if repre_context is None: + repre_context = template_filled.used_values test_dest_files.append( - os.path.normpath( - anatomy_filled[template_name]["path"]) + os.path.normpath(template_filled) ) self.log.debug( @@ -394,14 +400,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): src = os.path.join(stagingdir, fname) anatomy_filled = anatomy.format(template_data) - dst = os.path.normpath( - anatomy_filled[template_name]["path"]).replace("..", ".") + template_filled = anatomy_filled[template_name]["path"] + repre_context = template_filled.used_values + dst = os.path.normpath(template_filled).replace("..", ".") instance.data["transfers"].append([src, dst]) repre['published_path'] = self.unc_convert(dst) self.log.debug("__ dst: {}".format(dst)) + for key in self.repre_context_stable_keys: + value = template_data.get(key) + if not value: + continue + repre_context[key] = template_data[key] + representation = { "_id": io.ObjectId(), 
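# Note on the `used_values` capture above: formatting template data through
# pypeapp's Anatomy returns a string-like result that remembers which template
# keys it consumed. A sketch with illustrative values only:
#
#     anatomy_filled = anatomy.format(template_data)
#     template_filled = anatomy_filled[template_name]["path"]
#     str(template_filled)         # e.g. ".../sh010/publish/plateMain/v003/..."
#     template_filled.used_values  # e.g. {"asset": "sh010", "version": 3, ...}
#
# Only keys that actually occur in the template end up in `used_values`, which
# is why the stable context keys are merged back in from `template_data` a few
# lines further down.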
"schema": "pype:representation-2.0", @@ -413,19 +426,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Imprint shortcut to context # for performance reasons. - "context": { - "root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - 'task': TASK, - "silo": asset.get('silo'), - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": version["name"], - "hierarchy": hierarchy, - "representation": repre['ext'] - } + "context": repre_context } if repre.get("outputName"): From ebb5b3b84974fc49e5400eff390321eede39dc9d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:23:39 +0100 Subject: [PATCH 101/434] datetime data are added to template data --- pype/plugins/global/publish/integrate_new.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 81b37d0555..c192804833 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -272,6 +272,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "version": int(version["name"]), "hierarchy": hierarchy} + + # Add datetime data to template data + datetime_data = context.data.get("datetimeData") or {} + template_data.update(datetime_data) + resolution_width = repre.get("resolutionWidth") resolution_height = repre.get("resolutionHeight") fps = instance.data.get("fps") From 13de5280887dbb07f5172c5b45c05e945473682f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:25:42 +0100 Subject: [PATCH 102/434] removed line --- pype/plugins/global/publish/integrate_new.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index c192804833..bba93ed658 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -272,7 +272,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "version": int(version["name"]), "hierarchy": hierarchy} - # Add datetime data to template data datetime_data = context.data.get("datetimeData") or {} template_data.update(datetime_data) From 908f9887952c610a8707d91953f92b5ce849a6f6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:26:50 +0100 Subject: [PATCH 103/434] datetime data added to collect templates --- pype/plugins/global/publish/collect_templates.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index 383944e293..0c272a6044 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -90,6 +90,10 @@ class CollectTemplates(pyblish.api.InstancePlugin): "hierarchy": hierarchy.replace("\\", "/"), "representation": "TEMP")} + # Add datetime data to template data + datetime_data = context.data.get("datetimeData") or {} + template_data.update(datetime_data) + resolution_width = instance.data.get("resolutionWidth") resolution_height = instance.data.get("resolutionHeight") fps = instance.data.get("fps") From 40aa0c2f5b97f32fdeed4837d4ca50c1e3ec59bd Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:27:01 +0100 Subject: [PATCH 104/434] typo fix in collect templates --- pype/plugins/global/publish/collect_templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_templates.py 
b/pype/plugins/global/publish/collect_templates.py index 0c272a6044..46d2898875 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -88,7 +88,7 @@ class CollectTemplates(pyblish.api.InstancePlugin): "subset": subset_name, "version": version_number, "hierarchy": hierarchy.replace("\\", "/"), - "representation": "TEMP")} + "representation": "TEMP"} # Add datetime data to template data datetime_data = context.data.get("datetimeData") or {} From ed280250c4c39df742a52f29deaa0b237053b4ee Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:29:45 +0100 Subject: [PATCH 105/434] fixed variable typo --- pype/plugins/global/publish/extract_burnin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 85757c101b..e50ba891d2 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -45,7 +45,7 @@ class ExtractBurnin(pype.api.Extractor): } # Add datetime data to preparation data - datetime_data = isntance.context.data.get("datetimeData") or {} + datetime_data = instance.context.data.get("datetimeData") or {} prep_data.update(datetime_data) slate_frame_start = frame_start From fd42c048b36e1c527dc5ccd9d55f16e80b21850e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:36:37 +0100 Subject: [PATCH 106/434] fix collect templates: `context` is not defined --- pype/plugins/global/publish/collect_templates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index 46d2898875..f065b3c246 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -91,7 +91,7 @@ class CollectTemplates(pyblish.api.InstancePlugin): "representation": "TEMP"} # Add datetime data to template data - datetime_data = context.data.get("datetimeData") or {} + datetime_data = instance.context.data.get("datetimeData") or {} template_data.update(datetime_data) resolution_width = instance.data.get("resolutionWidth") From 75b6cdd1489c94ee41b2cf94d5aeb99dbe2eac9f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 18:58:29 +0100 Subject: [PATCH 107/434] renamed repre_context_stable_keys to db_representation_context_keys --- pype/plugins/global/publish/integrate_new.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index bba93ed658..7d95534897 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -80,7 +80,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "assembly" ] exclude_families = ["clip"] - repre_context_stable_keys = [ + db_representation_context_keys = [ "project", "asset", "task", "subset", "version", "representation", "family", "hierarchy", "task", "username" ] @@ -413,7 +413,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): repre['published_path'] = self.unc_convert(dst) self.log.debug("__ dst: {}".format(dst)) - for key in self.repre_context_stable_keys: + for key in self.db_representation_context_keys: value = template_data.get(key) if not value: continue From 30d598911d78c803b4d5a7316097f53cdadfce9f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:24:45 +0100 Subject: [PATCH
108/434] removed old integrators --- pype/plugins/global/publish/integrate.py | 417 ----------------- .../publish/integrate_rendered_frames.py | 423 ------------------ 2 files changed, 840 deletions(-) delete mode 100644 pype/plugins/global/publish/integrate.py delete mode 100644 pype/plugins/global/publish/integrate_rendered_frames.py diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py deleted file mode 100644 index 87b9e1a9bd..0000000000 --- a/pype/plugins/global/publish/integrate.py +++ /dev/null @@ -1,417 +0,0 @@ -import os -import logging -import shutil - -import errno -import pyblish.api -from avalon import api, io -from avalon.vendor import filelink - - -log = logging.getLogger(__name__) - - -class IntegrateAsset(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ - - label = "Integrate Asset" - order = pyblish.api.IntegratorOrder - families = [] - exclude_families = ["clip"] - - def process(self, instance): - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: - return - - self.register(instance) - - self.log.info("Integrating Asset in to the database ...") - if instance.data.get('transfer', True): - self.integrate(instance) - - def register(self, instance): - # Required environment variables - PROJECT = api.Session["AVALON_PROJECT"] - ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"] - LOCATION = api.Session["AVALON_LOCATION"] - - context = instance.context - # Atomicity - # - # Guarantee atomic publishes - each asset contains - # an identical set of members. - # __ - # / o - # / \ - # | o | - # \ / - # o __/ - # - assert all(result["success"] for result in context.data["results"]), ( - "Atomicity not held, aborting.") - - # Assemble - # - # | - # v - # ---> <---- - # ^ - # | - # - stagingdir = instance.data.get("stagingDir") - assert stagingdir, ("Incomplete instance \"%s\": " - "Missing reference to staging area." % instance) - - # extra check if stagingDir actually exists and is available - - self.log.debug("Establishing staging directory @ %s" % stagingdir) - - # Ensure at least one file is set up for transfer in staging dir. 
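# Note while reading this deleted integrator: the "Atomicity" guard near the
# top of register() is its key invariant - nothing is written to the database
# unless every preceding plug-in succeeded. In isolation (`context` being the
# pyblish context):
#
#     assert all(
#         result["success"] for result in context.data["results"]
#     ), "Atomicity not held, aborting."
#
# pyblish appends one result record per processed plug-in, so a single failed
# validator is enough to block registration for the whole publish.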
- files = instance.data.get("files", []) - assert files, "Instance has no files to transfer" - assert isinstance(files, (list, tuple)), ( - "Instance 'files' must be a list, got: {0}".format(files) - ) - - project = io.find_one({"type": "project"}) - - asset = io.find_one({ - "type": "asset", - "name": ASSET, - "parent": project["_id"] - }) - - assert all([project, asset]), ("Could not find current project or " - "asset '%s'" % ASSET) - - subset = self.get_subset(asset, instance) - - # get next version - latest_version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - {"name": True}, - sort=[("name", -1)] - ) - - next_version = 1 - if latest_version is not None: - next_version += latest_version["name"] - - self.log.info("Verifying version from assumed destination") - - assumed_data = instance.data["assumedTemplateData"] - assumed_version = assumed_data["version"] - if assumed_version != next_version: - raise AttributeError("Assumed version 'v{0:03d}' does not match" - "next version in database " - "('v{1:03d}')".format(assumed_version, - next_version)) - - self.log.debug("Next version: v{0:03d}".format(next_version)) - - version_data = self.create_version_data(context, instance) - version = self.create_version(subset=subset, - version_number=next_version, - locations=[LOCATION], - data=version_data) - - self.log.debug("Creating version ...") - version_id = io.insert_one(version).inserted_id - - # Write to disk - # _ - # | | - # _| |_ - # ____\ / - # |\ \ / \ - # \ \ v \ - # \ \________. - # \|________| - # - root = api.registered_root() - hierarchy = "" - parents = io.find_one({ - "type": 'asset', - "name": ASSET - })['data']['parents'] - if parents and len(parents) > 0: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*parents) - - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset['silo'], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy} - - # template_publish = project["config"]["template"]["publish"] - anatomy = instance.context.data['anatomy'] - - # Find the representations to transfer amongst the files - # Each should be a single representation (as such, a single extension) - representations = [] - destination_list = [] - if 'transfers' not in instance.data: - instance.data['transfers'] = [] - - for files in instance.data["files"]: - - # Collection - # _______ - # |______|\ - # | |\| - # | || - # | || - # | || - # |_______| - # - - if isinstance(files, list): - collection = files - # Assert that each member has identical suffix - _, ext = os.path.splitext(collection[0]) - assert all(ext == os.path.splitext(name)[1] - for name in collection), ( - "Files had varying suffixes, this is a bug" - ) - - assert not any(os.path.isabs(name) for name in collection) - - template_data["representation"] = ext[1:] - - for fname in collection: - - src = os.path.join(stagingdir, fname) - anatomy_filled = anatomy.format(template_data) - dst = anatomy_filled["publish"]["path"] - - instance.data["transfers"].append([src, dst]) - template = anatomy.templates["publish"]["path"] - - else: - # Single file - # _______ - # | |\ - # | | - # | | - # | | - # |_______| - # - fname = files - assert not os.path.isabs(fname), ( - "Given file name is a full path" - ) - _, ext = os.path.splitext(fname) - - template_data["representation"] = ext[1:] - - src = os.path.join(stagingdir, fname) - anatomy_filled = 
anatomy.format(template_data) - dst = anatomy_filled["publish"]["path"] - - instance.data["transfers"].append([src, dst]) - template = anatomy.templates["publish"]["path"] - - representation = { - "schema": "pype:representation-2.0", - "type": "representation", - "parent": version_id, - "name": ext[1:], - "data": {'path': dst, 'template': template}, - "dependencies": instance.data.get("dependencies", "").split(), - - # Imprint shortcut to context - # for performance reasons. - "context": { - "root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - 'task': api.Session["AVALON_TASK"], - "silo": asset['silo'], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": version["name"], - "hierarchy": hierarchy, - "representation": ext[1:] - } - } - - destination_list.append(dst) - instance.data['destination_list'] = destination_list - representations.append(representation) - - self.log.info("Registering {} items".format(len(representations))) - - io.insert_many(representations) - - def integrate(self, instance): - """Move the files - - Through `instance.data["transfers"]` - - Args: - instance: the instance to integrate - """ - - transfers = instance.data.get("transfers", list()) - - for src, dest in transfers: - self.log.info("Copying file .. {} -> {}".format(src, dest)) - self.copy_file(src, dest) - - # Produce hardlinked copies - # Note: hardlink can only be produced between two files on the same - # server/disk and editing one of the two will edit both files at once. - # As such it is recommended to only make hardlinks between static files - # to ensure publishes remain safe and non-edited. - hardlinks = instance.data.get("hardlinks", list()) - for src, dest in hardlinks: - self.log.info("Hardlinking file .. {} -> {}".format(src, dest)) - self.hardlink_file(src, dest) - - def copy_file(self, src, dst): - """ Copy given source to destination - - Arguments: - src (str): the source file which needs to be copied - dst (str): the destination of the sourc file - Returns: - None - """ - - dirname = os.path.dirname(dst) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - raise - - shutil.copy(src, dst) - - def hardlink_file(self, src, dst): - - dirname = os.path.dirname(dst) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - raise - - filelink.create(src, dst, filelink.HARDLINK) - - def get_subset(self, asset, instance): - - subset = io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"] - }) - - if subset is None: - subset_name = instance.data["subset"] - self.log.info("Subset '%s' not found, creating.." 
% subset_name) - - _id = io.insert_one({ - "schema": "avalon-core:subset-2.0", - "type": "subset", - "name": subset_name, - "data": {}, - "parent": asset["_id"] - }).inserted_id - - subset = io.find_one({"_id": _id}) - - return subset - - def create_version(self, subset, version_number, locations, data=None): - """ Copy given source to destination - - Args: - subset (dict): the registered subset of the asset - version_number (int): the version number - locations (list): the currently registered locations - - Returns: - dict: collection of data to create a version - """ - # Imprint currently registered location - version_locations = [location for location in locations if - location is not None] - - return {"schema": "avalon-core:version-2.0", - "type": "version", - "parent": subset["_id"], - "name": version_number, - "locations": version_locations, - "data": data} - - def create_version_data(self, context, instance): - """Create the data collection for the version - - Args: - context: the current context - instance: the current instance being published - - Returns: - dict: the required information with instance.data as key - """ - - families = [] - current_families = instance.data.get("families", list()) - instance_family = instance.data.get("family", None) - - if instance_family is not None: - families.append(instance_family) - families += current_families - - self.log.debug("Registered root: {}".format(api.registered_root())) - # create relative source path for DB - try: - source = instance.data['source'] - except KeyError: - source = context.data["currentFile"] - - relative_path = os.path.relpath(source, api.registered_root()) - source = os.path.join("{root}", relative_path).replace("\\", "/") - - self.log.debug("Source: {}".format(source)) - version_data = {"families": families, - "time": context.data["time"], - "author": context.data["user"], - "source": source, - "comment": context.data.get("comment"), - "machine": context.data.get("machine"), - "fps": context.data.get("fps")} - - # Include optional data if present in - optionals = [ - "frameStart", "frameEnd", "step", "handles", "sourceHashes" - ] - for key in optionals: - if key in instance.data: - version_data[key] = instance.data[key] - - return version_data diff --git a/pype/plugins/global/publish/integrate_rendered_frames.py b/pype/plugins/global/publish/integrate_rendered_frames.py deleted file mode 100644 index 5819051146..0000000000 --- a/pype/plugins/global/publish/integrate_rendered_frames.py +++ /dev/null @@ -1,423 +0,0 @@ -import os -import logging -import shutil -import clique - -import errno -import pyblish.api -from avalon import api, io - - -log = logging.getLogger(__name__) - - -class IntegrateFrames(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. 
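# Note on the IntegrateAsset plug-in deleted above: it distinguished plain
# copies from hardlinks when transferring published files. A condensed sketch
# of the two modes, using the same helpers it imported (directory creation
# omitted):
#
#     import shutil
#     from avalon.vendor import filelink
#
#     for src, dst in instance.data.get("transfers", []):
#         shutil.copy(src, dst)  # independent copy, safe to edit later
#     for src, dst in instance.data.get("hardlinks", []):
#         filelink.create(src, dst, filelink.HARDLINK)  # shared data on disk
#
# A hardlink requires both paths on the same filesystem, and editing either
# path edits the shared data, which is why the original comments recommended
# hardlinks only between static files.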
- """ - - label = "Integrate Frames" - order = pyblish.api.IntegratorOrder - families = ["imagesequence"] - - family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"] - exclude_families = ["clip"] - - def process(self, instance): - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: - return - - families = [f for f in instance.data["families"] - for search in self.family_targets - if search in f] - - if not families: - return - - self.register(instance) - - # self.log.info("Integrating Asset in to the database ...") - # self.log.info("instance.data: {}".format(instance.data)) - if instance.data.get('transfer', True): - self.integrate(instance) - - def register(self, instance): - - # Required environment variables - PROJECT = api.Session["AVALON_PROJECT"] - ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"] - LOCATION = api.Session["AVALON_LOCATION"] - - context = instance.context - # Atomicity - # - # Guarantee atomic publishes - each asset contains - # an identical set of members. - # __ - # / o - # / \ - # | o | - # \ / - # o __/ - # - assert all(result["success"] for result in context.data["results"]), ( - "Atomicity not held, aborting.") - - # Assemble - # - # | - # v - # ---> <---- - # ^ - # | - # - stagingdir = instance.data.get("stagingDir") - assert stagingdir, ("Incomplete instance \"%s\": " - "Missing reference to staging area." % instance) - - # extra check if stagingDir actually exists and is available - - self.log.debug("Establishing staging directory @ %s" % stagingdir) - - project = io.find_one({"type": "project"}) - - asset = io.find_one({ - "type": "asset", - "name": ASSET, - "parent": project["_id"] - }) - - assert all([project, asset]), ("Could not find current project or " - "asset '%s'" % ASSET) - - subset = self.get_subset(asset, instance) - - # get next version - latest_version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - {"name": True}, - sort=[("name", -1)] - ) - - next_version = 1 - if latest_version is not None: - next_version += latest_version["name"] - - self.log.info("Verifying version from assumed destination") - - assumed_data = instance.data["assumedTemplateData"] - assumed_version = assumed_data["version"] - if assumed_version != next_version: - raise AttributeError("Assumed version 'v{0:03d}' does not match" - "next version in database " - "('v{1:03d}')".format(assumed_version, - next_version)) - - if instance.data.get('version'): - next_version = int(instance.data.get('version')) - - self.log.debug("Next version: v{0:03d}".format(next_version)) - - version_data = self.create_version_data(context, instance) - version = self.create_version(subset=subset, - version_number=next_version, - locations=[LOCATION], - data=version_data) - - self.log.debug("Creating version ...") - version_id = io.insert_one(version).inserted_id - - # Write to disk - # _ - # | | - # _| |_ - # ____\ / - # |\ \ / \ - # \ \ v \ - # \ \________. 
- # \|________| - # - root = api.registered_root() - hierarchy = "" - parents = io.find_one({"type": 'asset', "name": ASSET})[ - 'data']['parents'] - if parents and len(parents) > 0: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*parents) - - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset.get('silo'), - "task": api.Session["AVALON_TASK"], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy} - - # template_publish = project["config"]["template"]["publish"] - anatomy = instance.context.data['anatomy'] - - # Find the representations to transfer amongst the files - # Each should be a single representation (as such, a single extension) - representations = [] - destination_list = [] - - if 'transfers' not in instance.data: - instance.data['transfers'] = [] - - for files in instance.data["files"]: - # Collection - # _______ - # |______|\ - # | |\| - # | || - # | || - # | || - # |_______| - # - if isinstance(files, list): - - src_collections, remainder = clique.assemble(files) - src_collection = src_collections[0] - # Assert that each member has identical suffix - src_head = src_collection.format("{head}") - src_tail = ext = src_collection.format("{tail}") - - test_dest_files = list() - for i in [1, 2]: - template_data["representation"] = src_tail[1:] - template_data["frame"] = src_collection.format( - "{padding}") % i - anatomy_filled = anatomy.format(template_data) - test_dest_files.append(anatomy_filled["render"]["path"]) - - dst_collections, remainder = clique.assemble(test_dest_files) - dst_collection = dst_collections[0] - dst_head = dst_collection.format("{head}") - dst_tail = dst_collection.format("{tail}") - - for i in src_collection.indexes: - src_padding = src_collection.format("{padding}") % i - src_file_name = "{0}{1}{2}".format( - src_head, src_padding, src_tail) - dst_padding = dst_collection.format("{padding}") % i - dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail) - - src = os.path.join(stagingdir, src_file_name) - instance.data["transfers"].append([src, dst]) - - else: - # Single file - # _______ - # | |\ - # | | - # | | - # | | - # |_______| - # - - template_data.pop("frame", None) - - fname = files - - self.log.info("fname: {}".format(fname)) - - assert not os.path.isabs(fname), ( - "Given file name is a full path" - ) - _, ext = os.path.splitext(fname) - - template_data["representation"] = ext[1:] - - src = os.path.join(stagingdir, fname) - - anatomy_filled = anatomy.format(template_data) - dst = anatomy_filled["render"]["path"] - - instance.data["transfers"].append([src, dst]) - - if ext[1:] not in ["jpeg", "jpg", "mov", "mp4", "wav"]: - template_data["frame"] = "#" * int(anatomy_filled["render"]["padding"]) - - anatomy_filled = anatomy.format(template_data) - path_to_save = anatomy_filled["render"]["path"] - template = anatomy.templates["render"]["path"] - - self.log.debug("path_to_save: {}".format(path_to_save)) - - representation = { - "schema": "pype:representation-2.0", - "type": "representation", - "parent": version_id, - "name": ext[1:], - "data": {'path': path_to_save, 'template': template}, - "dependencies": instance.data.get("dependencies", "").split(), - - # Imprint shortcut to context - # for performance reasons. 
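# Note on the sequence branch above: it leans on the `clique` library to treat
# a frame sequence as a single unit. A sketch of the calls it relies on, with
# hypothetical file names:
#
#     import clique
#
#     files = ["render.1001.exr", "render.1002.exr", "render.1003.exr"]
#     collections, remainder = clique.assemble(files)
#     col = collections[0]
#     col.format("{head}")     # "render."
#     col.format("{tail}")     # ".exr"
#     col.format("{padding}")  # "%04d" (printf-style, so `% i` pads a frame)
#     sorted(col.indexes)      # [1001, 1002, 1003]
#
# Formatting "{padding}" % i is exactly how the deleted code rebuilt each
# source and destination file name per frame index.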
- "context": { - "root": root, - "project": { - "name": PROJECT, - "code": project['data']['code'] - }, - "task": api.Session["AVALON_TASK"], - "silo": asset['silo'], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy, - "representation": ext[1:] - } - } - - destination_list.append(dst) - instance.data['destination_list'] = destination_list - representations.append(representation) - - self.log.info("Registering {} items".format(len(representations))) - io.insert_many(representations) - - def integrate(self, instance): - """Move the files - - Through `instance.data["transfers"]` - - Args: - instance: the instance to integrate - """ - - transfers = instance.data["transfers"] - - for src, dest in transfers: - src = os.path.normpath(src) - dest = os.path.normpath(dest) - if src in dest: - continue - - self.log.info("Copying file .. {} -> {}".format(src, dest)) - self.copy_file(src, dest) - - def copy_file(self, src, dst): - """ Copy given source to destination - - Arguments: - src (str): the source file which needs to be copied - dst (str): the destination of the sourc file - Returns: - None - """ - - dirname = os.path.dirname(dst) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - raise - - shutil.copy(src, dst) - - def get_subset(self, asset, instance): - - subset = io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"] - }) - - if subset is None: - subset_name = instance.data["subset"] - self.log.info("Subset '%s' not found, creating.." % subset_name) - - _id = io.insert_one({ - "schema": "pype:subset-2.0", - "type": "subset", - "name": subset_name, - "data": {}, - "parent": asset["_id"] - }).inserted_id - - subset = io.find_one({"_id": _id}) - - return subset - - def create_version(self, subset, version_number, locations, data=None): - """ Copy given source to destination - - Args: - subset (dict): the registered subset of the asset - version_number (int): the version number - locations (list): the currently registered locations - - Returns: - dict: collection of data to create a version - """ - # Imprint currently registered location - version_locations = [location for location in locations if - location is not None] - - return {"schema": "pype:version-2.0", - "type": "version", - "parent": subset["_id"], - "name": version_number, - "locations": version_locations, - "data": data} - - def create_version_data(self, context, instance): - """Create the data collection for the version - - Args: - context: the current context - instance: the current instance being published - - Returns: - dict: the required information with instance.data as key - """ - - families = [] - current_families = instance.data.get("families", list()) - instance_family = instance.data.get("family", None) - - if instance_family is not None: - families.append(instance_family) - families += current_families - - try: - source = instance.data['source'] - except KeyError: - source = context.data["currentFile"] - - relative_path = os.path.relpath(source, api.registered_root()) - source = os.path.join("{root}", relative_path).replace("\\", "/") - - version_data = {"families": families, - "time": context.data["time"], - "author": context.data["user"], - "source": source, - "comment": context.data.get("comment")} - - # Include optional data if present in - optionals = ["frameStart", "frameEnd", 
"step", - "handles", "colorspace", "fps", "outputDir"] - - for key in optionals: - if key in instance.data: - version_data[key] = instance.data.get(key, None) - - return version_data From 9bcdf7f72a96b9839aee1e9c49acb75475b55cc8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:25:33 +0100 Subject: [PATCH 109/434] added avalon entities collector --- .../global/publish/collect_avalon_entities.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 pype/plugins/global/publish/collect_avalon_entities.py diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py new file mode 100644 index 0000000000..c256dffd52 --- /dev/null +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -0,0 +1,46 @@ +"""Collect Anatomy and global anatomy data. + +Requires: + session -> AVALON_PROJECT, AVALON_ASSET + +Provides: + context -> projectEntity - project entity from database + context -> assetEntity - asset entity from database +""" + +from avalon import io, api +import pyblish.api + + +class CollectAvalonEntities(pyblish.api.ContextPlugin): + """Collect Anatomy into Context""" + + order = pyblish.api.CollectorOrder + label = "Collect Avalon Entities" + + def process(self, context): + project_name = api.Session["AVALON_PROJECT"] + asset_name = api.Session["AVALON_ASSET"] + + project_entity = io.find_one({ + "type": "project", + "name": project_name + }) + assert project_entity, ( + "Project '{0}' was not found." + ).format(project_name) + self.log.debug("Collected Project entity \"{}\"".format(project_entity)) + + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + assert asset_entity, ( + "No asset found by the name '{0}' in project '{1}'" + ).format(asset_name, project_name) + + self.log.debug("Collected Asset entity \"{}\"".format(asset_entity)) + + context.data["projectEntity"] = project_entity + context.data["assetEntity"] = asset_entity From a2d75afe7a8e78fd2481c18a095aa96b9382a9e7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:25:57 +0100 Subject: [PATCH 110/434] collect anatomy also collect global anatomy data --- .../plugins/global/publish/collect_anatomy.py | 67 +++++++++++++++++-- 1 file changed, 62 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py index 9412209850..0831c16d32 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -1,10 +1,24 @@ -""" +"""Collect Anatomy and global anatomy data. 
+ Requires: - None + session -> AVALON_TASK + projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder) + username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001) + datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder) + +Optional: + comment -> collect_comment *(pyblish.api.CollectorOrder) + intent -> collected in pyblish-lite + Provides: context -> anatomy (pypeapp.Anatomy) + context -> anatomyData """ +import os +import json + +from avalon import io, api, lib from pypeapp import Anatomy import pyblish.api @@ -12,9 +26,52 @@ import pyblish.api class CollectAnatomy(pyblish.api.ContextPlugin): """Collect Anatomy into Context""" - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder + 0.002 label = "Collect Anatomy" def process(self, context): - context.data['anatomy'] = Anatomy() - self.log.info("Anatomy templates collected...") + root_path = api.registered_root() + task_name = api.Session["AVALON_TASK"] + + project_entity = context.data["projectEntity"] + asset_entity = context.data["assetEntity"] + + project_name = project_entity["name"] + + context.data["anatomy"] = Anatomy(project_name) + self.log.info( + "Anatomy object collected for project \"{}\".".format(project_name) + ) + + hierarchy_items = asset_entity["data"]["parents"] + hierarchy = "" + if hierarchy_items: + hierarchy = os.path.join(*hierarchy_items) + + context_data = { + "root": root_path, + "project": { + "name": project_name, + "code": project_entity["data"].get("code") + }, + "asset": asset_entity["name"], + "hierarchy": hierarchy.replace("\\", "/"), + "task": task_name, + + "username": context.data["user"] + } + + avalon_app_name = os.environ.get("AVALON_APP_NAME") + if avalon_app_name: + application_def = lib.get_application(avalon_app_name) + app_dir = application_def.get("application_dir") + if app_dir: + context_data["app"] = app_dir + + datetime_data = context.data.get("datetimeData") or {} + context_data.update(datetime_data) + + context.data["anatomyData"] = context_data + + self.log.info("Global anatomy Data collected") + self.log.debug(json.dumps(context_data, indent=4)) From 54f76e7f7f9ec884bdbbe915a5088e7aaf8e3e10 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:26:19 +0100 Subject: [PATCH 111/434] collect templates replaced with collect instance anatomy data --- .../publish/collect_instance_anatomy_data.py | 119 ++++++++++++++++++ .../global/publish/collect_templates.py | 117 ----------------- 2 files changed, 119 insertions(+), 117 deletions(-) create mode 100644 pype/plugins/global/publish/collect_instance_anatomy_data.py delete mode 100644 pype/plugins/global/publish/collect_templates.py diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py new file mode 100644 index 0000000000..a1a9278d2a --- /dev/null +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -0,0 +1,119 @@ +""" +Requires: + context -> anatomyData + context -> projectEntity + context -> assetEntity + instance -> asset + instance -> subset + instance -> family + +Optional: + instance -> resolutionWidth + instance -> resolutionHeight + instance -> fps + +Provides: + instance -> anatomyData +""" + +import copy +import json + +from avalon import io +import pyblish.api + + +class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): + """Fill templates with data needed for publish""" + + order = pyblish.api.CollectorOrder + 0.1 + label = "Collect instance 
anatomy data" + hosts = ["maya", "nuke", "standalonepublisher"] + + def process(self, instance): + # get all the stuff from the database + anatomy_data = copy.deepcopy(instance.context.data["anatomyData"]) + project_entity = instance.context.data["projectEntity"] + context_asset_entity = instance.context.data["assetEntity"] + + asset_name = instance.data["asset"] + # Check if asset name is the same as what is in context + # - they may be different, e.g. in NukeStudio + if context_asset_entity["name"] == asset_name: + asset_entity = context_asset_entity + + else: + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + + instance.context.data["assetEntity"] = asset_entity + instance.context.data["projectEntity"] = project_entity + + subset_name = instance.data["subset"] + subset_entity = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_entity["_id"] + }) + + version_number = instance.data.get("version") + if version_number is None: + version_number = instance.context.data.get("version") + + latest_version = None + if subset_entity is None: + self.log.debug("Subset entity does not exist yet.") + else: + version_entity = io.find_one( + { + "type": "version", + "parent": subset_entity["_id"] + }, + sort=[("name", -1)] + ) + if version_entity: + latest_version = version_entity["name"] + + if version_number is None: + # TODO we should be able to change this version by studio + # preferences (like start with version number `0`) + version_number = 1 + if latest_version is not None: + version_number += int(latest_version) + + # Version should not be collected since may be instance + anatomy_data.update({ + "asset": asset_entity["name"], + "family": instance.data["family"], + "subset": subset_name, + "version": version_number + }) + + resolution_width = instance.data.get("resolutionWidth") + if resolution_width: + anatomy_data["resolution_width"] = resolution_width + + resolution_height = instance.data.get("resolutionHeight") + if resolution_height: + anatomy_data["resolution_height"] = resolution_height + + fps = instance.data.get("fps") + if resolution_height: + anatomy_data["fps"] = fps + + instance.data["anatomyData"] = anatomy_data + instance.data["latestVersion"] = latest_version + # TODO check if template is used anywhere + # instance.data["template"] = template + + # TODO we should move this to any Validator + # # We take the parent folder of representation 'filepath' + # instance.data["assumedDestination"] = os.path.dirname( + # (anatomy.format(template_data))["publish"]["path"] + # ) + + self.log.info("Instance anatomy Data collected") + self.log.debug(json.dumps(anatomy_data, indent=4)) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py deleted file mode 100644 index f065b3c246..0000000000 --- a/pype/plugins/global/publish/collect_templates.py +++ /dev/null @@ -1,117 +0,0 @@ -""" -Requires: - session -> AVALON_PROJECT - context -> anatomy (pypeapp.Anatomy) - instance -> subset - instance -> asset - instance -> family - -Provides: - instance -> template - instance -> assumedTemplateData - instance -> assumedDestination -""" - -import os - -from avalon import io, api -import pyblish.api - - -class CollectTemplates(pyblish.api.InstancePlugin): - """Fill templates with data needed for publish""" - - order = pyblish.api.CollectorOrder + 0.1 - label = "Collect and fill Templates" - hosts = ["maya", "nuke", "standalonepublisher"] - - def process(self, 
instance): - # get all the stuff from the database - subset_name = instance.data["subset"] - asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - - project = io.find_one( - { - "type": "project", - "name": project_name - }, - projection={"config": True, "data": True} - ) - - template = project["config"]["template"]["publish"] - anatomy = instance.context.data['anatomy'] - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - silo = asset.get('silo') - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += int(version["name"]) - - hierarchy = asset['data']['parents'] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*hierarchy) - - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "silo": silo, - "family": instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "version": version_number, - "hierarchy": hierarchy.replace("\\", "/"), - "representation": "TEMP"} - - # Add datetime data to template data - datetime_data = instance.context.data.get("datetimeData") or {} - template_data.update(datetime_data) - - resolution_width = instance.data.get("resolutionWidth") - resolution_height = instance.data.get("resolutionHeight") - fps = instance.data.get("fps") - - if resolution_width: - template_data["resolution_width"] = resolution_width - if resolution_width: - template_data["resolution_height"] = resolution_height - if resolution_width: - template_data["fps"] = fps - - instance.data["template"] = template - instance.data["assumedTemplateData"] = template_data - - # We take the parent folder of representation 'filepath' - instance.data["assumedDestination"] = os.path.dirname( - (anatomy.format(template_data))["publish"]["path"] - ) - self.log.info("Assumed Destination has been created...") - self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"])) - self.log.debug("__ template: `{}`".format(instance.data["template"])) From 1515f47f0fad2700efaa69022ac682456b7e4c50 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:26:33 +0100 Subject: [PATCH 112/434] extract burnin uses anatomyData --- pype/plugins/global/publish/extract_burnin.py | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index e50ba891d2..b95c15f340 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -32,21 +32,15 @@ class ExtractBurnin(pype.api.Extractor): frame_end = int(instance.data.get("frameEnd") or 1) duration = frame_end - frame_start + 1 - prep_data = { - "username": instance.context.data['user'], - "asset": os.environ['AVALON_ASSET'], - "task": os.environ['AVALON_TASK'], + prep_data = copy.deepcopy(instance.data["anatomyData"]) + prep_data.update({ "frame_start": frame_start, "frame_end": frame_end, "duration": 
duration, + "version": int(version), + "comment": instance.context.data.get("comment", ""), + "intent": instance.context.data.get("intent", "") - } + }) slate_frame_start = frame_start slate_frame_end = frame_end @@ -64,10 +58,6 @@ class ExtractBurnin(pype.api.Extractor): "slate_duration": slate_duration }) - # Update data with template data - template_data = instance.data.get("assumedTemplateData") or {} - prep_data.update(template_data) - # get anatomy project anatomy = instance.context.data['anatomy'] From e4b23553dffd42aad2ee07b5e207787b1b52c4f8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:26:55 +0100 Subject: [PATCH 113/434] integrate assumed destination replaced with collect resources path --- .../global/publish/collect_resources_path.py | 132 ++++++++++++++++ .../publish/integrate_assumed_destination.py | 147 ------------------ 2 files changed, 132 insertions(+), 147 deletions(-) create mode 100644 pype/plugins/global/publish/collect_resources_path.py delete mode 100644 pype/plugins/global/publish/integrate_assumed_destination.py diff --git a/pype/plugins/global/publish/collect_resources_path.py b/pype/plugins/global/publish/collect_resources_path.py new file mode 100644 index 0000000000..52e926e09c --- /dev/null +++ b/pype/plugins/global/publish/collect_resources_path.py @@ -0,0 +1,132 @@ +import os +import copy + +import pyblish.api +from avalon import io + + +class IntegrateResourcesPath(pyblish.api.InstancePlugin): + """Generate the assumed destination path where the file will be stored""" + + label = "Integrate Prepare Resource" + order = pyblish.api.IntegratorOrder - 0.05 + families = ["clip", "projectfile", "plate"] + + def process(self, instance): + project_entity = instance.context["projectEntity"] + asset_entity = instance.context["assetEntity"] + + template_data = copy.deepcopy(instance.data["anatomyData"]) + + asset_name = instance.data["asset"] + if asset_name != asset_entity["name"]: + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + assert asset_entity, ( + "No asset found by the name '{}' in project '{}'".format( + asset_name, project_entity["name"] + ) + ) + + instance.data["assetEntity"] = asset_entity + + template_data["name"] = asset_entity["name"] + silo_name = asset_entity.get("silo") + if silo_name: + template_data["silo"] = silo_name + + parents = asset_entity["data"].get("parents") or [] + hierarchy = "/".join(parents) + template_data["hierarchy"] = hierarchy + + subset_name = instance.data["subset"] + self.log.info(subset_name) + + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_entity["_id"] + }) + + # assume there is no version yet, we start at `1` + version = None + version_number = 1 + if subset is not None: + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) + + # if there is a subset there ought to be version + if version is not None: + version_number += version["name"] + + if instance.data.get('version'): + version_number = int(instance.data.get('version')) + + anatomy = instance.context.data["anatomy"] + padding = int(anatomy.templates['render']['padding']) + + template_data.update({ + "subset": subset_name, + "frame": ('#' * padding), + "version": version_number, + "representation": "TEMP" + }) + + anatomy_filled =
anatomy.format(template_data) + + template_names = ["publish"] + for repre in instance.data["representations"]: + template_name = repre.get("anatomy_template") + if template_name and template_name not in template_names: + template_names.append(template_name) + + resources = instance.data.get("resources", list()) + transfers = instance.data.get("transfers", list()) + + for template_name in template_names: + mock_template = anatomy_filled[template_name]["path"] + + # For now assume resources end up in a "resources" folder in the + # published folder + mock_destination = os.path.join( + os.path.dirname(mock_template), "resources" + ) + + # Clean the path + mock_destination = os.path.abspath( + os.path.normpath(mock_destination) + ).replace("\\", "/") + + # Define resource destination and transfers + for resource in resources: + # Add destination to the resource + source_filename = os.path.basename( + resource["source"]).replace("\\", "/") + destination = os.path.join(mock_destination, source_filename) + + # Force forward slashes to fix issue with software unable + # to work correctly with backslashes in specific scenarios + # (e.g. escape characters in PLN-151 V-Ray UDIM) + destination = destination.replace("\\", "/") + + resource['destination'] = destination + + # Collect transfers for the individual files of the resource + # e.g. all individual files of a cache or UDIM textures. + files = resource['files'] + for fsrc in files: + fname = os.path.basename(fsrc) + fdest = os.path.join( + mock_destination, fname).replace("\\", "/") + transfers.append([fsrc, fdest]) + + instance.data["resources"] = resources + instance.data["transfers"] = transfers diff --git a/pype/plugins/global/publish/integrate_assumed_destination.py b/pype/plugins/global/publish/integrate_assumed_destination.py deleted file mode 100644 index d090e2711a..0000000000 --- a/pype/plugins/global/publish/integrate_assumed_destination.py +++ /dev/null @@ -1,147 +0,0 @@ -import pyblish.api -import os - -from avalon import io, api - - -class IntegrateAssumedDestination(pyblish.api.InstancePlugin): - """Generate the assumed destination path where the file will be stored""" - - label = "Integrate Assumed Destination" - order = pyblish.api.IntegratorOrder - 0.05 - families = ["clip", "projectfile", "plate"] - - def process(self, instance): - - anatomy = instance.context.data['anatomy'] - - self.create_destination_template(instance, anatomy) - - template_data = instance.data["assumedTemplateData"] - # self.log.info(anatomy.templates) - anatomy_filled = anatomy.format(template_data) - - # self.log.info(anatomy_filled) - mock_template = anatomy_filled["publish"]["path"] - - # For now assume resources end up in a "resources" folder in the - # published folder - mock_destination = os.path.join(os.path.dirname(mock_template), - "resources") - - # Clean the path - mock_destination = os.path.abspath( - os.path.normpath(mock_destination)).replace("\\", "/") - - # Define resource destination and transfers - resources = instance.data.get("resources", list()) - transfers = instance.data.get("transfers", list()) - for resource in resources: - - # Add destination to the resource - source_filename = os.path.basename( - resource["source"]).replace("\\", "/") - destination = os.path.join(mock_destination, source_filename) - - # Force forward slashes to fix issue with software unable - # to work correctly with backslashes in specific scenarios - # (e.g. 
escape characters in PLN-151 V-Ray UDIM) - destination = destination.replace("\\", "/") - - resource['destination'] = destination - - # Collect transfers for the individual files of the resource - # e.g. all individual files of a cache or UDIM textures. - files = resource['files'] - for fsrc in files: - fname = os.path.basename(fsrc) - fdest = os.path.join( - mock_destination, fname).replace("\\", "/") - transfers.append([fsrc, fdest]) - - instance.data["resources"] = resources - instance.data["transfers"] = transfers - - def create_destination_template(self, instance, anatomy): - """Create a filepath based on the current data available - - Example template: - {root}/{project}/{asset}/publish/{subset}/v{version:0>3}/ - {subset}.{representation} - Args: - instance: the instance to publish - - Returns: - file path (str) - """ - - # get all the stuff from the database - subset_name = instance.data["subset"] - self.log.info(subset_name) - asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - a_template = anatomy.templates - - project = io.find_one( - {"type": "project", "name": project_name}, - projection={"config": True, "data": True} - ) - - template = a_template['publish']['path'] - # anatomy = instance.context.data['anatomy'] - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += version["name"] - - if instance.data.get('version'): - version_number = int(instance.data.get('version')) - - padding = int(a_template['render']['padding']) - - hierarchy = asset['data']['parents'] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = "/".join(hierarchy) - - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "family": instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "frame": ('#' * padding), - "version": version_number, - "hierarchy": hierarchy, - "representation": "TEMP"} - - instance.data["assumedTemplateData"] = template_data - self.log.info(template_data) - instance.data["template"] = template From f6992a3d44532fac0d11a87c5d7cdfe0a0db715d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:27:20 +0100 Subject: [PATCH 114/434] collector for project data was removed --- .../global/publish/collect_project_data.py | 24 ------------------- .../nukestudio/publish/collect_clips.py | 2 +- 2 files changed, 1 insertion(+), 25 deletions(-) delete mode 100644 pype/plugins/global/publish/collect_project_data.py diff --git a/pype/plugins/global/publish/collect_project_data.py b/pype/plugins/global/publish/collect_project_data.py deleted file mode 100644 index acdbc2c41f..0000000000 --- a/pype/plugins/global/publish/collect_project_data.py +++ /dev/null @@ -1,24 +0,0 @@ -""" -Requires: - None - -Provides: - context -> projectData -""" - -import pyblish.api -import pype.api as pype - - -class CollectProjectData(pyblish.api.ContextPlugin): - """Collecting project data from 
avalon db""" - - label = "Collect Project Data" - order = pyblish.api.CollectorOrder - 0.1 - hosts = ["nukestudio"] - - def process(self, context): - # get project data from avalon db - context.data["projectData"] = pype.get_project()["data"] - - return diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 3759d50f6a..82053b6811 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -17,7 +17,7 @@ class CollectClips(api.ContextPlugin): self.log.debug("Created `assetsShared` in context") context.data["assetsShared"] = dict() - projectdata = context.data["projectData"] + projectdata = context.data["projectEntity"]["data"] version = context.data.get("version", "001") sequence = context.data.get("activeSequence") selection = context.data.get("selection") From 5177b891ac5b1b1be0f19c621630be169b08741d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:27:42 +0100 Subject: [PATCH 115/434] extract yeti rig and extract look uses anatomyData --- pype/plugins/maya/publish/extract_look.py | 69 +++++-------------- pype/plugins/maya/publish/extract_yeti_rig.py | 10 +-- 2 files changed, 23 insertions(+), 56 deletions(-) diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py index fa6ecd72c3..4000011520 100644 --- a/pype/plugins/maya/publish/extract_look.py +++ b/pype/plugins/maya/publish/extract_look.py @@ -1,6 +1,7 @@ import os import sys import json +import copy import tempfile import contextlib import subprocess @@ -333,7 +334,7 @@ class ExtractLook(pype.api.Extractor): anatomy = instance.context.data["anatomy"] - self.create_destination_template(instance, anatomy) + destination_dir = self.create_destination_template(instance, anatomy) # Compute destination location basename, ext = os.path.splitext(os.path.basename(filepath)) @@ -343,7 +344,7 @@ class ExtractLook(pype.api.Extractor): ext = ".tx" return os.path.join( - instance.data["assumedDestination"], "resources", basename + ext + destination_dir, "resources", basename + ext ) def _process_texture(self, filepath, do_maketx, staging, linearise, force): @@ -421,38 +422,17 @@ class ExtractLook(pype.api.Extractor): file path (str) """ - # get all the stuff from the database + asset_entity = instance.context["assetEntity"] + + template_data = copy.deepcopy(instance.data["anatomyData"]) + subset_name = instance.data["subset"] self.log.info(subset_name) - asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - a_template = anatomy.templates - - project = io.find_one( - { - "type": "project", - "name": project_name - }, - projection={"config": True, "data": True} - ) - - template = a_template["publish"]["path"] - # anatomy = instance.context.data['anatomy'] - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - assert asset, ("No asset found by the name '{}' " - "in project '{}'").format(asset_name, project_name) - silo = asset.get("silo") subset = io.find_one({ "type": "subset", "name": subset_name, - "parent": asset["_id"] + "parent": asset_entity["_id"] }) # assume there is no version yet, we start at `1` @@ -471,33 +451,18 @@ class ExtractLook(pype.api.Extractor): if version is not None: version_number += version["name"] - if instance.data.get("version"): - version_number = int(instance.data.get("version")) + if instance.data.get('version'): + version_number = 
int(instance.data.get('version'))
 
-        padding = int(a_template["render"]["padding"])
+        anatomy = instance.context.data["anatomy"]
+        padding = int(anatomy.templates['render']['padding'])
 
-        hierarchy = asset["data"]["parents"]
-        if hierarchy:
-            # hierarchy = os.path.sep.join(hierarchy)
-            hierarchy = "/".join(hierarchy)
-
-        template_data = {
-            "root": api.Session["AVALON_PROJECTS"],
-            "project": {"name": project_name, "code": project["data"]["code"]},
-            "silo": silo,
-            "family": instance.data["family"],
-            "asset": asset_name,
+        template_data.update({
             "subset": subset_name,
             "frame": ("#" * padding),
             "version": version_number,
-            "hierarchy": hierarchy,
-            "representation": "TEMP",
-        }
+            "representation": "TEMP"
+        })
+        anatomy_filled = anatomy.format(template_data)
 
-        instance.data["assumedTemplateData"] = template_data
-        self.log.info(template_data)
-        instance.data["template"] = template
-        # We take the parent folder of representation 'filepath'
-        instance.data["assumedDestination"] = os.path.dirname(
-            anatomy.format(template_data)["publish"]["path"]
-        )
+        return os.path.dirname(anatomy_filled["publish"]["path"])
diff --git a/pype/plugins/maya/publish/extract_yeti_rig.py b/pype/plugins/maya/publish/extract_yeti_rig.py
index 892bc0bea6..d390a1365a 100644
--- a/pype/plugins/maya/publish/extract_yeti_rig.py
+++ b/pype/plugins/maya/publish/extract_yeti_rig.py
@@ -1,6 +1,7 @@
 import os
 import json
 import contextlib
+import copy
 
 from maya import cmds
 
@@ -111,11 +112,12 @@ class ExtractYetiRig(pype.api.Extractor):
         self.log.info("Writing metadata file")
 
         # Create assumed destination folder for imageSearchPath
-        assumed_temp_data = instance.data["assumedTemplateData"]
-        template = instance.data["template"]
-        template_formatted = template.format(**assumed_temp_data)
+        template_data = copy.deepcopy(instance.data["anatomyData"])
 
-        destination_folder = os.path.dirname(template_formatted)
+        anatomy = instance.context.data["anatomy"]
+        filled = anatomy.format(template_data)
+
+        destination_folder = os.path.dirname(filled["publish"]["path"])
 
         image_search_path = os.path.join(destination_folder, "resources")
         image_search_path = os.path.normpath(image_search_path)
 

From f1486a9cd42820684d03c317442a0919b597bfef Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 3 Feb 2020 19:27:59 +0100
Subject: [PATCH 116/434] integrate_new uses anatomyData

---
 pype/plugins/global/publish/integrate_new.py | 80 ++++++++------------
 1 file changed, 30 insertions(+), 50 deletions(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 7d95534897..c6bc1ffbab 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -2,6 +2,7 @@ import os
 from os.path import getsize
 import logging
 import sys
+import copy
 import clique
 import errno
 import pyblish.api
@@ -100,12 +101,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
 
     def register(self, instance):
         # Required environment variables
-        PROJECT = api.Session["AVALON_PROJECT"]
-        ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"]
-        TASK = instance.data.get("task") or api.Session["AVALON_TASK"]
-        LOCATION = api.Session["AVALON_LOCATION"]
+        anatomy_data = instance.data["anatomyData"]
+        asset_entity = instance.data["assetEntity"]
+        avalon_location = api.Session["AVALON_LOCATION"]
+
+        io.install()
 
         context = instance.context
+
         # Atomicity
         #
         # Guarantee atomic publishes - each asset contains
@@ -140,35 +143,27 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         #
 
        stagingdir = 
instance.data.get("stagingDir") if not stagingdir: - self.log.info('''{} is missing reference to staging - directory Will try to get it from - representation'''.format(instance)) + self.log.info(( + "{0} is missing reference to staging directory." + " Will try to get it from representation." + ).format(instance)) - # extra check if stagingDir actually exists and is available - - self.log.debug("Establishing staging directory @ %s" % stagingdir) + else: + self.log.debug( + "Establishing staging directory @ {0}".format(stagingdir) + ) # Ensure at least one file is set up for transfer in staging dir. - repres = instance.data.get("representations", None) + repres = instance.data.get("representations") assert repres, "Instance has no files to transfer" assert isinstance(repres, (list, tuple)), ( - "Instance 'files' must be a list, got: {0}".format(repres) + "Instance 'files' must be a list, got: {0} {1}".format( + str(type(repres)), str(repres) + ) ) - # FIXME: io is not initialized at this point for shell host - io.install() - project = io.find_one({"type": "project"}) - - asset = io.find_one({ - "type": "asset", - "name": ASSET, - "parent": project["_id"] - }) - - assert all([project, asset]), ("Could not find current project or " - "asset '%s'" % ASSET) - - subset = self.get_subset(asset, instance) + intent = context.data.get("intent") + subset = self.get_subset(asset_entity, instance) # get next version latest_version = io.find_one( @@ -229,16 +224,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # \ \________. # \|________| # - root = api.registered_root() - hierarchy = "" - parents = io.find_one({ - "type": 'asset', - "name": ASSET - })['data']['parents'] - if parents and len(parents) > 0: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*parents) - anatomy = instance.context.data['anatomy'] # Find the representations to transfer amongst the files @@ -261,20 +246,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # |_______| # # create template data for Anatomy - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset.get('silo'), - "task": TASK, - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy} + template_data = copy.deepcopy(anatomy_data) + # TODO cleanup this code, should be already in anatomyData + template_data.update({ + "subset": subset["name"], + "version": int(version["name"]) + }) - # Add datetime data to template data - datetime_data = context.data.get("datetimeData") or {} - template_data.update(datetime_data) + if intent is not None: + template_data["intent"] = intent resolution_width = repre.get("resolutionWidth") resolution_height = repre.get("resolutionHeight") @@ -292,6 +272,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): stagingdir = repre['stagingDir'] if repre.get('anatomy_template'): template_name = repre['anatomy_template'] + template = os.path.normpath( anatomy.templates[template_name]["path"]) @@ -322,7 +303,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_filled = anatomy_filled[template_name]["path"] if repre_context is None: repre_context = template_filled.used_values - test_dest_files.append( os.path.normpath(template_filled) ) From 91d51f145844aed301c3f7a721e807e0dfb154a7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 3 Feb 2020 19:28:33 +0100 Subject: [PATCH 117/434] removed deprecated validate templates --- 
.../global/publish/validate_templates.py | 43 ------------------- 1 file changed, 43 deletions(-) delete mode 100644 pype/plugins/global/publish/validate_templates.py diff --git a/pype/plugins/global/publish/validate_templates.py b/pype/plugins/global/publish/validate_templates.py deleted file mode 100644 index f24f6b1a2e..0000000000 --- a/pype/plugins/global/publish/validate_templates.py +++ /dev/null @@ -1,43 +0,0 @@ -import pyblish.api -import os - - -class ValidateTemplates(pyblish.api.ContextPlugin): - """Check if all templates were filled""" - - label = "Validate Templates" - order = pyblish.api.ValidatorOrder - 0.1 - hosts = ["maya", "houdini", "nuke"] - - def process(self, context): - - anatomy = context.data["anatomy"] - if not anatomy: - raise RuntimeError("Did not find anatomy") - else: - data = { - "root": os.environ["PYPE_STUDIO_PROJECTS_PATH"], - "project": {"name": "D001_projectsx", - "code": "prjX"}, - "ext": "exr", - "version": 3, - "task": "animation", - "asset": "sh001", - "app": "maya", - "hierarchy": "ep101/sq01/sh010"} - - anatomy_filled = anatomy.format(data) - self.log.info(anatomy_filled) - - data = {"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"], - "project": {"name": "D001_projectsy", - "code": "prjY"}, - "ext": "abc", - "version": 1, - "task": "lookdev", - "asset": "bob", - "app": "maya", - "hierarchy": "ep101/sq01/bob"} - - anatomy_filled = context.data["anatomy"].format(data) - self.log.info(anatomy_filled["work"]["folder"]) From 25a6cd2c13ea0da2af2698c6e4b0442fc438e6e9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 00:12:09 +0100 Subject: [PATCH 118/434] fixed avalon entity check for nukestudio publish --- pype/ftrack/events/event_sync_to_avalon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index 23284a2ae6..1b245efaa8 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -1437,7 +1437,7 @@ class SyncToAvalonEvent(BaseEvent): .get("name", {}) .get("new") ) - avalon_ent_by_name = self.avalon_ents_by_name.get(name) + avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {} avalon_ent_by_name_ftrack_id = ( avalon_ent_by_name .get("data", {}) From 670f660a9724a83691913ffbfece7b9ae22cd414 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 10:59:30 +0100 Subject: [PATCH 119/434] changed collect instance anatomy data order to 0.49 --- pype/plugins/global/publish/collect_instance_anatomy_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py index a1a9278d2a..76ab8dc3f6 100644 --- a/pype/plugins/global/publish/collect_instance_anatomy_data.py +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -26,7 +26,7 @@ import pyblish.api class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): """Fill templates with data needed for publish""" - order = pyblish.api.CollectorOrder + 0.1 + order = pyblish.api.CollectorOrder + 0.49 label = "Collect instance anatomy data" hosts = ["maya", "nuke", "standalonepublisher"] From 1a04dca10a7c2af07d779bc1339c941eb5ebf44d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 11:00:38 +0100 Subject: [PATCH 120/434] asset entity may not exist so collecting instance anatomy data was changed to not crash --- .../publish/collect_instance_anatomy_data.py | 46 ++++++++++--------- 
1 file changed, 24 insertions(+), 22 deletions(-)

diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py
index 76ab8dc3f6..ecef9d10f2 100644
--- a/pype/plugins/global/publish/collect_instance_anatomy_data.py
+++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py
@@ -53,40 +53,42 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
         subset_name = instance.data["subset"]
-        subset_entity = io.find_one({
-            "type": "subset",
-            "name": subset_name,
-            "parent": asset_entity["_id"]
-        })
 
         version_number = instance.data.get("version")
 
         latest_version = None
-        if subset_entity is None:
-            self.log.debug("Subset entity does not exist yet.")
-        else:
-            version_entity = io.find_one(
-                {
-                    "type": "version",
-                    "parent": subset_entity["_id"]
-                },
-                sort=[("name", -1)]
-            )
-            if version_entity:
-                latest_version = version_entity["name"]
+
+        if asset_entity:
+            subset_entity = io.find_one({
+                "type": "subset",
+                "name": subset_name,
+                "parent": asset_entity["_id"]
+            })
+
+
+            if subset_entity is None:
+                self.log.debug("Subset entity does not exist yet.")
+            else:
+                version_entity = io.find_one(
+                    {
+                        "type": "version",
+                        "parent": subset_entity["_id"]
+                    },
+                    sort=[("name", -1)]
+                )
+                if version_entity:
+                    latest_version = version_entity["name"]
+
+
+        # If version is not specified for instance or context
         if version_number is None:
-            # TODO we should be able to change this version by studio
+            # TODO we should be able to change default version by studio
            # preferences (like start with version number `0`)
             version_number = 1
+            # use latest version (+1) if already any exist
             if latest_version is not None:
                 version_number += int(latest_version)
 
         # Version should not be collected since may be instance
         anatomy_data.update({
-            "asset": asset_entity["name"],
+            "asset": asset_name,
             "family": instance.data["family"],
             "subset": subset_name,
             "version": version_number

From a14b05ccd1b5f75d4ccde0349de1096ec0425592 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 4 Feb 2020 11:00:48 +0100
Subject: [PATCH 121/434] removed comments

---
 .../global/publish/collect_instance_anatomy_data.py | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py
index ecef9d10f2..838fb1a113 100644
--- a/pype/plugins/global/publish/collect_instance_anatomy_data.py
+++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py
@@ -108,14 +108,6 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
         instance.data["anatomyData"] = anatomy_data
         instance.data["latestVersion"] = latest_version
 
-        # TODO check if template is used anywhere
-        # instance.data["template"] = template
-
-        # TODO we should move this to any Validator
-        # # We take the parent folder of representation 'filepath'
-        # instance.data["assumedDestination"] = os.path.dirname(
-        #     (anatomy.format(template_data))["publish"]["path"]
-        # )
 
         self.log.info("Instance anatomy Data collected")
         self.log.debug(json.dumps(anatomy_data, indent=4))

From f70f307cc40d95bfa5181e27ef7384108e9fb10b Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 4 Feb 2020 15:16:35 +0100
Subject: [PATCH 122/434] removed misleading comments

---
 pype/plugins/global/publish/integrate_new.py | 52 --------------------
 1 file changed, 52 deletions(-)

diff --git a/pype/plugins/global/publish/integrate_new.py 
b/pype/plugins/global/publish/integrate_new.py index c6bc1ffbab..c3a03324aa 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -109,38 +109,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): context = instance.context - # Atomicity - # - # Guarantee atomic publishes - each asset contains - # an identical set of members. - # __ - # / o - # / \ - # | o | - # \ / - # o __/ - # - # for result in context.data["results"]: - # if not result["success"]: - # self.log.debug(result) - # exc_type, exc_value, exc_traceback = result["error_info"] - # extracted_traceback = traceback.extract_tb(exc_traceback)[-1] - # self.log.debug( - # "Error at line {}: \"{}\"".format( - # extracted_traceback[1], result["error"] - # ) - # ) - # assert all(result["success"] for result in context.data["results"]),( - # "Atomicity not held, aborting.") - - # Assemble - # - # | - # v - # ---> <---- - # ^ - # | - # stagingdir = instance.data.get("stagingDir") if not stagingdir: self.log.info(( @@ -214,16 +182,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_id = existing_version['_id'] instance.data['version'] = version['name'] - # Write to disk - # _ - # | | - # _| |_ - # ____\ / - # |\ \ / \ - # \ \ v \ - # \ \________. - # \|________| - # anatomy = instance.context.data['anatomy'] # Find the representations to transfer amongst the files @@ -235,16 +193,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): instance.data['transfers'] = [] for idx, repre in enumerate(instance.data["representations"]): - - # Collection - # _______ - # |______|\ - # | |\| - # | || - # | || - # | || - # |_______| - # # create template data for Anatomy template_data = copy.deepcopy(anatomy_data) # TODO cleanup this code, should be already in anatomyData From 6ef1a7e17605233bc8ea0dc25e3912d0d0a9dc9d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:17:15 +0100 Subject: [PATCH 123/434] formatting --- pype/plugins/global/publish/integrate_new.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index c3a03324aa..b71b5fb298 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -159,10 +159,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if version_data_instance: version_data.update(version_data_instance) - version = self.create_version(subset=subset, - version_number=next_version, - locations=[LOCATION], - data=version_data) + version = self.create_version( + subset=subset, + version_number=next_version, + locations=[avalon_location], + data=version_data + ) self.log.debug("Creating version ...") existing_version = io.find_one({ From 1dcdac7ae051cb359fb481a812027a553e4c79e1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:18:11 +0100 Subject: [PATCH 124/434] asset_entity check moved back since nukestudio instances may have not set value --- pype/plugins/global/publish/integrate_new.py | 26 +++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index b71b5fb298..774a54ea7c 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -102,13 +102,37 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def register(self, instance): # Required environment variables anatomy_data = 
instance.data["anatomyData"] - asset_entity = instance.data["assetEntity"] avalon_location = api.Session["AVALON_LOCATION"] io.install() context = instance.context + project_entity = instance.data["projectEntity"] + + asset_name = instance.data["asset"] + asset_entity = instance.data.get("assetEntity") + if not asset_entity: + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + + assert asset_entity, ( + "No asset found by the name \"{0}\" in project \"{1}\"" + ).format(asset_name, project_entity["name"]) + + instance.data["assetEntity"] = asset_entity + + # update anatomy data with asset specific keys + # - name should already been set + hierarchy = "" + parents = asset_entity["data"]["parents"] + if parents: + hierarchy = "/".join(parents) + anatomy_data["hierarchy"] = hierarchy + stagingdir = instance.data.get("stagingDir") if not stagingdir: self.log.info(( From e23cc33de7ad1e9156f1a659a0619e2fc2609f68 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:18:59 +0100 Subject: [PATCH 125/434] latest version is not queried before check if instance have already version set in data --- pype/plugins/global/publish/integrate_new.py | 33 +++++++++++--------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 774a54ea7c..24162c4cf1 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -157,22 +157,27 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): intent = context.data.get("intent") subset = self.get_subset(asset_entity, instance) - # get next version - latest_version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - {"name": True}, - sort=[("name", -1)] - ) + # TODO iLLiCiT use "latestVersion" from `instance.data` + # and store version in anatomyData instance collector + # instead of query again + instance_version = instance.data.get('version') + if instance_version is not None: + next_version = int(instance_version) - next_version = 1 - if latest_version is not None: - next_version += latest_version["name"] + else: + # get next version + latest_version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + {"name": True}, + sort=[("name", -1)] + ) - if instance.data.get('version'): - next_version = int(instance.data.get('version')) + next_version = 1 + if latest_version is not None: + next_version += int(latest_version["name"]) self.log.debug("Next version: v{0:03d}".format(next_version)) From 29c6768da935380dd499834857b896c64d2b05f5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:19:19 +0100 Subject: [PATCH 126/434] intent added to anatomy data --- pype/plugins/global/publish/integrate_new.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 24162c4cf1..093a9e354c 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -154,7 +154,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): ) ) - intent = context.data.get("intent") subset = self.get_subset(asset_entity, instance) # TODO iLLiCiT use "latestVersion" from `instance.data` @@ -213,6 +212,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): version_id = existing_version['_id'] instance.data['version'] = version['name'] + intent = context.data.get("intent") 
+ if intent is not None: + anatomy_data["intent"] = intent + anatomy = instance.context.data['anatomy'] # Find the representations to transfer amongst the files From 3a5ab92687bbf7cc89ade7ec453997d5189e0f64 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:19:44 +0100 Subject: [PATCH 127/434] removed subset and version anatomy update since they are already set for whole instance --- pype/plugins/global/publish/integrate_new.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 093a9e354c..fc7cbf4afa 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -229,12 +229,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): for idx, repre in enumerate(instance.data["representations"]): # create template data for Anatomy template_data = copy.deepcopy(anatomy_data) - # TODO cleanup this code, should be already in anatomyData - template_data.update({ - "subset": subset["name"], - "version": int(version["name"]) - }) - if intent is not None: template_data["intent"] = intent From 9113fb1c7f72b1e1ad7a0e32ac16fcb26cd67139 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:20:18 +0100 Subject: [PATCH 128/434] added check if index_frame_start exist --- pype/plugins/global/publish/integrate_new.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index fc7cbf4afa..6d85e29732 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -298,7 +298,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = int(repre.get("frameStart")) # exception for slate workflow - if "slate" in instance.data["families"]: + if index_frame_start and "slate" in instance.data["families"]: index_frame_start -= 1 dst_padding_exp = src_padding_exp From fde457d445c18d2f87591017df23e3915b8e55b4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:21:27 +0100 Subject: [PATCH 129/434] intent added to version data --- pype/plugins/global/publish/integrate_new.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 6d85e29732..5dba744346 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -601,6 +601,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "fps": context.data.get( "fps", instance.data.get("fps"))} + intent = context.data.get("intent") + if intent is not None: + version_data["intent"] = intent + # Include optional data if present in optionals = [ "frameStart", "frameEnd", "step", "handles", From df512a5a4a17d9d8b14ceba2bc60a908eccbfe5e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 15:21:52 +0100 Subject: [PATCH 130/434] formatting changes --- pype/plugins/global/publish/integrate_new.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 5dba744346..1ff1dfe520 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -331,7 +331,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if not dst_start_frame: dst_start_frame = dst_padding - dst = "{0}{1}{2}".format( dst_head, dst_start_frame, @@ 
-503,14 +502,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
             filelink.create(src, dst, filelink.HARDLINK)
 
     def get_subset(self, asset, instance):
+        subset_name = instance.data["subset"]
         subset = io.find_one({
             "type": "subset",
             "parent": asset["_id"],
-            "name": instance.data["subset"]
+            "name": subset_name
         })
 
         if subset is None:
-            subset_name = instance.data["subset"]
             self.log.info("Subset '%s' not found, creating.." % subset_name)
             self.log.debug("families. %s" % instance.data.get('families'))
             self.log.debug(

From 6bd8706579b5b1c19ae0ea0c3859e53fdda02013 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 4 Feb 2020 15:22:06 +0100
Subject: [PATCH 131/434] added few TODOs

---
 pype/plugins/global/publish/integrate_new.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 1ff1dfe520..15165f4217 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -203,6 +203,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         if existing_version is None:
             version_id = io.insert_one(version).inserted_id
         else:
+            # TODO query by _id and
+            # remove old version and representations but keep their ids
             io.update_many({
                 'type': 'version',
                 'parent': subset["_id"],
@@ -304,6 +306,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         dst_padding_exp = src_padding_exp
         dst_start_frame = None
         for i in src_collection.indexes:
+            # TODO 1.) do not count padding in each index iteration
+            # 2.) do not count dst_padding from src_padding before
+            #     index_frame_start check
             src_padding = src_padding_exp % i
 
             src_file_name = "{0}{1}{2}".format(

From 178fed2ae22893670dcfff056c13f44ed64c925b Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 4 Feb 2020 15:32:12 +0100
Subject: [PATCH 132/434] updated instance input/output docstring

---
 pype/plugins/global/publish/collect_instance_anatomy_data.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py
index 838fb1a113..404480b30b 100644
--- a/pype/plugins/global/publish/collect_instance_anatomy_data.py
+++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py
@@ -8,12 +8,17 @@ Requires:
     instance -> family
 
 Optional:
+    instance -> version
     instance -> resolutionWidth
     instance -> resolutionHeight
     instance -> fps
 
 Provides:
+    instance -> projectEntity
+    instance -> assetEntity
     instance -> anatomyData
+    instance -> version
+    instance -> latestVersion
 """
 
 import copy

From ed8b56b6de17330054b2c9469ea63133a1ed5a36 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 4 Feb 2020 15:32:54 +0100
Subject: [PATCH 133/434] fixed per key instance.data value assignment

---
 .../global/publish/collect_instance_anatomy_data.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py
index 404480b30b..8a98b6cbb2 100644
--- a/pype/plugins/global/publish/collect_instance_anatomy_data.py
+++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py
@@ -54,9 +54,6 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin):
             "parent": project_entity["_id"]
         })
 
-        instance.context.data["assetEntity"] = asset_entity
-        instance.context.data["projectEntity"] = project_entity
-
         subset_name = instance.data["subset"]
         version_number = instance.data.get("version")
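         # (editor's note, illustrative) read together with the block further
         # down, the effective fallback order for the publish version is:
         #     instance.data["version"] -> latest subset version + 1 -> 1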
latest_version = None @@ -68,7 +65,6 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): "parent": asset_entity["_id"] }) - if subset_entity is None: self.log.debug("Subset entity does not exist yet.") else: @@ -84,7 +80,7 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): # If version is not specified for instance or context if version_number is None: - # TODO we should be able to change this version by studio + # TODO we should be able to change default version by studio # preferences (like start with version number `0`) version_number = 1 # use latest version (+1) if already any exist @@ -111,8 +107,12 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): if resolution_height: anatomy_data["fps"] = fps + instance.data["projectEntity"] = project_entity + instance.data["assetEntity"] = asset_entity instance.data["anatomyData"] = anatomy_data instance.data["latestVersion"] = latest_version + # TODO should be version number set here? + instance.data["version"] = version_number self.log.info("Instance anatomy Data collected") self.log.debug(json.dumps(anatomy_data, indent=4)) From 876ff064b6b6c1a941888e43758196525b49872c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 16:11:29 +0100 Subject: [PATCH 134/434] reduced collect resources path because of already collected data in instance anatomy data --- .../global/publish/collect_resources_path.py | 62 +------------------ 1 file changed, 3 insertions(+), 59 deletions(-) diff --git a/pype/plugins/global/publish/collect_resources_path.py b/pype/plugins/global/publish/collect_resources_path.py index 52e926e09c..de78874cd6 100644 --- a/pype/plugins/global/publish/collect_resources_path.py +++ b/pype/plugins/global/publish/collect_resources_path.py @@ -13,70 +13,14 @@ class IntegrateResourcesPath(pyblish.api.InstancePlugin): families = ["clip", "projectfile", "plate"] def process(self, instance): - project_entity = instance.context["projectEntity"] - asset_entity = instance.context["assetEntity"] - template_data = copy.deepcopy(instance.data["anatomyData"]) - asset_name = instance.data["asset"] - if asset_name != asset_entity["name"]: - asset_entity = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project_entity["_id"] - }) - assert asset_entity, ( - "No asset found by the name '{}' in project '{}'".format( - asset_name, project_entity["name"] - ) - ) - - instance.data["assetEntity"] = asset_entity - - template_data["name"] = asset_entity["name"] - silo_name = asset_entity.get("silo") - if silo_name: - template_data["silo"] = silo_name - - parents = asset_entity["data"].get("parents") or [] - hierarchy = "/".join(parents) - template_data["hierarchy"] = hierarchy - - subset_name = instance.data["subset"] - self.log.info(subset_name) - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_entity["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += version["name"] - - if instance.data.get('version'): - version_number = int(instance.data.get('version')) - anatomy = instance.context.data["anatomy"] - padding = int(anatomy.templates['render']['padding']) + padding = int(anatomy.templates["render"]["padding"]) + # add possible representation specific key to anatomy data 
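+        # (editor's note, illustrative assumption) with a render padding of 4
+        # the update below yields {"frame": "####", "representation": "TEMP"},
+        # placeholder tokens that let anatomy.format() resolve a publish path
+        # before any real file or representation exists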
template_data.update({ - "subset": subset_name, - "frame": ('#' * padding), - "version": version_number, + "frame": ("#" * padding), "representation": "TEMP" }) From 3fdfcec29bf6b62023fe34a8b1d1b01fe2198edf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 16:17:18 +0100 Subject: [PATCH 135/434] version_number is used from instance.data in integrate_new --- pype/plugins/global/publish/integrate_new.py | 32 +++----------------- 1 file changed, 5 insertions(+), 27 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 15165f4217..aff92ea308 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -156,40 +156,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset = self.get_subset(asset_entity, instance) - # TODO iLLiCiT use "latestVersion" from `instance.data` - # and store version in anatomyData instance collector - # instead of query again - instance_version = instance.data.get('version') - if instance_version is not None: - next_version = int(instance_version) - - else: - # get next version - latest_version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - {"name": True}, - sort=[("name", -1)] - ) - - next_version = 1 - if latest_version is not None: - next_version += int(latest_version["name"]) - - self.log.debug("Next version: v{0:03d}".format(next_version)) + version_number = instance.data["version"] + self.log.debug("Next version: v{0:03d}".format(version_number)) version_data = self.create_version_data(context, instance) version_data_instance = instance.data.get('versionData') - if version_data_instance: version_data.update(version_data_instance) version = self.create_version( subset=subset, - version_number=next_version, + version_number=version_number, locations=[avalon_location], data=version_data ) @@ -198,7 +176,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): existing_version = io.find_one({ 'type': 'version', 'parent': subset["_id"], - 'name': next_version + 'name': version_number }) if existing_version is None: version_id = io.insert_one(version).inserted_id @@ -208,7 +186,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): io.update_many({ 'type': 'version', 'parent': subset["_id"], - 'name': next_version + 'name': version_number }, {'$set': version} ) version_id = existing_version['_id'] From ebdc7c3700f17f636573fa45e4ad500f261200f9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 16:23:03 +0100 Subject: [PATCH 136/434] added few todos --- pype/plugins/global/publish/collect_resources_path.py | 5 +++-- pype/plugins/global/publish/integrate_new.py | 5 ++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_resources_path.py b/pype/plugins/global/publish/collect_resources_path.py index de78874cd6..fe152584b6 100644 --- a/pype/plugins/global/publish/collect_resources_path.py +++ b/pype/plugins/global/publish/collect_resources_path.py @@ -16,11 +16,12 @@ class IntegrateResourcesPath(pyblish.api.InstancePlugin): template_data = copy.deepcopy(instance.data["anatomyData"]) anatomy = instance.context.data["anatomy"] - padding = int(anatomy.templates["render"]["padding"]) + frame_padding = int(anatomy.templates["render"]["padding"]) # add possible representation specific key to anatomy data + # TODO ability to set host specific "frame" value template_data.update({ - "frame": ("#" * padding), + "frame": ("#" * frame_padding), 
"representation": "TEMP" }) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index aff92ea308..570a093cdc 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -102,7 +102,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def register(self, instance): # Required environment variables anatomy_data = instance.data["anatomyData"] - avalon_location = api.Session["AVALON_LOCATION"] io.install() @@ -165,6 +164,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if version_data_instance: version_data.update(version_data_instance) + # TODO remove avalon_location (shall we?) + avalon_location = api.Session["AVALON_LOCATION"] + # TODO rename method from `create_version` to + # `prepare_version` or similar... version = self.create_version( subset=subset, version_number=version_number, From f44011268cca59f593402b13b37e5c3a0cefe4db Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 16:25:45 +0100 Subject: [PATCH 137/434] delete action tries to find entities by name and parents if ftrackId is not set in data --- pype/ftrack/actions/action_delete_asset.py | 37 ++++++++++++++++++++-- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py index 7eb9126fca..5d177748cd 100644 --- a/pype/ftrack/actions/action_delete_asset.py +++ b/pype/ftrack/actions/action_delete_asset.py @@ -99,6 +99,7 @@ class DeleteAssetSubset(BaseAction): # Filter event even more (skip task entities) # - task entities are not relevant for avalon + entity_mapping = {} for entity in entities: ftrack_id = entity["id"] if ftrack_id not in ftrack_ids: @@ -107,6 +108,8 @@ class DeleteAssetSubset(BaseAction): if entity.entity_type.lower() == "task": ftrack_ids.remove(ftrack_id) + entity_mapping[ftrack_id] = entity + if not ftrack_ids: # It is bug if this happens! return { @@ -122,11 +125,39 @@ class DeleteAssetSubset(BaseAction): project_name = project["full_name"] self.dbcon.Session["AVALON_PROJECT"] = project_name - selected_av_entities = self.dbcon.find({ + selected_av_entities = list(self.dbcon.find({ "type": "asset", "data.ftrackId": {"$in": ftrack_ids} - }) - selected_av_entities = [ent for ent in selected_av_entities] + })) + if len(selected_av_entities) != len(ftrack_ids): + found_ftrack_ids = [ + ent["data"]["ftrackId"] for ent in selected_av_entities + ] + for ftrack_id, entity in entity_mapping.items(): + if ftrack_id in found_ftrack_ids: + continue + + av_ents_by_name = list(self.dbcon.find({ + "type": "asset", + "name": entity["name"] + })) + if not av_ents_by_name: + continue + + ent_path_items = [ent["name"] for ent in entity["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + # TODO we should say to user that + # few of them are missing in avalon + for av_ent in av_ents_by_name: + if av_ent["data"]["parents"] != parents: + continue + + # TODO we should say to user that found entity + # with same name does not match same ftrack id? 
+ if "ftrackId" not in av_ent["data"]: + selected_av_entities.append(av_ent) + break + if not selected_av_entities: return { "success": False, From d57586c76b22e9d57b56e9abf831cb49e4e570db Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 16:47:26 +0100 Subject: [PATCH 138/434] added mapping for avalon -> ftrack id if ftrackId is not in entity's data --- pype/ftrack/actions/action_delete_asset.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py index 5d177748cd..fc9e66e4f8 100644 --- a/pype/ftrack/actions/action_delete_asset.py +++ b/pype/ftrack/actions/action_delete_asset.py @@ -129,6 +129,7 @@ class DeleteAssetSubset(BaseAction): "type": "asset", "data.ftrackId": {"$in": ftrack_ids} })) + found_without_ftrack_id = {} if len(selected_av_entities) != len(ftrack_ids): found_ftrack_ids = [ ent["data"]["ftrackId"] for ent in selected_av_entities @@ -156,6 +157,7 @@ class DeleteAssetSubset(BaseAction): # with same name does not match same ftrack id? if "ftrackId" not in av_ent["data"]: selected_av_entities.append(av_ent) + found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id break if not selected_av_entities: @@ -186,7 +188,8 @@ class DeleteAssetSubset(BaseAction): "created_at": datetime.now(), "project_name": project_name, "subset_ids_by_name": {}, - "subset_ids_by_parent": {} + "subset_ids_by_parent": {}, + "without_ftrack_id": found_without_ftrack_id } id_item = { @@ -444,14 +447,21 @@ class DeleteAssetSubset(BaseAction): asset_ids_to_archive = [] ftrack_ids_to_delete = [] if len(assets_to_delete) > 0: + map_av_ftrack_id = spec_data["without_ftrack_id"] # Prepare data when deleting whole avalon asset avalon_assets = self.dbcon.find({"type": "asset"}) avalon_assets_by_parent = collections.defaultdict(list) for asset in avalon_assets: + asset_id = asset["_id"] parent_id = asset["data"]["visualParent"] avalon_assets_by_parent[parent_id].append(asset) - if asset["_id"] in assets_to_delete: - ftrack_id = asset["data"]["ftrackId"] + if asset_id in assets_to_delete: + ftrack_id = map_av_ftrack_id.get(str(asset_id)) + if not ftrack_id: + ftrack_id = asset["data"].get("ftrackId") + + if not ftrack_id: + continue ftrack_ids_to_delete.append(ftrack_id) children_queue = Queue() From 569bd6165859670365dce17fbe891af9dc56711f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 4 Feb 2020 17:36:46 +0100 Subject: [PATCH 139/434] fix(nk, nks): some fixes for loading sequence and mov --- .../global/publish/collect_templates.py | 4 ++- pype/plugins/nuke/load/load_mov.py | 34 +++++++++++++++++-- pype/plugins/nuke/load/load_sequence.py | 21 ++++++++++-- 3 files changed, 53 insertions(+), 6 deletions(-) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index 383944e293..5ad7aa7320 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -78,6 +78,8 @@ class CollectTemplates(pyblish.api.InstancePlugin): if hierarchy: # hierarchy = os.path.sep.join(hierarchy) hierarchy = os.path.join(*hierarchy) + else: + hierarchy = "" template_data = {"root": api.Session["AVALON_PROJECTS"], "project": {"name": project_name, @@ -88,7 +90,7 @@ class CollectTemplates(pyblish.api.InstancePlugin): "subset": subset_name, "version": version_number, "hierarchy": hierarchy.replace("\\", "/"), - "representation": "TEMP")} + "representation": "TEMP"} resolution_width = 
instance.data.get("resolutionWidth") resolution_height = instance.data.get("resolutionHeight") diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index 655937b9a8..fccba4c573 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -111,8 +111,15 @@ class LoadMov(api.Loader): if namespace is None: namespace = context['asset']['name'] - file = self.fname.replace("\\", "/") - log.info("file: {}\n".format(self.fname)) + file = self.fname + + if not file: + repr_id = context["representation"]["_id"] + log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") read_name = "Read_{0}_{1}_{2}".format( repr_cont["asset"], @@ -200,7 +207,15 @@ class LoadMov(api.Loader): assert node.Class() == "Read", "Must be Read" - file = self.fname.replace("\\", "/") + file = self.fname + + if not file: + repr_id = representation["_id"] + log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") # Get start frame from version data version = io.find_one({ @@ -263,6 +278,19 @@ class LoadMov(api.Loader): if colorspace: node["colorspace"].setValue(str(colorspace)) + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + node["colorspace"].setValue(str(preset_clrsp)) + + updated_dict = {} updated_dict.update({ "representation": str(representation["_id"]), diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 9f3d09186c..76ff7d2cb6 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -107,7 +107,15 @@ class LoadSequence(api.Loader): first -= self.handle_start last += self.handle_end - file = self.fname.replace("\\", "/") + file = self.fname + + if not file: + repr_id = context["representation"]["_id"] + log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") repr_cont = context["representation"]["context"] if "#" not in file: @@ -229,7 +237,16 @@ class LoadSequence(api.Loader): assert node.Class() == "Read", "Must be Read" repr_cont = representation["context"] - file = self.fname.replace("\\", "/") + + file = self.fname + + if not file: + repr_id = representation["_id"] + log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") if "#" not in file: frame = repr_cont.get("frame") From 66466bc24bf3ba98f1a715dfbd0fe9352ba6a65c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 18:07:17 +0100 Subject: [PATCH 140/434] collect resources path uses anatomy publish.folder key with ability of backwards compatibility --- .../global/publish/collect_resources_path.py | 95 ++++++++----------- 1 file changed, 39 insertions(+), 56 deletions(-) diff --git a/pype/plugins/global/publish/collect_resources_path.py b/pype/plugins/global/publish/collect_resources_path.py index fe152584b6..9fc8c576f5 100644 --- a/pype/plugins/global/publish/collect_resources_path.py +++ b/pype/plugins/global/publish/collect_resources_path.py @@ -1,77 +1,60 @@ +""" +Requires: + context -> anatomy + context -> anatomyData + +Provides: + instance -> publishDir + 
+"""
+
 import os
 import copy
 
 import pyblish.api
-from avalon import io
+from avalon import api
 
 
-class IntegrateResourcesPath(pyblish.api.InstancePlugin):
-    """Generate the assumed destination path where the file will be stored"""
+class CollectResourcesPath(pyblish.api.InstancePlugin):
+    """Generate directory path where the files and resources will be stored"""
 
-    label = "Integrate Prepare Resource"
-    order = pyblish.api.IntegratorOrder - 0.05
-    families = ["clip", "projectfile", "plate"]
+    label = "Collect Resources Path"
+    order = pyblish.api.CollectorOrder + 0.995
 
     def process(self, instance):
+        anatomy = instance.context.data["anatomy"]
+
         template_data = copy.deepcopy(instance.data["anatomyData"])
 
-        anatomy = instance.context.data["anatomy"]
-        frame_padding = int(anatomy.templates["render"]["padding"])
-
-        # add possible representation specific key to anatomy data
-        # TODO ability to set host specific "frame" value
+        # This is for cases of Deprecated anatomy without `folder`
+        # TODO remove when all clients have solved this issue
         template_data.update({
-            "frame": ("#" * frame_padding),
+            "frame": "FRAME_TEMP",
             "representation": "TEMP"
         })
 
         anatomy_filled = anatomy.format(template_data)
 
-        template_names = ["publish"]
-        for repre in instance.data["representations"]:
-            template_name = repre.get("anatomy_template")
-            if template_name and template_name not in template_names:
-                template_names.append(template_name)
+        if "folder" in anatomy.templates["publish"]:
+            publish_folder = anatomy_filled["publish"]["folder"]
+        else:
+            # solve deprecated situation when `folder` key is not underneath
+            # `publish` anatomy
+            project_name = api.Session["AVALON_PROJECT"]
+            self.log.warning((
+                "Deprecation warning: Anatomy does not have set `folder`"
+                " key underneath `publish` (in global or for project `{}`)."
+            ).format(project_name))
 
-        resources = instance.data.get("resources", list())
-        transfers = instance.data.get("transfers", list())
+            file_path = anatomy_filled["publish"]["path"]
+            # Directory
+            publish_folder = os.path.dirname(file_path)
 
+        publish_folder = os.path.normpath(publish_folder)
+        resources_folder = os.path.join(publish_folder, "resources")
 
+        instance.data["publishDir"] = publish_folder
+        instance.data["resourcesDir"] = resources_folder
 
+        self.log.debug("publishDir: \"{}\"".format(publish_folder))
+        self.log.debug("resourcesDir: \"{}\"".format(resources_folder))

From 6f26d0160ce62817843d935b10ba2a937e715a38 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 4 Feb 2020 18:11:32 +0100
Subject: [PATCH 141/434] integrate assumed destination was moved back with
 name integrate resources path

---
 .../publish/integrate_resources_path.py | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100644 pype/plugins/global/publish/integrate_resources_path.py

diff --git a/pype/plugins/global/publish/integrate_resources_path.py b/pype/plugins/global/publish/integrate_resources_path.py
new file mode 100644
index 0000000000..56dc0e5ef7
--- /dev/null
+++ b/pype/plugins/global/publish/integrate_resources_path.py
@@ -0,0 +1,49 @@
+import os
+import pyblish.api
+
+
+class IntegrateResourcesPath(pyblish.api.InstancePlugin):
+    """Generate directory path where the files and resources will be stored"""
+
+    label = "Integrate Resources Path"
+    order = pyblish.api.IntegratorOrder - 0.05
+    families = ["clip", "projectfile", "plate"]
+
+    def process(self, instance):
+        resources = instance.data.get("resources") or []
+        transfers = instance.data.get("transfers") or []
+
+        if not resources and not transfers:
+            self.log.debug(
+                "Instance does not have `resources` and `transfers`"
+            )
+            return
+
+        resources_folder = instance.data["resourcesDir"]
+
+        # Define resource destination and transfers
+        for resource in resources:
+            # Add destination to the resource
+            source_filename = os.path.basename(
+                resource["source"]).replace("\\", "/")
+            destination = os.path.join(resources_folder, source_filename)
+
+            # Force forward slashes to fix issue with software unable
+            # to work correctly with backslashes in specific scenarios
+            # (e.g. escape characters in PLN-151 V-Ray UDIM)
+            destination = destination.replace("\\", "/")
+
+            resource['destination'] = destination
+
+            # Collect transfers for the individual files of the resource
+            # e.g. all individual files of a cache or UDIM textures.
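+            # (editor's note, illustrative) each appended transfer below is a
+            # [source, destination] pair, e.g.:
+            #   ["{staging}/cache.1001.abc", "{resourcesDir}/cache.1001.abc"]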
-                files = resource['files']
-                for fsrc in files:
-                    fname = os.path.basename(fsrc)
-                    fdest = os.path.join(
-                        mock_destination, fname).replace("\\", "/")
-                    transfers.append([fsrc, fdest])
-
-        instance.data["resources"] = resources
-        instance.data["transfers"] = transfers
+
+        self.log.debug("publishDir: \"{}\"".format(publish_folder))
+        self.log.debug("resourcesDir: \"{}\"".format(resources_folder))

From 6f26d0160ce62817843d935b10ba2a937e715a38 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 4 Feb 2020 18:11:32 +0100
Subject: [PATCH 141/434] integrated assumed destination was moved back with
 name integrate resources path

---
 .../publish/integrate_resources_path.py | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100644 pype/plugins/global/publish/integrate_resources_path.py

diff --git a/pype/plugins/global/publish/integrate_resources_path.py b/pype/plugins/global/publish/integrate_resources_path.py
new file mode 100644
index 0000000000..56dc0e5ef7
--- /dev/null
+++ b/pype/plugins/global/publish/integrate_resources_path.py
@@ -0,0 +1,49 @@
+import os
+import pyblish.api
+
+
+class IntegrateResourcesPath(pyblish.api.InstancePlugin):
+    """Generate directory path where the files and resources will be stored"""
+
+    label = "Integrate Resources Path"
+    order = pyblish.api.IntegratorOrder - 0.05
+    families = ["clip", "projectfile", "plate"]
+
+    def process(self, instance):
+        resources = instance.data.get("resources") or []
+        transfers = instance.data.get("transfers") or []
+
+        if not resources and not transfers:
+            self.log.debug(
+                "Instance does not have `resources` and `transfers`"
+            )
+            return
+
+        resources_folder = instance.data["resourcesDir"]
+
+        # Define resource destination and transfers
+        for resource in resources:
+            # Add destination to the resource
+            source_filename = os.path.basename(
+                resource["source"]).replace("\\", "/")
+            destination = os.path.join(resources_folder, source_filename)
+
+            # Force forward slashes to fix issue with software unable
+            # to work correctly with backslashes in specific scenarios
+            # (e.g. escape characters in PLN-151 V-Ray UDIM)
+            destination = destination.replace("\\", "/")
+
+            resource['destination'] = destination
+
+            # Collect transfers for the individual files of the resource
+            # e.g. all individual files of a cache or UDIM textures.
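+            # Illustrative example with hypothetical paths, assuming
+            # resourcesDir resolved to ".../v001/resources":
+            #   resource["files"] = ["/work/tex/diffuse.1001.tx"]
+            #   -> transfers gains:
+            #      ["/work/tex/diffuse.1001.tx",
+            #       ".../v001/resources/diffuse.1001.tx"]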
+ files = resource['files'] + for fsrc in files: + fname = os.path.basename(fsrc) + fdest = os.path.join( + resources_folder, fname + ).replace("\\", "/") + transfers.append([fsrc, fdest]) + + instance.data["resources"] = resources + instance.data["transfers"] = transfers From fcffa08177efd97ffc08bbf3520eb2be6a8d02f6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 18:27:03 +0100 Subject: [PATCH 142/434] extract look uses `resourcesDir` instead of computing itself --- pype/plugins/maya/publish/extract_look.py | 63 +---------------------- 1 file changed, 2 insertions(+), 61 deletions(-) diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py index 4000011520..58196433aa 100644 --- a/pype/plugins/maya/publish/extract_look.py +++ b/pype/plugins/maya/publish/extract_look.py @@ -331,10 +331,9 @@ class ExtractLook(pype.api.Extractor): maya_path)) def resource_destination(self, instance, filepath, do_maketx): - anatomy = instance.context.data["anatomy"] - destination_dir = self.create_destination_template(instance, anatomy) + resources_dir = instance.data["resourcesDir"] # Compute destination location basename, ext = os.path.splitext(os.path.basename(filepath)) @@ -344,7 +343,7 @@ class ExtractLook(pype.api.Extractor): ext = ".tx" return os.path.join( - destination_dir, "resources", basename + ext + resources_dir, basename + ext ) def _process_texture(self, filepath, do_maketx, staging, linearise, force): @@ -408,61 +407,3 @@ class ExtractLook(pype.api.Extractor): return converted, COPY, texture_hash return filepath, COPY, texture_hash - - def create_destination_template(self, instance, anatomy): - """Create a filepath based on the current data available - - Example template: - {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/ - {subset}.{representation} - Args: - instance: the instance to publish - - Returns: - file path (str) - """ - - asset_entity = instance.context["assetEntity"] - - template_data = copy.deepcopy(instance.data["anatomyData"]) - - subset_name = instance.data["subset"] - self.log.info(subset_name) - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset_entity["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += version["name"] - - if instance.data.get('version'): - version_number = int(instance.data.get('version')) - - anatomy = instance.context.data["anatomy"] - padding = int(anatomy.templates['render']['padding']) - - template_data.update({ - "subset": subset_name, - "frame": ("#" * padding), - "version": version_number, - "representation": "TEMP" - }) - anatomy_filled = anatomy.format(template_data) - - return os.path.dirname(anatomy_filled["publish"]["path"]) From e92537d34a9c63b7cf09f1b4a46f11c30d76e90d Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 18:27:34 +0100 Subject: [PATCH 143/434] extract effects can compute resources dir with anatomyData (need changes) --- .../nukestudio/publish/extract_effects.py | 182 +++++++++--------- 1 file changed, 96 insertions(+), 86 deletions(-) diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py index 15d2a80a55..9e43bee1c8 100644 --- 
a/pype/plugins/nukestudio/publish/extract_effects.py +++ b/pype/plugins/nukestudio/publish/extract_effects.py @@ -2,10 +2,12 @@ import os import json import re +import copy import pyblish.api import tempfile from avalon import io, api + class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): """Collect video tracks effects into context.""" @@ -71,9 +73,11 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): ) data["source"] = data["sourcePath"] + # WARNING instance should not be created in Extractor! # create new instance instance = instance.context.create_instance(**data) - + # TODO replace line below with `instance.data["resourcesDir"]` + # when instance is created during collection part dst_dir = self.resource_destination_dir(instance) # change paths in effects to files @@ -141,103 +145,109 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): return (v, dst) def resource_destination_dir(self, instance): - anatomy = instance.context.data['anatomy'] - self.create_destination_template(instance, anatomy) + # WARNING this is from `collect_instance_anatomy_data.py` + anatomy_data = copy.deepcopy(instance.context.data["anatomyData"]) + project_entity = instance.context.data["projectEntity"] + context_asset_entity = instance.context.data["assetEntity"] - return os.path.join( - instance.data["assumedDestination"], - "resources" - ) - - def create_destination_template(self, instance, anatomy): - """Create a filepath based on the current data available - - Example template: - {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/ - {subset}.{representation} - Args: - instance: the instance to publish - - Returns: - file path (str) - """ - - # get all the stuff from the database - subset_name = instance.data["subset"] - self.log.info(subset_name) asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - a_template = anatomy.templates + if context_asset_entity["name"] == asset_name: + asset_entity = context_asset_entity - project = io.find_one( - { - "type": "project", - "name": project_name - }, - projection={"config": True, "data": True} - ) + else: + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) - template = a_template['publish']['path'] - # anatomy = instance.context.data['anatomy'] + subset_name = instance.data["subset"] + version_number = instance.data.get("version") + latest_version = None - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] + if asset_entity: + subset_entity = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_entity["_id"] + }) + + if subset_entity is None: + self.log.debug("Subset entity does not exist yet.") + else: + version_entity = io.find_one( + { + "type": "version", + "parent": subset_entity["_id"] + }, + sort=[("name", -1)] + ) + if version_entity: + latest_version = version_entity["name"] + + if version_number is None: + version_number = 1 + if latest_version is not None: + version_number += int(latest_version) + + anatomy_data.update({ + "asset": asset_name, + "family": instance.data["family"], + "subset": subset_name, + "version": version_number }) - assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - silo = asset.get('silo') + resolution_width = instance.data.get("resolutionWidth") + if resolution_width: + anatomy_data["resolution_width"] = resolution_width - subset = io.find_one({ - "type": "subset", - "name": subset_name, - 
"parent": asset["_id"] + resolution_height = instance.data.get("resolutionHeight") + if resolution_height: + anatomy_data["resolution_height"] = resolution_height + + fps = instance.data.get("fps") + if resolution_height: + anatomy_data["fps"] = fps + + instance.data["projectEntity"] = project_entity + instance.data["assetEntity"] = asset_entity + instance.data["anatomyData"] = anatomy_data + instance.data["latestVersion"] = latest_version + instance.data["version"] = version_number + + # WARNING this is from `collect_resources_path.py` + anatomy = instance.context.data["anatomy"] + + template_data = copy.deepcopy(instance.data["anatomyData"]) + + # This is for cases of Deprecated anatomy without `folder` + # TODO remove when all clients have solved this issue + template_data.update({ + "frame": "FRAME_TEMP", + "representation": "TEMP" }) - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) + anatomy_filled = anatomy.format(template_data) - # if there is a subset there ought to be version - if version is not None: - version_number += version["name"] + if "folder" in anatomy.templates["publish"]: + publish_folder = anatomy_filled["publish"]["folder"] + else: + # solve deprecated situation when `folder` key is not underneath + # `publish` anatomy + project_name = api.Session["AVALON_PROJECT"] + self.log.warning(( + "Deprecation warning: Anatomy does not have set `folder`" + " key underneath `publish` (in global of for project `{}`)." + ).format(project_name)) - if instance.data.get('version'): - version_number = int(instance.data.get('version')) + file_path = anatomy_filled["publish"]["path"] + # Directory + publish_folder = os.path.dirname(file_path) - padding = int(a_template['render']['padding']) + publish_folder = os.path.normpath(publish_folder) + resources_folder = os.path.join(publish_folder, "resources") - hierarchy = asset['data']['parents'] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = "/".join(hierarchy) + instance.data["publishDir"] = publish_folder + instance.data["resourcesDir"] = resources_folder - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "silo": silo, - "family": instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "frame": ('#' * padding), - "version": version_number, - "hierarchy": hierarchy, - "representation": "TEMP"} - - instance.data["assumedTemplateData"] = template_data - self.log.info(template_data) - instance.data["template"] = template - # We take the parent folder of representation 'filepath' - instance.data["assumedDestination"] = os.path.dirname( - anatomy.format(template_data)["publish"]["path"] - ) + return resources_folder From f6e6220869a53f7411b55e03468761a9f5f7c323 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 4 Feb 2020 18:54:58 +0100 Subject: [PATCH 144/434] extract yeti rig uses resourcesDir --- pype/plugins/maya/publish/extract_yeti_rig.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/pype/plugins/maya/publish/extract_yeti_rig.py b/pype/plugins/maya/publish/extract_yeti_rig.py index d390a1365a..70a509564f 100644 --- a/pype/plugins/maya/publish/extract_yeti_rig.py +++ b/pype/plugins/maya/publish/extract_yeti_rig.py @@ -1,7 +1,6 @@ import os import json import contextlib -import copy from maya import cmds @@ 
-111,16 +110,7 @@ class ExtractYetiRig(pype.api.Extractor): self.log.info("Writing metadata file") - # Create assumed destination folder for imageSearchPath - template_data = copy.deepcopy(instance.data["anatomyData"]) - - anatomy = instance.context["anatomy"] - filled = anatomy.format(template_data) - - destination_folder = os.path.dir(filled["publish"]["path"]) - - image_search_path = os.path.join(destination_folder, "resources") - image_search_path = os.path.normpath(image_search_path) + image_search_path = resources_dir = instance.data["resourcesDir"] settings = instance.data.get("rigsettings", None) if settings: From f6ae5b2213b6ee21f2e27f8a2a347a669259cc12 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 5 Feb 2020 09:59:25 +0100 Subject: [PATCH 145/434] task name is also checked --- .../global/publish/collect_instance_anatomy_data.py | 12 +++++++++--- pype/plugins/global/publish/integrate_new.py | 4 ++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py index 8a98b6cbb2..9c6a8b08f2 100644 --- a/pype/plugins/global/publish/collect_instance_anatomy_data.py +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -87,13 +87,19 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): if latest_version is not None: version_number += int(latest_version) - # Version should not be collected since may be instance - anatomy_data.update({ + anatomy_updates = { "asset": asset_name, "family": instance.data["family"], "subset": subset_name, "version": version_number - }) + } + + task_name = instance.data.get("task") + if task_name: + anatomy_updates["task"] = task_name + + # Version should not be collected since may be instance + anatomy_data.update(anatomy_updates) resolution_width = instance.data.get("resolutionWidth") if resolution_width: diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 570a093cdc..d27582bb71 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -132,6 +132,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): hierarchy = "/".join(parents) anatomy_data["hierarchy"] = hierarchy + task_name = instance.data.get("task") + if task_name: + anatomy_data["task"] = task_name + stagingdir = instance.data.get("stagingDir") if not stagingdir: self.log.info(( From 38b563495506e028f92b6078e1988235bca30c7c Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 5 Feb 2020 14:07:29 +0100 Subject: [PATCH 146/434] update nukestudio init to new install way --- pype/nukestudio/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/nukestudio/__init__.py b/pype/nukestudio/__init__.py index 097f077e15..75825d188a 100644 --- a/pype/nukestudio/__init__.py +++ b/pype/nukestudio/__init__.py @@ -51,7 +51,7 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -def install(config): +def install(): """ Installing Nukestudio integration for avalon From 9416f23466a2417059cb9d2e89ed44765d372b8a Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Wed, 5 Feb 2020 17:52:05 +0100 Subject: [PATCH 147/434] collecting AOVs, remapping for representations, family corrections --- .../global/publish/collect_filesequences.py | 325 ++++++++++-------- .../global/publish/submit_publish_job.py | 277 ++++++++------- pype/plugins/maya/create/create_render.py | 2 +- 
pype/plugins/maya/publish/collect_render.py | 96 ++++-- .../maya/publish/collect_renderable_camera.py | 4 +- .../maya/publish/submit_maya_deadline.py | 6 +- 6 files changed, 393 insertions(+), 317 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 305604ae00..83fe9e8a4b 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -16,6 +16,8 @@ import json import pyblish.api from avalon import api +from pypeapp import PypeLauncher + def collect(root, regex=None, @@ -72,9 +74,9 @@ def collect(root, class CollectRenderedFrames(pyblish.api.ContextPlugin): """Gather file sequences from working directory - When "PYPE_PUBLISH_PATHS" environment variable is set these paths - (folders or .json files) are parsed for image sequences. - Otherwise the current working directory is searched for file sequences. + When "FILESEQUENCE" environment variable is set these paths (folders or + .json files) are parsed for image sequences. Otherwise the current + working directory is searched for file sequences. The json configuration may have the optional keys: asset (str): The asset to publish to. If not provided fall back to @@ -101,6 +103,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): lut_path = None slate_frame = None families_data = None + baked_mov_path = None subset = None version = None frame_start = 0 @@ -159,8 +162,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if instance: instance_family = instance.get("family") pixel_aspect = instance.get("pixelAspect", 1) - resolution_width = instance.get("resolutionWidth", 1920) - resolution_height = instance.get("resolutionHeight", 1080) + resolution_width = instance.get( + "resolutionWidth", 1920) + resolution_height = instance.get( + "resolutionHeight", 1080) lut_path = instance.get("lutPath", None) baked_mov_path = instance.get("bakeRenderPath") families_data = instance.get("families") @@ -183,19 +188,24 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if "slate" in families_data: frame_start -= 1 - collections, remainder = collect( - root=root, - regex=regex, - exclude_regex=data.get("exclude_regex"), - frame_start=frame_start, - frame_end=frame_end, - ) + if regex: + collections, remainder = collect( + root=root, + regex=regex, + exclude_regex=data.get("exclude_regex"), + frame_start=frame_start, + frame_end=frame_end, + ) - self.log.info("Found collections: {}".format(collections)) - self.log.info("Found remainder: {}".format(remainder)) + self.log.info("Found collections: {}".format(collections)) + self.log.info("Found remainder: {}".format(remainder)) fps = data.get("fps", 25) + # adding publish comment and intent to context + context.data["comment"] = data.get("comment", "") + context.data["intent"] = data.get("intent", "") + if data.get("user"): context.data["user"] = data["user"] @@ -221,9 +231,9 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): self.log.info( "Attaching render {}:v{}".format( attach["subset"], attach["version"])) - instance = context.create_instance( + new_instance = context.create_instance( attach["subset"]) - instance.data.update( + new_instance.data.update( { "name": attach["subset"], "version": attach["version"], @@ -241,29 +251,34 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "resolutionHeight": resolution_height }) - if "representations" not in instance.data: - instance.data["representations"] = [] + if regex: + if 
"representations" not in new_instance.data: + new_instance.data["representations"] = [] - for collection in collections: - self.log.info( - " - adding representation: {}".format( - str(collection)) - ) - ext = collection.tail.lstrip(".") + for collection in collections: + self.log.info( + " - adding representation: {}".format( + str(collection)) + ) + ext = collection.tail.lstrip(".") - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - "frameStart": frame_start, - "frameEnd": frame_end - } - instance.data["representations"].append( - representation) + representation = { + "name": ext, + "ext": "{}".format(ext), + "files": list(collection), + "stagingDir": root, + "anatomy_template": "render", + "fps": fps, + "tags": ["review"], + } + new_instance.data["representations"].append( + representation) + else: + try: + representations = data["metadata"]["instance"]["representations"] # noqa: E501 + except KeyError as e: + assert False, e + new_instance.data["representations"] = representations elif subset: # if we have subset - add all collections and known @@ -285,10 +300,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "Adding representations to subset {}".format( subset)) - instance = context.create_instance(subset) + new_instance = context.create_instance(subset) data = copy.deepcopy(data) - instance.data.update( + new_instance.data.update( { "name": subset, "family": families[0], @@ -309,138 +324,158 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): } ) - if "representations" not in instance.data: - instance.data["representations"] = [] + if "representations" not in new_instance.data: + new_instance.data["representations"] = [] - for collection in collections: - self.log.info(" - {}".format(str(collection))) + if regex: + for collection in collections: + self.log.info(" - {}".format(str(collection))) - ext = collection.tail.lstrip(".") + ext = collection.tail.lstrip(".") - if "slate" in instance.data["families"]: - frame_start += 1 - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": frame_start, - "frameEnd": frame_end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"] if not baked_mov_path else [], - } - instance.data["representations"].append( - representation) - - # filter out only relevant mov in case baked available - self.log.debug("__ remainder {}".format(remainder)) - if baked_mov_path: - remainder = [r for r in remainder - if r in baked_mov_path] - self.log.debug("__ remainder {}".format(remainder)) - - # process reminders - for rem in remainder: - # add only known types to representation - if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']: - self.log.info(" . 
{}".format(rem)) - - if "slate" in instance.data["families"]: + if "slate" in new_instance.data["families"]: frame_start += 1 - tags = ["review"] - - if baked_mov_path: - tags.append("delete") - representation = { - "name": rem.split(".")[-1], - "ext": "{}".format(rem.split(".")[-1]), - "files": rem, - "stagingDir": root, + "name": ext, + "ext": "{}".format(ext), + "files": list(collection), "frameStart": frame_start, + "frameEnd": frame_end, + "stagingDir": root, "anatomy_template": "render", "fps": fps, - "tags": tags + "tags": ["review"] if not baked_mov_path else [], } - instance.data["representations"].append( - representation) + new_instance.data["representations"].append( + representation) + + # filter out only relevant mov in case baked available + self.log.debug("__ remainder {}".format(remainder)) + if baked_mov_path: + remainder = [r for r in remainder + if r in baked_mov_path] + self.log.debug("__ remainder {}".format(remainder)) + + # process reminders + for rem in remainder: + # add only known types to representation + if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']: + self.log.info(" . {}".format(rem)) + + if "slate" in instance.data["families"]: + frame_start += 1 + + tags = ["review"] + + if baked_mov_path: + tags.append("delete") + + representation = { + "name": rem.split(".")[-1], + "ext": "{}".format(rem.split(".")[-1]), + "files": rem, + "stagingDir": root, + "frameStart": frame_start, + "anatomy_template": "render", + "fps": fps, + "tags": tags + } + new_instance.data["representations"].append( + representation) + else: + try: + representations = data["metadata"]["instance"]["representations"] # noqa: E501 + except KeyError as e: + assert False, e + + new_instance.data["representations"] = representations else: # we have no subset so we take every collection and create one # from it - for collection in collections: - instance = context.create_instance(str(collection)) - self.log.info("Creating subset from: %s" % str(collection)) + if regex: + for collection in collections: + new_instance = context.create_instance(str(collection)) + self.log.info( + "Creating subset from: %s" % str(collection)) - # Ensure each instance gets a unique reference to the data - data = copy.deepcopy(data) + # Ensure each instance gets a unique + # reference to the data + data = copy.deepcopy(data) - # If no subset provided, get it from collection's head - subset = data.get("subset", collection.head.rstrip("_. ")) + # If no subset provided, get it from collection's head + subset = data.get( + "subset", collection.head.rstrip("_. 
")) - # If no start or end frame provided, get it from collection - indices = list(collection.indexes) - start = data.get("frameStart", indices[0]) - end = data.get("frameEnd", indices[-1]) + # If no start or end frame provided, + # get it from collection + indices = list(collection.indexes) + start = data.get("frameStart", indices[0]) + end = data.get("frameEnd", indices[-1]) - ext = list(collection)[0].split(".")[-1] + ext = list(collection)[0].split(".")[-1] - if "review" not in families: - families.append("review") + if "review" not in families: + families.append("review") - instance.data.update( - { - "name": str(collection), - "family": families[0], # backwards compatibility - "families": list(families), - "subset": subset, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, + new_instance.data.update( + { + "name": str(collection), + "family": families[0], + "families": list(families), + "subset": subset, + "asset": data.get( + "asset", api.Session["AVALON_ASSET"]), + "stagingDir": root, + "frameStart": start, + "frameEnd": end, + "fps": fps, + "source": data.get("source", ""), + "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "version": version + } + ) + if lut_path: + new_instance.data.update({"lutPath": lut_path}) + + new_instance.append(collection) + new_instance.context.data["fps"] = fps + + if "representations" not in new_instance.data: + new_instance.data["representations"] = [] + + representation = { + "name": ext, + "ext": "{}".format(ext), + "files": list(collection), "frameStart": start, "frameEnd": end, + "stagingDir": root, + "anatomy_template": "render", "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "version": version + "tags": ["review"], } - ) - if lut_path: - instance.data.update({"lutPath": lut_path}) + new_instance.data["representations"].append( + representation) - instance.append(collection) - instance.context.data["fps"] = fps - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - } - instance.data["representations"].append(representation) - - # temporary ... allow only beauty on ftrack - if session['AVALON_APP'] == "maya": - AOV_filter = ['beauty'] - for aov in AOV_filter: - if aov not in instance.data['subset']: - instance.data['families'].remove('review') - instance.data['families'].remove('ftrack') - representation["tags"].remove('review') + # temporary ... 
allow only beauty on ftrack + if session['AVALON_APP'] == "maya": + AOV_filter = ['beauty'] + for aov in AOV_filter: + if aov not in new_instance.data['subset']: + new_instance.data['families'].remove( + 'review') + new_instance.data['families'].remove( + 'ftrack') + representation["tags"].remove('review') + self.log.info("remapping paths ...") + new_instance.data["representations"] = [PypeLauncher.path_remapper(r) for r in new_instance.data["representations"]] # noqa: E501 self.log.debug( "__ representations {}".format( - instance.data["representations"])) + new_instance.data["representations"])) self.log.debug( - "__ instance.data {}".format(instance.data)) + "__ instance.data {}".format(new_instance.data)) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 48efbcde7a..b5a780464a 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -2,6 +2,7 @@ import os import json import re import logging +from collections import namedtuple from avalon import api, io from avalon.vendor import requests, clique @@ -9,21 +10,23 @@ from avalon.vendor import requests, clique import pyblish.api +AOVFilter = namedtuple("AOVFilter", ["app", "aov"]) + + def _get_script(): """Get path to the image sequence script""" try: from pype.scripts import publish_filesequence except Exception: - raise RuntimeError("Expected module 'publish_deadline'" - "to be available") + assert False, "Expected module 'publish_deadline'to be available" module_path = publish_filesequence.__file__ if module_path.endswith(".pyc"): - module_path = module_path[:-len(".pyc")] + ".py" + module_path = module_path[: -len(".pyc")] + ".py" module_path = os.path.normpath(module_path) - mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT']) - network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH']) + mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"]) + network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"]) module_path = module_path.replace(mount_root, network_root) @@ -34,39 +37,29 @@ def _get_script(): def get_latest_version(asset_name, subset_name, family): # Get asset asset_name = io.find_one( - { - "type": "asset", - "name": asset_name - }, - projection={"name": True} + {"type": "asset", "name": asset_name}, projection={"name": True} ) subset = io.find_one( - { - "type": "subset", - "name": subset_name, - "parent": asset_name["_id"] - }, - projection={"_id": True, "name": True} + {"type": "subset", "name": subset_name, "parent": asset_name["_id"]}, + projection={"_id": True, "name": True}, ) # Check if subsets actually exists (pre-run check) assert subset, "No subsets found, please publish with `extendFrames` off" # Get version - version_projection = {"name": True, - "data.startFrame": True, - "data.endFrame": True, - "parent": True} + version_projection = { + "name": True, + "data.startFrame": True, + "data.endFrame": True, + "parent": True, + } version = io.find_one( - { - "type": "version", - "parent": subset["_id"], - "data.families": family - }, + {"type": "version", "parent": subset["_id"], "data.families": family}, projection=version_projection, - sort=[("name", -1)] + sort=[("name", -1)], ) assert version, "No version found, this is a bug" @@ -87,8 +80,12 @@ def get_resources(version, extension=None): directory = api.get_representation_path(representation) print("Source: ", directory) - resources = sorted([os.path.normpath(os.path.join(directory, 
fname)) - for fname in os.listdir(directory)]) + resources = sorted( + [ + os.path.normpath(os.path.join(directory, fname)) + for fname in os.listdir(directory) + ] + ) return resources @@ -149,23 +146,22 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): hosts = ["fusion", "maya", "nuke"] - families = [ - "render.farm", - "renderlayer", - "imagesequence" - ] + families = ["render.farm", "renderlayer", "imagesequence"] + + # this will add review and ftrack tag only to `beauty` in `maya` app + aov_filter = [AOVFilter("maya", ["beauty"])] enviro_filter = [ - "PATH", - "PYTHONPATH", - "FTRACK_API_USER", - "FTRACK_API_KEY", - "FTRACK_SERVER", - "PYPE_ROOT", - "PYPE_METADATA_FILE", - "PYPE_STUDIO_PROJECTS_PATH", - "PYPE_STUDIO_PROJECTS_MOUNT" - ] + "PATH", + "PYTHONPATH", + "FTRACK_API_USER", + "FTRACK_API_KEY", + "FTRACK_SERVER", + "PYPE_ROOT", + "PYPE_METADATA_FILE", + "PYPE_STUDIO_PROJECTS_PATH", + "PYPE_STUDIO_PROJECTS_MOUNT", + ] def _submit_deadline_post_job(self, instance, job): """ @@ -176,8 +172,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): data = instance.data.copy() subset = data["subset"] job_name = "{batch} - {subset} [publish image sequence]".format( - batch=job["Props"]["Name"], - subset=subset + batch=job["Props"]["Name"], subset=subset ) metadata_filename = "{}_metadata.json".format(subset) @@ -185,9 +180,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): metadata_path = os.path.join(output_dir, metadata_filename) metadata_path = os.path.normpath(metadata_path) - mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT']) + mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"]) network_root = os.path.normpath( - os.environ['PYPE_STUDIO_PROJECTS_PATH']) + os.environ["PYPE_STUDIO_PROJECTS_PATH"] + ) metadata_path = metadata_path.replace(mount_root, network_root) @@ -197,21 +193,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Plugin": "Python", "BatchName": job["Props"]["Batch"], "Name": job_name, - "JobType": "Normal", "JobDependency0": job["_id"], "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), - "Priority": job["Props"]["Pri"] + "Priority": job["Props"]["Pri"], }, "PluginInfo": { "Version": "3.6", "ScriptFile": _get_script(), "Arguments": "", - "SingleFrameOnly": "True" + "SingleFrameOnly": "True", }, - # Mandatory for Deadline, may be empty - "AuxFiles": [] + "AuxFiles": [], } # Transfer the environment from the original job to this dependent @@ -225,12 +219,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.log.info("FILTER: {}".format(self.enviro_filter)) if key.upper() in self.enviro_filter: - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % i: "{key}={value}".format( - key=key, - value=environment[key] - ) - }) + payload["JobInfo"].update( + { + "EnvironmentKeyValue%d" + % i: "{key}={value}".format( + key=key, value=environment[key] + ) + } + ) i += 1 # Avoid copied pools and remove secondary pool @@ -295,33 +291,32 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): fps tags """ - - # Get a submission job data = instance.data.copy() + context = instance.context + render_job = data.pop("deadlineSubmissionJob", None) submission_type = "deadline" - if not render_job: # No deadline job. 
Try Muster: musterSubmissionJob render_job = data.pop("musterSubmissionJob", None) submission_type = "muster" - if not render_job: - raise RuntimeError("Can't continue without valid Deadline " - "or Muster submission prior to this " - "plug-in.") + assert render_job, ( + "Can't continue without valid Deadline " + "or Muster submission prior to this " + "plug-in." + ) if submission_type == "deadline": - self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", - "http://localhost:8082") + self.DEADLINE_REST_URL = os.environ.get( + "DEADLINE_REST_URL", "http://localhost:8082" + ) assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" self._submit_deadline_post_job(instance, render_job) asset = data.get("asset") or api.Session["AVALON_ASSET"] - subset = data["subset"] + subset = data.get("subset") - # Get start/end frame from instance, if not available get from context - context = instance.context start = instance.data.get("frameStart") if start is None: start = context.data["frameStart"] @@ -329,45 +324,76 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if end is None: end = context.data["frameEnd"] - # Add in regex for sequence filename - # This assumes the output files start with subset name and ends with - # a file extension. The "ext" key includes the dot with the extension. - if "ext" in instance.data: - ext = r"\." + re.escape(instance.data["ext"]) - else: - ext = r"\.\D+" - - regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset), - ext=ext) - try: - source = data['source'] + source = data["source"] except KeyError: source = context.data["currentFile"] - source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), - api.registered_root()) - + source = source.replace( + os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root() + ) relative_path = os.path.relpath(source, api.registered_root()) source = os.path.join("{root}", relative_path).replace("\\", "/") + regex = None - # find subsets and version to attach render to - attach_to = instance.data.get("attachTo") - attach_subset_versions = [] - if attach_to: - for subset in attach_to: - for instance in context: - if instance.data["subset"] != subset["subset"]: - continue - attach_subset_versions.append( - {"version": instance.data["version"], - "subset": subset["subset"], - "family": subset["family"]}) + if data.get("expectedFiles"): + representations = [] + cols, rem = clique.assemble(data.get("expectedFiles")) + for c in cols: + ext = c.tail.lstrip(".") + review = True + for filter in self.aov_filter: + if os.environ.get("AVALON_APP", "") == filter.app: + for aov in filter.aov: + if re.match( + r"(\.|_)({})(\.|_)".format(aov), list(c)[0] + ): + review = False + rep = { + "name": ext, + "ext": ext, + "files": [os.path.basename(f) for f in list(c)], + "frameStart": int(start), + "frameEnd": int(end), + # If expectedFile are absolute, we need only filenames + "stagingDir": os.path.dirname(list(c)[0]), + "anatomy_template": "render", + "fps": context.data.get("fps", None), + "tags": ["review"] if review else [], + } + representations.append(rep) + + for r in rem: + ext = r.split(".")[-1] + rep = { + "name": ext, + "ext": ext, + "files": os.path.basename(r), + "stagingDir": os.path.dirname(r), + "anatomy_template": "publish", + } + + representations.append(rep) + + if "representations" not in instance.data: + data["representations"] = [] + + # add representation + data["representations"] += representations + + else: + if "ext" in instance.data: + ext = r"\." 
+ re.escape(instance.data["ext"]) + else: + ext = r"\.\D+" + + regex = r"^{subset}.*\d+{ext}$".format( + subset=re.escape(subset), ext=ext + ) # Write metadata for publish job metadata = { "asset": asset, - "regex": regex, "frameStart": start, "frameEnd": end, "fps": context.data.get("fps", None), @@ -375,28 +401,30 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "source": source, "user": context.data["user"], "version": context.data["version"], - "attachTo": attach_subset_versions, "intent": context.data.get("intent"), "comment": context.data.get("comment"), # Optional metadata (for debugging) "metadata": { - "instance": data, "job": render_job, - "session": api.Session.copy() - } + "session": api.Session.copy(), + "instance": data, + }, } if api.Session["AVALON_APP"] == "nuke": - metadata['subset'] = subset + metadata["subset"] = subset if submission_type == "muster": ftrack = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"), - "FTRACK_SERVER": os.environ.get("FTRACK_SERVER") + "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"), } metadata.update({"ftrack": ftrack}) + if regex: + metadata["regex"] = regex + # Ensure output dir exists output_dir = instance.data["outputDir"] if not os.path.isdir(output_dir): @@ -418,16 +446,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # Frame comparison prev_start = None prev_end = None - resource_range = range(int(start), int(end)+1) + resource_range = range(int(start), int(end) + 1) # Gather all the subset files (one subset per render pass!) subset_names = [data["subset"]] subset_names.extend(data.get("renderPasses", [])) resources = [] for subset_name in subset_names: - version = get_latest_version(asset_name=data["asset"], - subset_name=subset_name, - family=family) + version = get_latest_version( + asset_name=data["asset"], + subset_name=subset_name, + family=family, + ) # Set prev start / end frames for comparison if not prev_start and not prev_end: @@ -435,9 +465,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): prev_end = version["data"]["frameEnd"] subset_resources = get_resources(version, _ext) - resource_files = get_resource_files(subset_resources, - resource_range, - override) + resource_files = get_resource_files( + subset_resources, resource_range, override + ) resources.extend(resource_files) @@ -445,27 +475,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): updated_end = max(end, prev_end) # Update metadata and instance start / end frame - self.log.info("Updating start / end frame : " - "{} - {}".format(updated_start, updated_end)) - - # TODO : Improve logic to get new frame range for the - # publish job (publish_filesequence.py) - # The current approach is not following Pyblish logic - # which is based - # on Collect / Validate / Extract. - - # ---- Collect Plugins --- - # Collect Extend Frames - Only run if extendFrames is toggled - # # # Store in instance: - # # # Previous rendered files per subset based on frames - # # # --> Add to instance.data[resources] - # # # Update publish frame range - - # ---- Validate Plugins --- - # Validate Extend Frames - # # # Check if instance has the requirements to extend frames - # There might have been some things which can be added to the list - # Please do so when fixing this. 
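+        # Worked example with hypothetical values: if the previous version
+        # covered frames 1001-1100 and the current render covers 1050-1150,
+        # the min()/max() above extend the published range to 1001-1150.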
+ self.log.info( + "Updating start / end frame : " + "{} - {}".format(updated_start, updated_end) + ) # Start frame metadata["frameStart"] = updated_start diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index 668c6412eb..4c07e8c118 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -14,7 +14,7 @@ class CreateRender(avalon.maya.Creator): """Create render layer for export""" label = "Render" - family = "render" + family = "rendering" icon = "eye" defaults = ["Main"] diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 375b237583..0cab118b2e 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -1,7 +1,8 @@ import re import os import types -from abc import ABC, abstractmethod +# TODO: pending python 3 upgrade +from abc import ABCMeta, abstractmethod from maya import cmds import maya.app.renderSetup.model.renderSetup as renderSetup @@ -48,12 +49,11 @@ class CollectMayaRender(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder + 0.01 hosts = ["maya"] label = "Collect Render Layers" - families = ["render"] def process(self, context): render_instance = None for instance in context: - if 'render' in instance.data['families']: + if 'rendering' in instance.data['families']: render_instance = instance if not render_instance: @@ -65,6 +65,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): collected_render_layers = render_instance.data['setMembers'] filepath = context.data["currentFile"].replace("\\", "/") asset = api.Session["AVALON_ASSET"] + workspace = context.data["workspaceDir"] self._rs = renderSetup.instance() maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()} @@ -120,11 +121,19 @@ class CollectMayaRender(pyblish.api.ContextPlugin): # frame range exp_files = ExpectedFiles().get(renderer, layer_name) + # append full path + full_exp_files = [] + for ef in exp_files: + full_path = os.path.join(workspace, "render", ef) + full_path = full_path.replace("\\", "/") + full_exp_files.append(full_path) + + self.log.info("collecting layer: {}".format(layer_name)) # Get layer specific settings, might be overrides data = { "subset": expected_layer_name, "attachTo": attachTo, - "setMembers": expected_layer_name, + "setMembers": layer_name, "publish": True, "frameStart": self.get_render_attribute("startFrame", layer=layer_name), @@ -136,7 +145,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): layer=layer_name), # instance subset - "family": "Render Layers", + "family": "renderlayer", "families": ["renderlayer"], "asset": asset, "time": api.time(), @@ -145,7 +154,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): # Add source to allow tracing back to the scene from # which was submitted originally "source": filepath, - "expectedFiles": exp_files + "expectedFiles": full_exp_files } # Apply each user defined attribute as data @@ -201,9 +210,6 @@ class CollectMayaRender(pyblish.api.ContextPlugin): if pool_b: options["renderGlobals"].update({"SecondaryPool": pool_b}) - legacy = attributes["useLegacyRenderLayers"] - options["renderGlobals"]["UseLegacyRenderLayers"] = legacy - # Machine list machine_list = attributes["machineList"] if machine_list: @@ -267,22 +273,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin): return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), layer=layer) - def _get_layer_overrides(self, attr, layer): - 
connections = cmds.listConnections(attr, plugs=True) - if connections: - for connection in connections: - if connection: - node_name = connection.split('.')[0] - if cmds.nodeType(node_name) == 'renderLayer': - attr_name = '%s.value' % '.'.join( - connection.split('.')[:-1]) - if node_name == layer: - yield cmds.getAttr(attr_name) - class ExpectedFiles: - def get(renderer, layer): + def get(self, renderer, layer): if renderer.lower() == 'arnold': return ExpectedFilesArnold(layer).get_files() elif renderer.lower() == 'vray': @@ -298,8 +292,8 @@ class ExpectedFiles: "unsupported {}".format(renderer)) -class AExpectedFiles(ABC): - +class AExpectedFiles: + __metaclass__ = ABCMeta renderer = None layer = None @@ -356,6 +350,9 @@ class AExpectedFiles(ABC): # every renderable camera in layer. expected_files = [] + layer_name = self.layer + if self.layer.startswith("rs_"): + layer_name = self.layer[3:] start_frame = int(self.get_render_attribute('startFrame')) end_frame = int(self.get_render_attribute('endFrame')) frame_step = int(self.get_render_attribute('byFrameStep')) @@ -368,7 +365,7 @@ class AExpectedFiles(ABC): mappings = ( (R_SUBSTITUTE_SCENE_TOKEN, scene_name), - (R_SUBSTITUTE_LAYER_TOKEN, self.layer), + (R_SUBSTITUTE_LAYER_TOKEN, layer_name), (R_SUBSTITUTE_CAMERA_TOKEN, cam), (R_SUBSTITUTE_AOV_TOKEN, aov[0]) ) @@ -377,7 +374,9 @@ class AExpectedFiles(ABC): file_prefix = re.sub(regex, value, file_prefix) for frame in range( - int(start_frame), int(end_frame), int(frame_step)): + int(start_frame), + int(end_frame) + 1, + int(frame_step)): expected_files.append( '{}.{}.{}'.format(file_prefix, str(frame).rjust(padding, "0"), @@ -386,7 +385,7 @@ class AExpectedFiles(ABC): else: mappings = ( (R_SUBSTITUTE_SCENE_TOKEN, scene_name), - (R_SUBSTITUTE_LAYER_TOKEN, self.layer), + (R_SUBSTITUTE_LAYER_TOKEN, layer_name), (R_SUBSTITUTE_CAMERA_TOKEN, cam) ) @@ -394,7 +393,9 @@ class AExpectedFiles(ABC): file_prefix = re.sub(regex, value, file_prefix) for frame in range( - int(start_frame), int(end_frame), int(frame_step)): + int(start_frame), + int(end_frame) + 1, + int(frame_step)): expected_files.append( '{}.{}.{}'.format(file_prefix, str(frame).rjust(padding, "0"), @@ -418,6 +419,7 @@ class AExpectedFiles(ABC): if renderable: renderable_cameras.append(cam) + return renderable_cameras def maya_is_true(self, attr_val): """ @@ -433,6 +435,22 @@ class AExpectedFiles(ABC): else: return bool(attr_val) + def get_layer_overrides(self, attr, layer): + connections = cmds.listConnections(attr, plugs=True) + if connections: + for connection in connections: + if connection: + node_name = connection.split('.')[0] + if cmds.nodeType(node_name) == 'renderLayer': + attr_name = '%s.value' % '.'.join( + connection.split('.')[:-1]) + if node_name == layer: + yield cmds.getAttr(attr_name) + + def get_render_attribute(self, attr): + return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), + layer=self.layer) + class ExpectedFilesArnold(AExpectedFiles): @@ -449,10 +467,10 @@ class ExpectedFilesArnold(AExpectedFiles): } def __init__(self, layer): - super(self).__init__(layer) + super(ExpectedFilesArnold, self).__init__(layer) self.renderer = 'arnold' - def _get_aovs(self): + def get_aovs(self): enabled_aovs = [] if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode') and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')): @@ -490,16 +508,26 @@ class ExpectedFilesArnold(AExpectedFiles): aov_ext ) ) + if not enabled_aovs: + # if there are no AOVs, append 'beauty' as this is arnolds + # default. 
If token is specified and no AOVs are + # defined, this will be used. + enabled_aovs.append( + ( + 'beauty', + cmds.getAttr('defaultRenderGlobals.imfPluginKey') + ) + ) return enabled_aovs class ExpectedFilesVray(AExpectedFiles): def __init__(self, layer): - super(self).__init__(layer) + super(ExpectedFilesVray, self).__init__(layer) self.renderer = 'vray' - def _get_aovs(self): + def get_aovs(self): default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey') enabled_aovs = [] @@ -545,10 +573,10 @@ class ExpectedFilesVray(AExpectedFiles): class ExpectedFilesRedshift(AExpectedFiles): def __init__(self, layer): - super(self).__init__(layer) + super(ExpectedFilesRedshift, self).__init__(layer) self.renderer = 'redshift' - def _get_aovs(self): + def get_aovs(self): enabled_aovs = [] default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey') rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')] diff --git a/pype/plugins/maya/publish/collect_renderable_camera.py b/pype/plugins/maya/publish/collect_renderable_camera.py index 707d52ef69..13b847cee4 100644 --- a/pype/plugins/maya/publish/collect_renderable_camera.py +++ b/pype/plugins/maya/publish/collect_renderable_camera.py @@ -16,8 +16,8 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin): "renderlayer"] def process(self, instance): - layer = "rs_%s" % instance.data["setMembers"] - + layer = instance.data["setMembers"] + self.log.info("layer: {}".format(layer)) cameras = cmds.ls(type="camera", long=True) renderable = [c for c in cameras if lib.get_attr_in_layer("%s.renderable" % c, layer=layer)] diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index e3fa79b1c8..f28b9f5474 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -150,8 +150,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): dirname = os.path.join(workspace, "renders") renderlayer = instance.data['setMembers'] # rs_beauty renderlayer_name = instance.data['subset'] # beauty - renderlayer_globals = instance.data["renderGlobals"] - legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] + # renderlayer_globals = instance.data["renderGlobals"] + # legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] deadline_user = context.data.get("deadlineUser", getpass.getuser()) jobname = "%s - %s" % (filename, instance.name) @@ -212,7 +212,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "UsingRenderLayers": True, # Use legacy Render Layer system - "UseLegacyRenderLayers": legacy_layers, + # "UseLegacyRenderLayers": legacy_layers, # Render only this layer "RenderLayer": renderlayer, From 255947b2a57c63b2532bf2f12fa7a4a48104bc9f Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Wed, 5 Feb 2020 18:54:21 +0100 Subject: [PATCH 148/434] fixing maya renders dir, normalizing path before copying --- .../global/publish/collect_filesequences.py | 45 ++++++++++++++++--- pype/plugins/global/publish/integrate_new.py | 3 +- pype/plugins/maya/publish/collect_render.py | 2 +- 3 files changed, 41 insertions(+), 9 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 83fe9e8a4b..59b2623877 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -108,6 +108,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): version = None frame_start = 0 frame_end = 0 + new_instance = None + 
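+        # PYPE_PUBLISH_PATHS is an os.pathsep separated list of folders or
+        # .json files to parse, e.g. (hypothetical value):
+        #   PYPE_PUBLISH_PATHS=/renders/sh010:/renders/sh010_metadata.json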
if os.environ.get("PYPE_PUBLISH_PATHS"): paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) self.log.info("Collecting paths: {}".format(paths)) @@ -394,6 +396,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): else: # we have no subset so we take every collection and create one # from it + if regex: for collection in collections: new_instance = context.create_instance(str(collection)) @@ -471,11 +474,39 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): new_instance.data['families'].remove( 'ftrack') representation["tags"].remove('review') + else: + subset = data["metadata"]["instance"]["subset"] + self.log.info("Creating new subset: {}".format(subset)) + new_instance = context.create_instance(subset) + data = copy.deepcopy(data) + new_instance.data.update( + { + "name": subset, + "family": 'render', + "families": ['render'], + "subset": subset, + "asset": data.get( + "asset", api.Session["AVALON_ASSET"]), + "stagingDir": root, + "frameStart": frame_start, + "frameEnd": frame_end, + "fps": fps, + "source": data.get("source", ""), + "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "slateFrame": slate_frame + } + ) + new_instance.data["representations"] = data["metadata"]["instance"]["representations"] - self.log.info("remapping paths ...") - new_instance.data["representations"] = [PypeLauncher.path_remapper(r) for r in new_instance.data["representations"]] # noqa: E501 - self.log.debug( - "__ representations {}".format( - new_instance.data["representations"])) - self.log.debug( - "__ instance.data {}".format(new_instance.data)) + if new_instance: + self.log.info("remapping paths ...") + new_instance.data["representations"] = [PypeLauncher.path_remapper(r) for r in new_instance.data["representations"]] # noqa: E501 + self.log.debug( + "__ representations {}".format( + new_instance.data["representations"])) + self.log.debug( + "__ instance.data {}".format(new_instance.data)) + else: + self.log.error("nothing collected") diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 7d95534897..dc5dc7be94 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -511,7 +511,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ src = self.unc_convert(src) dst = self.unc_convert(dst) - + src = os.path.normpath(src) + dst = os.path.normpath(dst) self.log.debug("Copying file .. 
{} -> {}".format(src, dst)) dirname = os.path.dirname(dst) try: diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 0cab118b2e..4096ef3c0e 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -124,7 +124,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): # append full path full_exp_files = [] for ef in exp_files: - full_path = os.path.join(workspace, "render", ef) + full_path = os.path.join(workspace, "renders", ef) full_path = full_path.replace("\\", "/") full_exp_files.append(full_path) From 75b603d845fe44d6ba5f39268137ca0f6128763e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 6 Feb 2020 10:46:03 +0100 Subject: [PATCH 149/434] removed add datetime and add frame numbers --- pype/scripts/otio_burnin.py | 38 ------------------------------------- 1 file changed, 38 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index f128352974..aca848dcfa 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -132,44 +132,6 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): options = ffmpeg_burnins.TextOptions(**self.options_init) self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT) - def add_datetime(self, date_format, align, options=None): - """ - Adding date text to a filter. Using pythons datetime module. - - :param str date_format: format of date (e.g. `%d.%m.%Y`) - :param enum align: alignment, must use provided enum flags - :param dict options: recommended to use TextOptions - """ - if not options: - options = ffmpeg_burnins.TextOptions(**self.options_init) - today = datetime.datetime.today() - text = today.strftime(date_format) - self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT) - - def add_frame_numbers( - self, align, options=None, start_frame=None, text=None - ): - """ - Convenience method to create the frame number expression. - - :param enum align: alignment, must use provided enum flags - :param dict options: recommended to use FrameNumberOptions - """ - if not options: - options = ffmpeg_burnins.FrameNumberOptions(**self.options_init) - if start_frame: - options['frame_offset'] = start_frame - - expr = r'%%{eif\:n+%d\:d}' % options['frame_offset'] - _text = str(int(self.end_frame + options['frame_offset'])) - if text and isinstance(text, str): - text = r"{}".format(text) - expr = text.replace("{current_frame}", expr) - text = text.replace("{current_frame}", _text) - - options['expression'] = expr - self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT) - def add_timecode(self, align, options=None, start_frame=None): """ Convenience method to create the frame number expression. 
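The DRAWTEXT and TIMECODE constants introduced in PATCH 151 below are plain %-style string templates for ffmpeg's drawtext filter. A minimal sketch of how such a template expands into a filter string; the dictionary values here are hypothetical and not taken from the repository:

    # Hypothetical fill of the DRAWTEXT template from PATCH 151 below;
    # only the dict values are invented for illustration.
    drawtext_template = (
        "drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
        "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
    )
    filter_str = drawtext_template % {
        "text": "sh010",             # burn-in text
        "x": 10, "y": 10,            # position values or ffmpeg expressions
        "color": "white",            # font color name
        "opacity": 1.0,              # alpha appended after '@'
        "size": 42,                  # font size in pixels
        "font": "/fonts/arial.ttf",  # hypothetical font file path
    }
    # filter_str now reads:
    # drawtext=text=\'sh010\':x=10:y=10:fontcolor=white@1.0:fontsize=42:fontfile='/fonts/arial.ttf'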
From 8c75c74cdec4c08225723f9f8328046999588735 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:49:59 +0100
Subject: [PATCH 150/434] print command before run

---
 pype/scripts/otio_burnin.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index aca848dcfa..b9d10ca23a 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -226,9 +226,13 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
 
         is_sequence = "%" in output
 
-        command = self.command(output=output,
-                               args=args,
-                               overwrite=overwrite)
+        command = self.command(
+            output=output,
+            args=args,
+            overwrite=overwrite
+        )
+        print(command)
+
         proc = Popen(command, shell=True)
         proc.communicate()
         if proc.returncode != 0:

From 8e86f6e37a0b6fb4ce794e67372b65d49f85b813 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:50:18 +0100
Subject: [PATCH 151/434] implemented custom drawtext and timecode constants

---
 pype/scripts/otio_burnin.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index b9d10ca23a..00d63939e7 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -27,6 +27,15 @@ FFPROBE = (
     '{} -v quiet -print_format json -show_format -show_streams %(source)s'
 ).format(os.path.normpath(ffmpeg_path + "ffprobe"))
 
+DRAWTEXT = (
+    "drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
+    "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
+)
+TIMECODE = (
+    "drawtext=text=\\'%(text)s\\':timecode=\\'%(timecode)s\\'"
+    ":timecode_rate=%(fps).2f:x=%(x)s:y=%(y)s:fontcolor="
+    "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
+)
 
 def _streams(source):
     """Reimplemented from otio burnins to be able use full path to ffprobe
     :param str source: source media file

From 4f862acf8af3b12315008bd1e95b773fd58fc56 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:50:51 +0100
Subject: [PATCH 152/434] added constants for easier handling of entered keys

---
 pype/scripts/otio_burnin.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 00d63939e7..e0df769db4 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -37,6 +37,11 @@ TIMECODE = (
     "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
 )
 
+MISSING_KEY_VALUE = "N/A"
+CURRENT_FRAME_KEY = "{current_frame}"
+TIME_CODE_KEY = "{timecode}"
+
+
 def _streams(source):
     """Reimplemented from otio burnins to be able use full path to ffprobe
     :param str source: source media file
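Note: the DRAWTEXT/TIMECODE templates from PATCH 151 are plain %-style format strings, and the constants from PATCH 152 are the placeholder keys users put into burnin presets. A minimal sketch of how the drawtext template expands into an ffmpeg filter (all values below are invented for illustration):

    # How the DRAWTEXT template from PATCH 151 expands; in the real code
    # the values come from ffmpeg_burnins option dicts, not literals.
    DRAWTEXT = (
        "drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
        "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
    )
    filter_str = DRAWTEXT % {
        "text": "sh0010",
        "x": 10,
        "y": 10,
        "color": "white",
        "opacity": 1.0,
        "size": 42,
        "font": "/path/to/font.ttf",  # invented path
    }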
From b73fcc6730e7b3367988a7aa636f363b60d82204 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:51:09 +0100
Subject: [PATCH 153/434] modified docstring

---
 pype/scripts/otio_burnin.py | 28 ++++++++--------------------
 1 file changed, 8 insertions(+), 20 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index e0df769db4..79565af22a 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -307,34 +307,22 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
     - each key of "burnins" represents Alignment, there are 6 possibilities:
         TOP_LEFT TOP_CENTERED TOP_RIGHT
         BOTTOM_LEFT BOTTOM_CENTERED BOTTOM_RIGHT
-    - value for each key is dict which should contain "function" which says
-      what kind of burnin is that:
-      "text", "timecode" or "frame_numbers"
-    - "text" key with content is also required when "text" function is used
+    - value must be string with text you want to burn-in
+    - text may contain specific formatting keys (explained below)
 
     Requirement of *data* keys is based on presets.
-    - "start_frame" - is required when "timecode" or "frame_numbers" function is used
-    - "start_frame_tc" - when "timecode" should start with different frame
+    - "frame_start" - is required when "timecode" or "current_frame" is in keys
+    - "frame_start_tc" - when "timecode" should start with different frame
+    - *keys for static text*
 
     EXAMPLE:
    preset = {
        "options": {*OPTIONS FOR LOOK*},
        "burnins": {
-           "TOP_LEFT": {
-               "function": "text",
-               "text": "static_text"
-           },
-           "TOP_RIGHT": {
-               "function": "text",
-               "text": "{shot}"
-           },
-           "BOTTOM_LEFT": {
-               "function": "timecode"
-           },
-           "BOTTOM_RIGHT": {
-               "function": "frame_numbers"
-           }
+           "TOP_LEFT": "static_text",
+           "TOP_RIGHT": "{shot}",
+           "BOTTOM_LEFT": "TC: {timecode}",
+           "BOTTOM_RIGHT": "{frame_start}{current_frame}"
        }
    }
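Note: with the docstring above, a complete preset/data pair would look roughly like the sketch below (values are invented; the exact function signature changes again in PATCH 163):

    # Sketch of a preset and data dict matching the PATCH 153 docstring.
    preset = {
        "options": {"font_size": 42, "opacity": 1.0},  # "look" options
        "burnins": {
            "TOP_RIGHT": "{shot}",             # static text filled from data
            "BOTTOM_LEFT": "TC: {timecode}",   # rendered as running timecode
            "BOTTOM_RIGHT": "{current_frame}"  # rendered as current frame
        }
    }
    data = {
        "frame_start": 1001,   # required by {current_frame} and {timecode}
        "frame_start_tc": 1,   # timecode starts at 1 instead of 1001
        "shot": "sh0010"
    }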
From d263cc3bfd0029b788d8ce4ff1bea405765bf3ef Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:51:37 +0100
Subject: [PATCH 154/434] data variable in __main__ was renamed to in_data to
 not be overridden during processing

---
 pype/scripts/otio_burnin.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 79565af22a..9564982980 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -439,10 +439,10 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
 if __name__ == '__main__':
     import sys
     import json
-    data = json.loads(sys.argv[-1])
+    in_data = json.loads(sys.argv[-1])
     burnins_from_data(
-        data['input'],
-        data['codec'],
-        data['output'],
-        data['burnin_data']
+        in_data['input'],
+        in_data['codec'],
+        in_data['output'],
+        in_data['burnin_data']
     )

From 5d5d3eec92d892ddae1845cbabada0847c739471 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:52:26 +0100
Subject: [PATCH 155/434] alignment check lowercases the string (it is now
 possible to use `top_left` in presets)

---
 pype/scripts/otio_burnin.py | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 9564982980..85e72245cd 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -359,17 +359,18 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
 
     for align_text, preset in presets.get('burnins', {}).items():
         align = None
-        if align_text == 'TOP_LEFT':
+        align_text = align_text.strip().lower()
+        if align_text == "top_left":
             align = ModifiedBurnins.TOP_LEFT
-        elif align_text == 'TOP_CENTERED':
+        elif align_text == "top_centered":
             align = ModifiedBurnins.TOP_CENTERED
-        elif align_text == 'TOP_RIGHT':
+        elif align_text == "top_right":
             align = ModifiedBurnins.TOP_RIGHT
-        elif align_text == 'BOTTOM_LEFT':
+        elif align_text == "bottom_left":
             align = ModifiedBurnins.BOTTOM_LEFT
-        elif align_text == 'BOTTOM_CENTERED':
+        elif align_text == "bottom_centered":
             align = ModifiedBurnins.BOTTOM_CENTERED
-        elif align_text == 'BOTTOM_RIGHT':
+        elif align_text == "bottom_right":
             align = ModifiedBurnins.BOTTOM_RIGHT
 
         bi_func = preset.get('function')

From ca19b5d6798ef3535544b36340cea82a26ba7ff5 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:56:51 +0100
Subject: [PATCH 156/434] add_text can accept frame_start argument

---
 pype/scripts/otio_burnin.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 85e72245cd..d913baa5e2 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -134,17 +134,21 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if options_init:
             self.options_init.update(options_init)
 
-    def add_text(self, text, align, options=None):
+    def add_text(self, text, align, frame_start=None, options=None):
         """
         Adding static text to a filter.
 
         :param str text: text to apply to the drawtext
         :param enum align: alignment, must use provided enum flags
+        :param int frame_start: starting frame for burnins
         :param dict options: recommended to use TextOptions
         """
         if not options:
             options = ffmpeg_burnins.TextOptions(**self.options_init)
-        self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
+
+        options = options.copy()
+        if frame_start:
+            options["frame_offset"] = frame_start

From 5d5d3eec92d892ddae1845cbabada0847c739471 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 10:57:11 +0100
Subject: [PATCH 157/434] add_text use custom DRAWTEXT ffmpeg string

---
 pype/scripts/otio_burnin.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index d913baa5e2..be4ec3e57d 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -150,6 +150,8 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if frame_start:
             options["frame_offset"] = frame_start
 
+        self._add_burnin(text, align, options, DRAWTEXT)
+
     def add_timecode(self, align, options=None, start_frame=None):
         """
         Convenience method to create the frame number expression.
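Note: after PATCHes 156 and 157, static text burnins route through the custom DRAWTEXT template. A hedged usage sketch (paths and values invented):

    # Usage sketch for the reworked add_text (PATCHes 156-157).
    burnin = ModifiedBurnins("/path/to/input.mov")  # invented path
    burnin.add_text(
        "sh0010",                  # static text to draw
        ModifiedBurnins.TOP_LEFT,  # alignment enum flag
        frame_start=1001           # stored into options["frame_offset"]
    )
    burnin.render("/path/to/output.mov", overwrite=True)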
From 96d3e51d9200cf04e4b63705a727d381c48a286e Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:00:29 +0100
Subject: [PATCH 158/434] add timecode allows to add text and use custom
 TIMECODE ffmpeg string

---
 pype/scripts/otio_burnin.py | 39 +++++++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 15 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index be4ec3e57d..67b85f9ba4 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -140,7 +140,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
 
         :param str text: text to apply to the drawtext
         :param enum align: alignment, must use provided enum flags
-        :param int frame_start: starting frame for burnins
+        :param int frame_start: starting frame for burnins current frame
         :param dict options: recommended to use TextOptions
         """
@@ -152,32 +152,41 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
 
         self._add_burnin(text, align, options, DRAWTEXT)
 
-    def add_timecode(self, align, options=None, start_frame=None):
+    def add_timecode(
+        self, align, frame_start=None, frame_start_tc=None, text=None,
+        options=None
+    ):
         """
         Convenience method to create the frame number expression.
 
         :param enum align: alignment, must use provided enum flags
+        :param int frame_start: starting frame for burnins current frame
+        :param int frame_start_tc: starting frame for burnins timecode
+        :param str text: text that will be before timecode
         :param dict options: recommended to use TimeCodeOptions
         """
         if not options:
             options = ffmpeg_burnins.TimeCodeOptions(**self.options_init)
-        if start_frame:
-            options['frame_offset'] = start_frame
 
-        timecode = ffmpeg_burnins._frames_to_timecode(
-            options['frame_offset'],
+        options = options.copy()
+        if frame_start:
+            options["frame_offset"] = frame_start
+
+        if not frame_start_tc:
+            frame_start_tc = options["frame_offset"]
+
+        if not text:
+            text = ""
+
+        if not options.get("fps"):
+            options["fps"] = self.frame_rate
+
+        options["timecode"] = ffmpeg_burnins._frames_to_timecode(
+            frame_start_tc,
             self.frame_rate
         )
-        options = options.copy()
-        if not options.get('fps'):
-            options['fps'] = self.frame_rate
-        self._add_burnin(
-            timecode.replace(':', r'\:'),
-            align,
-            options,
-            ffmpeg_burnins.TIMECODE
-        )
+        self._add_burnin(text, align, options, TIMECODE)
 
     def _add_burnin(self, text, align, options, draw):
         """

From defe60e5566ec8a251802636430843650a9115d4 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:01:37 +0100
Subject: [PATCH 159/434] _add_burnin does not use expression but only text

---
 pype/scripts/otio_burnin.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 67b85f9ba4..39bf963342 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -197,7 +197,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         """
         resolution = self.resolution
         data = {
-            'text': options.get('expression') or text,
+            'text': text,
             'color': options['font_color'],
             'size': options['font_size']
         }

From ca2279e710dcf15e8545c3a904027508d9989435 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:02:01 +0100
Subject: [PATCH 160/434] _drawtext must count text sizes with timecode text

---
 pype/scripts/otio_burnin.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 39bf963342..4c1301becf 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -201,8 +201,12 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
             'color': options['font_color'],
             'size': options['font_size']
         }
+        timecode_text = options.get("timecode") or ""
+        text_for_size = text + timecode_text
         data.update(options)
-        data.update(ffmpeg_burnins._drawtext(align, resolution, text, options))
+        data.update(
+            ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)
+        )
         if 'font' in data and ffmpeg_burnins._is_windows():
             data['font'] = data['font'].replace(os.sep, r'\\' + os.sep)
             data['font'] = data['font'].replace(':', r'\:')

From 39e785aefb6e4a48b5a8ea215a06070c11c2f425 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:02:14 +0100
Subject: [PATCH 161/434] docstring changes

---
 pype/scripts/otio_burnin.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 4c1301becf..73de2f2827 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -347,14 +347,14 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True)
 
     For this preset we'll need at least this data:
     data = {
-        "start_frame": 1001,
+        "frame_start": 1001,
         "shot": "sh0010"
     }
 
     When Timecode
should start from 1 then data need: data = { - "start_frame": 1001, - "start_frame_tc": 1, + "frame_start": 1001, + "frame_start_tc": 1, "shot": "sh0010" } ''' From 9a8c3b56a22cf333909e8dad8fc064a9164c0d1e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 6 Feb 2020 11:02:59 +0100 Subject: [PATCH 162/434] changed data processing to not use functions but only text --- pype/scripts/otio_burnin.py | 99 +++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 54 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 73de2f2827..e7464cdc7c 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -368,15 +368,27 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) stream = burnin._streams[0] if "resolution_width" not in data: - data["resolution_width"] = stream.get("width", "Unknown") + data["resolution_width"] = stream.get("width", MISSING_KEY_VALUE) if "resolution_height" not in data: - data["resolution_height"] = stream.get("height", "Unknown") + data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE) if "fps" not in data: data["fps"] = get_fps(stream.get("r_frame_rate", "0/0")) - for align_text, preset in presets.get('burnins', {}).items(): + # Check frame start and add expression if is available + if frame_start is not None: + data[CURRENT_FRAME_KEY] = r'%%{eif\:n+%d\:d}' % frame_start + + if frame_start_tc is not None: + data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY + + for align_text, value in presets.get('burnins', {}).items(): + if not value: + continue + + has_timecode = TIME_CODE_KEY in value + align = None align_text = align_text.strip().lower() if align_text == "top_left": @@ -392,65 +404,44 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) elif align_text == "bottom_right": align = ModifiedBurnins.BOTTOM_RIGHT - bi_func = preset.get('function') - if not bi_func: - log.error( - 'Missing function for burnin!' - 'Burnins are not created!' + # Replace with missing key value if frame_start_tc is not set + if frame_start_tc is None and has_timecode: + has_timecode = False + log.warning( + "`frame_start` and `frame_start_tc`" + " are not set in entered data." ) - return + value = value.replace(TIME_CODE_KEY, MISSING_KEY_VALUE) - if ( - bi_func in ['frame_numbers', 'timecode'] and - frame_start is None - ): - log.error( - 'start_frame is not set in entered data!' - 'Burnins are not created!' - ) - return + key_pattern = re.compile(r"(\{.*?[^{0]*\})") - if bi_func == 'frame_numbers': - current_frame_identifier = "{current_frame}" - text = preset.get('text') or current_frame_identifier + missing_keys = [] + for group in key_pattern.findall(value): + try: + group.format(**data) + except (TypeError, KeyError): + missing_keys.append(group) - if current_frame_identifier not in text: - log.warning(( - 'Text for Frame numbers don\'t have ' - '`{current_frame}` key in text!' 
- )) + missing_keys = list(set(missing_keys)) + for key in missing_keys: + value = value.replace(key, MISSING_KEY_VALUE) - text_items = [] - split_items = text.split(current_frame_identifier) - for item in split_items: - text_items.append(item.format(**data)) + # Handle timecode differently + if has_timecode: + args = [align, frame_start, frame_start_tc] + if not value.startswith(TIME_CODE_KEY): + value_items = value.split(TIME_CODE_KEY) + text = value_items[0].format(**data) + args.append(value_items[0]) - text = "{current_frame}".join(text_items) + burnin.add_timecode(*args) + continue - burnin.add_frame_numbers(align, start_frame=frame_start, text=text) + text = value.format(**data) + burnin.add_text(text, align, frame_start) - elif bi_func == 'timecode': - burnin.add_timecode(align, start_frame=frame_start_tc) - - elif bi_func == 'text': - if not preset.get('text'): - log.error('Text is not set for text function burnin!') - return - text = preset['text'].format(**data) - burnin.add_text(text, align) - - elif bi_func == "datetime": - date_format = preset["format"] - burnin.add_datetime(date_format, align) - - else: - log.error( - 'Unknown function for burnins {}'.format(bi_func) - ) - return - - codec_args = '' - if codec_data is not []: + codec_args = "" + if codec_data: codec_args = " ".join(codec_data) burnin.render(output_path, args=codec_args, overwrite=overwrite, **data) From 1033f779d1a72d33365ec197b398a6f41cf478f9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 6 Feb 2020 11:03:58 +0100 Subject: [PATCH 163/434] codec moved to optional args because is optional --- pype/scripts/otio_burnin.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index e7464cdc7c..bc45e45f82 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -302,7 +302,9 @@ def example(input_path, output_path): burnin.render(output_path, overwrite=True) -def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True): +def burnins_from_data( + input_path, output_path, data, codec_data=None, overwrite=True +): ''' This method adds burnins to video/image file based on presets setting. Extension of output MUST be same as input. (mov -> mov, avi -> avi,...) 
From 1033f779d1a72d33365ec197b398a6f41cf478f9 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:03:58 +0100
Subject: [PATCH 163/434] codec moved to optional args because it is optional

---
 pype/scripts/otio_burnin.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index e7464cdc7c..bc45e45f82 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -302,7 +302,9 @@ def example(input_path, output_path):
     burnin.render(output_path, overwrite=True)
 
 
-def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
+def burnins_from_data(
+    input_path, output_path, data, codec_data=None, overwrite=True
+):
     '''
     This method adds burnins to video/image file based on presets setting.
     Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
@@ -453,7 +455,7 @@ if __name__ == '__main__':
     in_data = json.loads(sys.argv[-1])
     burnins_from_data(
         in_data['input'],
-        in_data['codec'],
         in_data['output'],
-        in_data['burnin_data']
+        in_data['burnin_data'],
+        in_data['codec']
     )

From f19235f91e4492331f04df281049d8984716fcdd Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:04:08 +0100
Subject: [PATCH 164/434] added forgotten import

---
 pype/scripts/otio_burnin.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index bc45e45f82..8a95542c04 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -1,4 +1,5 @@
 import os
+import re
 import datetime
 import subprocess
 import json

From 6be774b1f8716471e28beb2659f3d27750df6f4e Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:04:43 +0100
Subject: [PATCH 165/434] removed imports from __main__

---
 pype/scripts/otio_burnin.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 8a95542c04..6c1e19690b 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -1,4 +1,5 @@
 import os
+import sys
 import re
 import datetime
 import subprocess
@@ -451,8 +452,6 @@ def burnins_from_data(
 
 
 if __name__ == '__main__':
-    import sys
-    import json
     in_data = json.loads(sys.argv[-1])
     burnins_from_data(
         in_data['input'],

From a2d07a89a9fa19b007c0565459df4973bbf1710d Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 6 Feb 2020 11:06:31 +0100
Subject: [PATCH 166/434] removed deprecated method usage in example

---
 pype/scripts/otio_burnin.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 6c1e19690b..590939df56 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -296,10 +296,6 @@ def example(input_path, output_path):
     burnin.add_text('My Text', ModifiedBurnins.TOP_CENTERED)
     # Datetime
     burnin.add_text('%d-%m-%y', ModifiedBurnins.TOP_RIGHT)
-    # Frame number
-    burnin.add_frame_numbers(ModifiedBurnins.TOP_RIGHT, start_frame=start_frame)
-    # Timecode
-    burnin.add_timecode(ModifiedBurnins.TOP_LEFT, start_frame=start_frame)
     # Start render (overwrite output file if exist)
     burnin.render(output_path, overwrite=True)
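Note: after PATCHes 163-165 the script's __main__ consumes one JSON argument, with `codec` now optional in burnins_from_data itself (though __main__ still reads in_data['codec'] directly). A sketch of an invocation payload (paths invented):

    import json
    import subprocess

    payload = {
        "input": "/path/to/input.mov",    # invented paths
        "output": "/path/to/output.mov",
        "burnin_data": {"frame_start": 1001, "shot": "sh0010"},
        "codec": []  # extra ffmpeg args; empty list means none
    }
    subprocess.Popen(
        ["python", "pype/scripts/otio_burnin.py", json.dumps(payload)]
    ).communicate()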
{}".format(subset)) new_instance = context.create_instance(subset) data = copy.deepcopy(data) + task = data["metadata"]["session"]["AVALON_TASK"] + new_subset_name = 'render{}{}{}{}'.format( + task[0].upper(), task[1:], + subset[0].upper(), subset[1:]) + new_instance.data.update( { - "name": subset, + "name": new_subset_name, "family": 'render', "families": ['render'], - "subset": subset, + "subset": new_subset_name, "asset": data.get( "asset", api.Session["AVALON_ASSET"]), "stagingDir": root, @@ -500,7 +505,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): ) new_instance.data["representations"] = data["metadata"]["instance"]["representations"] - if new_instance: + if new_instance is not None: self.log.info("remapping paths ...") new_instance.data["representations"] = [PypeLauncher.path_remapper(r) for r in new_instance.data["representations"]] # noqa: E501 self.log.debug( diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index 4c07e8c118..159202c51f 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -25,6 +25,22 @@ class CreateRender(avalon.maya.Creator): # renderSetup instance _rs = None + _image_prefix_nodes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'defaultRenderGlobals.imageFilePrefix', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' + } + + _image_prefixes = { + 'mentalray': 'maya///_', + 'vray': '"maya///', + 'arnold': 'maya///_', + 'renderman': 'maya///_', + 'redshift': 'maya///_' + } + def __init__(self, *args, **kwargs): super(CreateRender, self).__init__(*args, **kwargs) @@ -49,6 +65,16 @@ class CreateRender(avalon.maya.Creator): sets.append(render_set) cmds.sets(sets, forceElement=instance) + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + + cmds.setAttr(self._image_prefix_nodes[renderer], + self._image_prefixes[renderer], + type="string") + def _create_render_settings(self): # get pools pools = [] diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 4096ef3c0e..5179acf416 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -55,6 +55,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin): for instance in context: if 'rendering' in instance.data['families']: render_instance = instance + render_instance.data["remove"] = True if not render_instance: self.log.info("No render instance found, skipping render " From b68cbf4be0c41152a4008d2312626a0e6075cf4b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 6 Feb 2020 16:59:22 +0100 Subject: [PATCH 168/434] fix(nuke): didnt create write node --- pype/nuke/lib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 2ed9f75513..6d6e7de1b2 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -374,7 +374,7 @@ def create_write_node(name, data, input=None, prenodes=None): now_node.setInput(0, prev_node) # imprinting group node - avalon.nuke.imprint(GN, data["avalon"], tab="Pype") + avalon.nuke.imprint(GN, data["avalon"]) divider = nuke.Text_Knob('') GN.addKnob(divider) From 979cad41dceeb2a9dffd62d82b55ccd5edef06d0 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 6 Feb 2020 16:59:59 +0100 Subject: 
[PATCH 169/434] fix(nks):didnt publish plates --- pype/plugins/nukestudio/publish/collect_plates.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index be448931c8..70f0f7407e 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -156,8 +156,9 @@ class CollectPlatesData(api.InstancePlugin): ext=ext ) - start_frame = source_first_frame + instance.data["sourceInH"] - duration = instance.data["sourceOutH"] - instance.data["sourceInH"] + start_frame = int(source_first_frame + instance.data["sourceInH"]) + duration = int( + instance.data["sourceOutH"] - instance.data["sourceInH"]) end_frame = start_frame + duration self.log.debug("start_frame: `{}`".format(start_frame)) self.log.debug("end_frame: `{}`".format(end_frame)) From 73a7dd2554b312f10e3ea4ea03dcf4bef0d5759b Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Thu, 6 Feb 2020 17:10:12 +0100 Subject: [PATCH 170/434] fixed remapping, RGBA to beauty in arnold, determining subset version correctly --- .../global/publish/collect_filesequences.py | 2 +- pype/plugins/maya/publish/collect_render.py | 6 +- .../maya/publish/determine_future_version.py | 85 +++++++++++++++++++ 3 files changed, 91 insertions(+), 2 deletions(-) create mode 100644 pype/plugins/maya/publish/determine_future_version.py diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 82d876afc6..f20bda1289 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -507,7 +507,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if new_instance is not None: self.log.info("remapping paths ...") - new_instance.data["representations"] = [PypeLauncher.path_remapper(r) for r in new_instance.data["representations"]] # noqa: E501 + new_instance.data["representations"] = [PypeLauncher.path_remapper(data=r) for r in new_instance.data["representations"]] # noqa: E501 self.log.debug( "__ representations {}".format( new_instance.data["representations"])) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 5179acf416..771078a5f5 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -503,9 +503,13 @@ class ExpectedFilesArnold(AExpectedFiles): '{}.enabled'.format(aov), self.layer): enabled = self.maya_is_true(override) if enabled: + # If aov RGBA is selected, arnold will translate it to `beauty` + aov_name = cmds.getAttr('%s.name' % aov) + if aov_name == 'RGBA': + aov_name = 'beauty' enabled_aovs.append( ( - cmds.getAttr('%s.name' % aov), + aov_name, aov_ext ) ) diff --git a/pype/plugins/maya/publish/determine_future_version.py b/pype/plugins/maya/publish/determine_future_version.py new file mode 100644 index 0000000000..72dbf719d7 --- /dev/null +++ b/pype/plugins/maya/publish/determine_future_version.py @@ -0,0 +1,85 @@ +import pyblish +from avalon import api, io + + +class DetermineFutureVersion(pyblish.api.InstancePlugin): + """ + This will determine version of subset if we want render to be attached to. 
+ """ + label = "Determine Subset Version" + order = pyblish.api.IntegratorOrder + hosts = ["maya"] + families = ["renderlayer"] + + def process(self, instance): + context = instance.context + attach_to_subsets = [s["subset"] for s in instance.data['attachTo']] + + if not attach_to_subsets: + return + + for i in context: + if i.data["subset"] in attach_to_subsets: + latest_version = self._get_latest_version(i.data["subset"]) + + # this will get corresponding subset in attachTo list + # so we can set version there + sub = next(item for item in instance.data['attachTo'] if item["subset"] == i.data["subset"]) # noqa: E501 + + if not latest_version: + # if latest_version is None, subset is not yet in + # database so we'll check its instance to see if version + # is there and use that, or we'll just stay with v1 + latest_version = i.data.get("version", 1) + + sub["version"] = latest_version + self.log.info("render will be attached to {} v{}".format( + sub["subset"], sub["version"] + )) + + def _get_latest_version(self, subset): + latest_version = None + + project_name = api.Session["AVALON_PROJECT"] + asset_name = api.Session["AVALON_ASSET"] + + project_entity = io.find_one({ + "type": "project", + "name": project_name + }) + + assert project_entity, ( + "Project '{0}' was not found." + ).format(project_name) + + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + assert asset_entity, ( + "No asset found by the name '{0}' in project '{1}'" + ).format(asset_name, project_name) + + if asset_entity: + subset_entity = io.find_one({ + "type": "subset", + "name": subset, + "parent": asset_entity["_id"] + }) + + if subset_entity is None: + self.log.info("Subset entity does not exist yet.") + pass + + else: + version_entity = io.find_one( + { + "type": "version", + "parent": subset_entity["_id"] + }, + sort=[("name", -1)] + ) + if version_entity: + latest_version = version_entity["name"] + return latest_version From d0f0129c2c585b4f493e8b0bbd68f24f317849eb Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 6 Feb 2020 17:40:07 +0100 Subject: [PATCH 171/434] fix(nk): loaders use self log --- pype/plugins/nuke/load/load_sequence.py | 31 +++++++++++-------------- 1 file changed, 14 insertions(+), 17 deletions(-) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 76ff7d2cb6..db77c53aff 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -5,10 +5,6 @@ import contextlib from avalon import api, io from pype.nuke import presets -from pype.api import Logger - -log = Logger().get_logger(__name__, "nuke") - @contextlib.contextmanager def preserve_trim(node): @@ -35,14 +31,14 @@ def preserve_trim(node): if start_at_frame: node['frame_mode'].setValue("start at") node['frame'].setValue(str(script_start)) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) if offset_frame: node['frame_mode'].setValue("offset") node['frame'].setValue(str((script_start + offset_frame))) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) def loader_shift(node, frame, relative=True): @@ -74,7 +70,7 @@ def loader_shift(node, frame, relative=True): class LoadSequence(api.Loader): """Load image sequence into Nuke""" - families = ["write", "source", "plate", "render"] + families = ["render2d", "source", 
"plate", "render"] representations = ["exr", "dpx", "jpg", "jpeg", "png"] label = "Load sequence" @@ -91,7 +87,7 @@ class LoadSequence(api.Loader): version = context['version'] version_data = version.get("data", {}) - log.info("version_data: {}\n".format(version_data)) + self.log.info("version_data: {}\n".format(version_data)) self.first_frame = int(nuke.root()["first_frame"].getValue()) self.handle_start = version_data.get("handleStart", 0) @@ -111,7 +107,7 @@ class LoadSequence(api.Loader): if not file: repr_id = context["representation"]["_id"] - log.warning( + self.log.warning( "Representation id `{}` is failing to load".format(repr_id)) return @@ -242,7 +238,7 @@ class LoadSequence(api.Loader): if not file: repr_id = representation["_id"] - log.warning( + self.log.warning( "Representation id `{}` is failing to load".format(repr_id)) return @@ -277,9 +273,10 @@ class LoadSequence(api.Loader): last = version_data.get("frameEnd") if first is None: - log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format(node['name'].value(), representation)) + self.log.warning("Missing start frame for updated version" + "assuming starts at frame 0 for: " + "{} ({})".format( + node['name'].value(), representation)) first = 0 first -= self.handle_start @@ -288,7 +285,7 @@ class LoadSequence(api.Loader): # Update the loader's path whilst preserving some values with preserve_trim(node): node["file"].setValue(file) - log.info("__ node['file']: {}".format(node["file"].value())) + self.log.info("__ node['file']: {}".format(node["file"].value())) # Set the global in to the start frame of the sequence loader_shift(node, first, relative=True) @@ -328,7 +325,7 @@ class LoadSequence(api.Loader): node, updated_dict ) - log.info("udated to version: {}".format(version.get("name"))) + self.log.info("udated to version: {}".format(version.get("name"))) def remove(self, container): From 1b1770dd638cd684a1f69c76c5fb06eef9739ad8 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 6 Feb 2020 17:40:37 +0100 Subject: [PATCH 172/434] fix(nuke): loader mov reads revew presets for family and representation --- pype/plugins/nuke/load/load_mov.py | 58 +++++++++++++++++++++--------- 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index fccba4c573..77346a82a4 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -4,9 +4,7 @@ import contextlib from avalon import api, io from pype.nuke import presets - -from pype.api import Logger -log = Logger().get_logger(__name__, "nuke") +from pypeapp import config @contextlib.contextmanager @@ -34,14 +32,14 @@ def preserve_trim(node): if start_at_frame: node['frame_mode'].setValue("start at") node['frame'].setValue(str(script_start)) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) if offset_frame: node['frame_mode'].setValue("offset") node['frame'].setValue(str((script_start + offset_frame))) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) def loader_shift(node, frame, relative=True): @@ -70,11 +68,37 @@ def loader_shift(node, frame, relative=True): return int(script_start) +def add_review_presets_config(): + returning = { + "families": list(), + "representations": list() + } + review_presets = 
From b02eb6c4c944419684e38927d9795677a8d15887 Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Thu, 6 Feb 2020 21:34:25 +0100
Subject: [PATCH 173/434] fixed review filtering and attaching, creator
 creates layer if not present

---
 pype/maya/lib.py                              |  2 +-
 .../global/publish/collect_filesequences.py   | 17 +++---
 .../global/publish/submit_publish_job.py      | 57 ++++---------------
 pype/plugins/maya/create/create_render.py     |  9 ++-
 .../maya/publish/submit_maya_deadline.py      |  3 -
 5 files changed, 30 insertions(+), 58 deletions(-)

diff --git a/pype/maya/lib.py b/pype/maya/lib.py
index 216b455137..9432f734b6 100644
--- a/pype/maya/lib.py
+++ b/pype/maya/lib.py
@@ -2491,7 +2491,7 @@ class RenderSetupListObserver:
         members = cmds.sets(render_set, query=True) or []
         if not "LAYER_{}".format(layer_name) in members:
             print(" - creating set for {}".format(layer_name))
-            set = cmds.sets(n="LAYER_{}".format(layer_name))
+            set = cmds.sets(n="LAYER_{}".format(layer_name), empty=True)
             cmds.sets(set, forceElement=render_set)
         rio = RenderSetupItemObserver(item)
         print("- adding observer for
{}".format(item.name())) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index f20bda1289..947d055d9b 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -225,11 +225,11 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if families_data and "slate" in families_data: families.append("slate") - if data.get("attachTo"): + if data["metadata"]["instance"].get("attachTo"): # we need to attach found collections to existing # subset version as review represenation. - for attach in data.get("attachTo"): + for attach in data["metadata"]["instance"]["attachTo"]: self.log.info( "Attaching render {}:v{}".format( attach["subset"], attach["version"])) @@ -238,6 +238,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): new_instance.data.update( { "name": attach["subset"], + "subset": attach["subset"], "version": attach["version"], "family": 'review', "families": ['review', 'ftrack'], @@ -368,7 +369,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if "slate" in instance.data["families"]: frame_start += 1 - tags = ["review"] + tags = ["preview"] if baked_mov_path: tags.append("delete") @@ -476,14 +477,16 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): representation["tags"].remove('review') else: subset = data["metadata"]["instance"]["subset"] - self.log.info("Creating new subset: {}".format(subset)) - new_instance = context.create_instance(subset) data = copy.deepcopy(data) task = data["metadata"]["session"]["AVALON_TASK"] new_subset_name = 'render{}{}{}{}'.format( task[0].upper(), task[1:], subset[0].upper(), subset[1:]) + self.log.info( + "Creating new subset: {}".format(new_subset_name)) + new_instance = context.create_instance(new_subset_name) + new_instance.data.update( { "name": new_subset_name, @@ -503,11 +506,11 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "slateFrame": slate_frame } ) - new_instance.data["representations"] = data["metadata"]["instance"]["representations"] + new_instance.data["representations"] = data["metadata"]["instance"]["representations"] # noqa: E501 if new_instance is not None: self.log.info("remapping paths ...") - new_instance.data["representations"] = [PypeLauncher.path_remapper(data=r) for r in new_instance.data["representations"]] # noqa: E501 + new_instance.data["representations"] = [PypeLauncher().path_remapper(data=r) for r in new_instance.data["representations"]] # noqa: E501 self.log.debug( "__ representations {}".format( new_instance.data["representations"])) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index cb86d45141..6fc60dc9ff 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -148,7 +148,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families = ["render.farm", "renderlayer", "imagesequence"] - # this will add review and ftrack tag only to `beauty` in `maya` app aov_filter = [AOVFilter("maya", ["beauty"])] enviro_filter = [ @@ -249,48 +248,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): :param instance: Instance data :type instance: dict - - Data needed for collect_filesequences: - - root - asset * - source * - frameStart - frameEnd - subset - ftrack - fps - user - version * - attachTo *: - subset - version - regex ! - exclude_regex ! 
- - metadata: - session * - instance *: - family - pixelAspect * - resolutionWidth - resolutionHeight - lutPath * - bakeRenderPath - families - slateFrame - version - representations: - name - ext - files": []] - frameStart - frameEnd - stagingDir - anatomy_template - fps - tags """ + data = instance.data.copy() context = instance.context @@ -341,16 +300,22 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if data.get("expectedFiles"): representations = [] cols, rem = clique.assemble(data.get("expectedFiles")) + # create representation for every collected sequence for c in cols: ext = c.tail.lstrip(".") - review = True + preview = False + # if filtered aov name is found in filename, toggle it for + # preview video renderin for filter in self.aov_filter: if os.environ.get("AVALON_APP", "") == filter.app: for aov in filter.aov: if re.match( - r"(\.|_)({})(\.|_)".format(aov), list(c)[0] + r".+(?:\.|_)({})(?:\.|_).*".format(aov), + list(c)[0] ): - review = False + preview = True + break + break rep = { "name": ext, "ext": ext, @@ -361,7 +326,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "stagingDir": os.path.dirname(list(c)[0]), "anatomy_template": "render", "fps": context.data.get("fps", None), - "tags": ["review"] if review else [], + "tags": ["review"] if preview else [], } representations.append(rep) diff --git a/pype/plugins/maya/create/create_render.py b/pype/plugins/maya/create/create_render.py index 159202c51f..080c6bd55d 100644 --- a/pype/plugins/maya/create/create_render.py +++ b/pype/plugins/maya/create/create_render.py @@ -55,9 +55,9 @@ class CreateRender(avalon.maya.Creator): instance = super(CreateRender, self).process() cmds.setAttr("{}.machineList".format(instance), lock=True) self._rs = renderSetup.instance() + layers = self._rs.getRenderLayers() if use_selection: print(">>> processing existing layers") - layers = self._rs.getRenderLayers() sets = [] for layer in layers: print(" - creating set for {}".format(layer.name())) @@ -65,6 +65,13 @@ class CreateRender(avalon.maya.Creator): sets.append(render_set) cmds.sets(sets, forceElement=instance) + # if no render layers are present, create default one with + # asterix selector + if not layers: + rl = self._rs.createRenderLayer('Main') + cl = rl.createCollection("defaultCollection") + cl.getSelector().setPattern('*') + renderer = cmds.getAttr( 'defaultRenderGlobals.currentRenderer').lower() # handle various renderman names diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index f28b9f5474..4c6399a880 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -211,9 +211,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Only render layers are considered renderable in this pipeline "UsingRenderLayers": True, - # Use legacy Render Layer system - # "UseLegacyRenderLayers": legacy_layers, - # Render only this layer "RenderLayer": renderlayer, From ae387d09778607ec56b12c2d9d75a9e74740786a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 11:39:22 +0100 Subject: [PATCH 174/434] added subproces for status --- pype/ftrack/ftrack_server/sub_event_info.py | 411 ++++++++++++++++++++ 1 file changed, 411 insertions(+) create mode 100644 pype/ftrack/ftrack_server/sub_event_info.py diff --git a/pype/ftrack/ftrack_server/sub_event_info.py b/pype/ftrack/ftrack_server/sub_event_info.py new file mode 100644 index 0000000000..d63b6acadd --- /dev/null +++ 
b/pype/ftrack/ftrack_server/sub_event_info.py @@ -0,0 +1,411 @@ +import os +import sys +import copy +import signal +import socket +import uuid +from datetime import datetime + +import ftrack_api +from ftrack_server import FtrackServer +from pype.ftrack.ftrack_server.lib import ( + SocketSession, SocketBaseEventHub, + TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT +) +from pypeapp import Logger + +log = Logger().get_logger("Event storer") +log.info(os.environ.get("FTRACK_EVENT_SUB_ID")) + + +class ObjectFactory: + session = None + sock = None + subprocess_id = os.environ["FTRACK_EVENT_SUB_ID"] + status_factory = None + + +def trigger_status_info(status_id=None, status=None): + if not status and not status_id: + log.warning( + "`status_id` or `status` must be specified to trigger action." + ) + return + + if not status: + status = ObjectFactory.status_factory[status_id] + + if not status: + return + + new_event_data = copy.deepcopy(action_data) + new_event_data.update({ + "selection": [] + }) + new_event_data["subprocess_id"] = ObjectFactory.subprocess_id + new_event_data["status_id"] = status.id + + new_event = ftrack_api.event.base.Event( + topic="ftrack.action.launch", + data=new_event_data, + source=status.source + ) + ObjectFactory.session.event_hub.publish(new_event) + + +action_identifier = ( + "event.server.status" + ObjectFactory.subprocess_id +) + +# TODO add IP adress to label +# TODO add icon +action_data = { + "label": "Pype Admin", + "variant": "Event server Status", + "description": "Get Infromation about event server", + "actionIdentifier": action_identifier, + "icon": None +} + + +class Status: + default_item = { + "type": "label", + "value": "Information not allowed." + } + note_item = { + "type": "label", + "value": "Hit `submit` to refresh data." + } + splitter_item = { + "type": "label", + "value": "---" + } + + def __init__(self, source_info, parent): + self.id = str(uuid.uuid1()) + self.created = datetime.now() + self.parent = parent + + self.source = source_info + + self.main_process = None + self.storer = None + self.processor = None + + def add_result(self, source, data): + if source.lower() == "storer": + self.storer = data + + elif source.lower() == "processor": + self.processor = data + + else: + self.main_process = data + + def filled(self): + # WARNING DEBUG PART!!!! 
+ return True + return ( + self.main_process is not None and + self.storer is not None and + self.processor is not None + ) + + def get_items_from_dict(self, in_dict): + items = [] + for key, value in in_dict.items(): + items.append({ + "type": "label", + "value": "##{}".format(key) + }) + items.append({ + "type": "label", + "value": value + }) + return items + + def bool_items(self): + items = [] + name_labels = { + "shutdown_main": "Shutdown main process", + "reset_storer": "Reset storer", + "reset_processor": "Reset processor" + } + for name, label in name_labels.items(): + items.append({ + "type": "boolean", + "value": False, + "label": label, + "name": name + }) + return items + + def items(self): + items = [] + items.append(self.note_item) + + items.append({"type": "label", "value": "Main process"}) + if not self.main_process: + items.append(self.default_item) + else: + items.extend( + self.get_items_from_dict(self.main_process) + ) + + items.append(self.splitter_item) + items.append({"type": "label", "value": "Storer process"}) + if not self.storer: + items.append(self.default_item) + else: + items.extend( + self.get_items_from_dict(self.storer) + ) + + items.append(self.splitter_item) + items.append({"type": "label", "value": "Processor process"}) + if not self.processor: + items.append(self.default_item) + else: + items.extend( + self.get_items_from_dict(self.processor) + ) + + items.append(self.splitter_item) + items.extend(self.bool_items()) + + return items + + @property + def is_overtime(self): + time_delta = (datetime.now() - self.created).total_seconds() + return time_delta >= self.parent.max_delta_seconds + + +class StatusFactory: + max_delta_seconds = 30 + + def __init__(self): + self.statuses = {} + + def __getitem__(self, key): + return self.statuses.get(key) + + def create_status(self, source_info): + new_status = Status(source_info, self) + self.statuses[new_status.id] = new_status + return new_status + + def process_result(self, event): + subprocess_id = event["data"].get("subprocess_id") + if subprocess_id != ObjectFactory.subprocess_id: + return + + status_id = event["data"].get("status_id") + status = self.statuses[status_id] + if not status: + return + + source = event["data"]["source"] + data = event["data"]["status_info"] + + status.add_result(source, data) + if status.filled(): + trigger_status_info(status=status) + + +def server_activity_validate_user(event): + """Validate user permissions to show server info.""" + session = ObjectFactory.session + + username = event["source"].get("user", {}).get("username") + if not username: + return False + + user_ent = session.query( + "User where username = \"{}\"".format(username) + ).first() + if not user_ent: + return False + + role_list = ["Pypeclub", "Administrator"] + for role in user_ent["user_security_roles"]: + if role["security_role"]["name"] in role_list: + return True + return False + + +def server_activity_discover(event): + """Discover action in actions menu conditions.""" + session = ObjectFactory.session + if session is None: + return + + if not server_activity_validate_user(event): + return + + return {"items": [action_data]} + + +def handle_filled_event(event): + subprocess_id = event["data"].get("subprocess_id") + if subprocess_id != ObjectFactory.subprocess_id: + return None + + status_id = event["data"].get("status_id") + status = ObjectFactory.status_factory[status_id] + if not status: + return None + + values = event.get("values") + if values: + log.info(values) + + title = "Event server - Status" + 
+ event_data = copy.deepcopy(event["data"]) + event_data.update({ + "type": "widget", + "items": status.items(), + "title": title + }) + + ObjectFactory.session.event_hub.publish( + ftrack_api.event.base.Event( + topic="ftrack.action.trigger-user-interface", + data=event_data + ), + on_error='ignore' + ) + + +def server_activity(event): + session = ObjectFactory.session + if session is None: + msg = "Session is not set. Can't trigger Reset action." + log.warning(msg) + return { + "success": False, + "message": msg + } + + valid = server_activity_validate_user(event) + if not valid: + return { + "success": False, + "message": "You don't have permissions to see Event server status!" + } + + subprocess_id = event["data"].get("subprocess_id") + if subprocess_id is not None: + return handle_filled_event(event) + + status = ObjectFactory.status_factory.create_status(event["source"]) + + event_data = { + "status_id": status.id, + "subprocess_id": ObjectFactory.subprocess_id + } + session.event_hub.publish( + ftrack_api.event.base.Event( + topic=TOPIC_STATUS_SERVER, + data=event_data + ), + on_error="ignore" + ) + + return { + "success": True, + "message": "Collecting information (this may take > 20s)" + } + + +def register(session): + '''Registers the event, subscribing the discover and launch topics.''' + session.event_hub.subscribe( + "topic=ftrack.action.discover", + server_activity_discover + ) + + status_launch_subscription = ( + "topic=ftrack.action.launch and data.actionIdentifier={}" + ).format(action_identifier) + + session.event_hub.subscribe( + status_launch_subscription, + server_activity + ) + + session.event_hub.subscribe( + "topic={}".format(TOPIC_STATUS_SERVER_RESULT), + ObjectFactory.status_factory.process_result + ) + + +def main(args): + port = int(args[-1]) + + # Create a TCP/IP socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + # Connect the socket to the port where the server is listening + server_address = ("localhost", port) + log.debug("Storer connected to {} port {}".format(*server_address)) + sock.connect(server_address) + sock.sendall(b"CreatedStatus") + # store socket connection object + ObjectFactory.sock = sock + ObjectFactory.status_factory = StatusFactory() + + _returncode = 0 + try: + session = SocketSession( + auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub + ) + ObjectFactory.session = session + register(session) + server = FtrackServer("event") + log.debug("Launched Ftrack Event storer") + server.run_server(session, load_files=False) + + except Exception: + _returncode = 1 + log.error("ServerInfo subprocess crashed", exc_info=True) + + finally: + log.debug("Ending. Closing socket.") + sock.close() + return _returncode + + +if __name__ == "__main__": + # Register interupt signal + def signal_handler(sig, frame): + print("You pressed Ctrl+C. 
Process ended.") + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + sys.exit(main(sys.argv)) + + +example_action_event = { + 'data': { + 'selection': [], + 'description': 'Test action2', + 'variant': None, + 'label': 'Test action2', + 'actionIdentifier': 'test.action2.3ceffe5e9acf40f8aa80603adebd0d06', + 'values': {}, + 'icon': None, + }, + 'topic': 'ftrack.action.launch', + 'sent': None, + 'source': { + 'id': 'eb67d186301c4cbbab73c1aee9b7c55d', + 'user': {'username': 'jakub.trllo', 'id': '2a8ae090-cbd3-11e8-a87a-0a580aa00121'} + }, + 'target': '', + 'in_reply_to_event': None +} From c937964dc8c80b54b95d5059670f845a83f4ca82 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 12:13:22 +0100 Subject: [PATCH 175/434] added subprocess to event server cli --- pype/ftrack/ftrack_server/event_server_cli.py | 46 +++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py index b09b0bc84e..b2c540e993 100644 --- a/pype/ftrack/ftrack_server/event_server_cli.py +++ b/pype/ftrack/ftrack_server/event_server_cli.py @@ -7,6 +7,7 @@ import socket import argparse import atexit import time +import uuid import ftrack_api from pype.ftrack.lib import credentials @@ -175,6 +176,7 @@ def main_loop(ftrack_url): otherwise thread will be killed. """ + os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1()) # Get mongo hostname and port for testing mongo connection mongo_list = ftrack_events_mongo_settings() mongo_hostname = mongo_list[0] @@ -202,6 +204,13 @@ def main_loop(ftrack_url): processor_last_failed = datetime.datetime.now() processor_failed_count = 0 + statuser_name = "StorerThread" + statuser_port = 10021 + statuser_path = "{}/sub_event_info.py".format(file_path) + statuser_thread = None + statuser_last_failed = datetime.datetime.now() + statuser_failed_count = 0 + ftrack_accessible = False mongo_accessible = False @@ -336,6 +345,43 @@ def main_loop(ftrack_url): processor_failed_count = 0 processor_last_failed = _processor_last_failed + if statuser_thread is None: + if statuser_failed_count < max_fail_count: + statuser_thread = socket_thread.SocketThread( + statuser_name, statuser_port, statuser_path + ) + statuser_thread.start() + + elif statuser_failed_count == max_fail_count: + print(( + "Statuser failed {}times in row" + " I'll try to run again {}s later" + ).format(str(max_fail_count), str(wait_time_after_max_fail))) + statuser_failed_count += 1 + + elif (( + datetime.datetime.now() - statuser_last_failed + ).seconds > wait_time_after_max_fail): + statuser_failed_count = 0 + + # If thread failed test Ftrack and Mongo connection + elif not statuser_thread.isAlive(): + statuser_thread.join() + statuser_thread = None + ftrack_accessible = False + mongo_accessible = False + + _processor_last_failed = datetime.datetime.now() + delta_time = ( + _processor_last_failed - statuser_last_failed + ).seconds + + if delta_time < min_fail_seconds: + statuser_failed_count += 1 + else: + statuser_failed_count = 0 + statuser_last_failed = _processor_last_failed + time.sleep(1) From fa60c87c3e0f9e9261dd9b9e5c8b4188c50e0b4f Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:28:29 +0100 Subject: [PATCH 176/434] created base EventHub that can set callbacks on heartbeat and set message for sockets on heartbeat --- pype/ftrack/ftrack_server/lib.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git 
a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index fefba580e0..2617b63614 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -123,20 +123,30 @@ def check_ftrack_url(url, log_errors=True): return url -class StorerEventHub(ftrack_api.event.hub.EventHub): +class SocketBaseEventHub(ftrack_api.event.hub.EventHub): + + hearbeat_msg = b"hearbeat" + heartbeat_callbacks = [] + def __init__(self, *args, **kwargs): self.sock = kwargs.pop("sock") - super(StorerEventHub, self).__init__(*args, **kwargs) + super(SocketBaseEventHub, self).__init__(*args, **kwargs) def _handle_packet(self, code, packet_identifier, path, data): """Override `_handle_packet` which extend heartbeat""" code_name = self._code_name_mapping[code] if code_name == "heartbeat": # Reply with heartbeat. - self.sock.sendall(b"storer") - return self._send_packet(self._code_name_mapping['heartbeat']) + for callback in self.heartbeat_callbacks: + callback() + + self.sock.sendall(self.hearbeat_msg) + return self._send_packet(self._code_name_mapping["heartbeat"]) + + return super(SocketBaseEventHub, self)._handle_packet( + code, packet_identifier, path, data + ) - elif code_name == "connect": event = ftrack_api.event.base.Event( topic="pype.storer.started", data={}, From 24022c583651f16d70b210e340472be523c447d8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:28:44 +0100 Subject: [PATCH 177/434] Status event hub implemented --- pype/ftrack/ftrack_server/lib.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index 2617b63614..71ce6861a4 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -147,6 +147,25 @@ class SocketBaseEventHub(ftrack_api.event.hub.EventHub): code, packet_identifier, path, data ) + +class StatusEventHub(SocketBaseEventHub): + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "connect": + event = ftrack_api.event.base.Event( + topic="pype.status.started", + data={}, + source={ + "id": self.id, + "user": {"username": self._api_user} + } + ) + self._event_queue.put(event) + + return super(StatusEventHub, self)._handle_packet( + code, packet_identifier, path, data + ) event = ftrack_api.event.base.Event( topic="pype.storer.started", data={}, From a97c73258e349291ae8f0899f37ac7ec9a8c13b5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:29:01 +0100 Subject: [PATCH 178/434] removed user event hub --- pype/ftrack/ftrack_server/lib.py | 29 -------------------- pype/ftrack/ftrack_server/sub_user_server.py | 4 +-- 2 files changed, 2 insertions(+), 31 deletions(-) diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index 71ce6861a4..57c5b7d5dc 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -296,35 +296,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub): return super()._handle_packet(code, packet_identifier, path, data) -class UserEventHub(ftrack_api.event.hub.EventHub): - def __init__(self, *args, **kwargs): - self.sock = kwargs.pop("sock") - super(UserEventHub, self).__init__(*args, **kwargs) - - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which extend heartbeat""" - code_name = self._code_name_mapping[code] - if code_name == "heartbeat": - # Reply with 
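
The subclassing pattern introduced here is the core of patches 176-179: every event hub keeps a plain TCP socket to its parent process and answers ftrack's periodic heartbeat packet by also writing a liveness token to that socket, so the parent can tell a hung subprocess from a live one. A rough standalone sketch of the idea, not code from the repository (PlainHub stands in for ftrack_api's EventHub; only the heartbeat branch is shown):

class PlainHub(object):
    def _handle_packet(self, code, packet_identifier, path, data):
        pass  # stand-in: the real base class handles all other packet types

class SocketHeartbeatHub(PlainHub):
    heartbeat_msg = b"heartbeat"   # overridden per subprocess, e.g. b"storer"
    heartbeat_callbacks = []       # run on every heartbeat

    def __init__(self, sock, **kwargs):
        self.sock = sock
        super(SocketHeartbeatHub, self).__init__(**kwargs)

    def _handle_packet(self, code, packet_identifier, path, data):
        if code == "heartbeat":
            for callback in self.heartbeat_callbacks:
                callback()
            self.sock.sendall(self.heartbeat_msg)  # liveness for the parent
            return
        return super(SocketHeartbeatHub, self)._handle_packet(
            code, packet_identifier, path, data)
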
heartbeat. - self.sock.sendall(b"hearbeat") - return self._send_packet(self._code_name_mapping['heartbeat']) - - elif code_name == "connect": - event = ftrack_api.event.base.Event( - topic="pype.storer.started", - data={}, - source={ - "id": self.id, - "user": {"username": self._api_user} - } - ) - self._event_queue.put(event) - - return super(UserEventHub, self)._handle_packet( - code, packet_identifier, path, data - ) - - class SocketSession(ftrack_api.session.Session): '''An isolated session for interaction with an ftrack server.''' def __init__( diff --git a/pype/ftrack/ftrack_server/sub_user_server.py b/pype/ftrack/ftrack_server/sub_user_server.py index f0d39447a8..8c1497a562 100644 --- a/pype/ftrack/ftrack_server/sub_user_server.py +++ b/pype/ftrack/ftrack_server/sub_user_server.py @@ -5,7 +5,7 @@ import socket import traceback from ftrack_server import FtrackServer -from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub +from pype.ftrack.ftrack_server.lib import SocketSession, SocketBaseEventHub from pypeapp import Logger @@ -28,7 +28,7 @@ def main(args): try: session = SocketSession( - auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub + auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub ) server = FtrackServer("action") log.debug("Launched User Ftrack Server") From 526f9282d1e4136b44eab6e5505b1adf23e4af5b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:29:24 +0100 Subject: [PATCH 179/434] storer and processor eventhubs are modified --- pype/ftrack/ftrack_server/lib.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index 57c5b7d5dc..478bede6ef 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -166,6 +166,16 @@ class StatusEventHub(SocketBaseEventHub): return super(StatusEventHub, self)._handle_packet( code, packet_identifier, path, data ) + + +class StorerEventHub(SocketBaseEventHub): + + hearbeat_msg = b"storer" + + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "connect": event = ftrack_api.event.base.Event( topic="pype.storer.started", data={}, @@ -181,7 +191,9 @@ class StatusEventHub(SocketBaseEventHub): ) -class ProcessEventHub(ftrack_api.event.hub.EventHub): +class ProcessEventHub(SocketBaseEventHub): + + hearbeat_msg = b"processor" url, database, table_name = get_ftrack_event_mongo_info() is_table_created = False @@ -193,7 +205,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub): database_name=self.database, table_name=self.table_name ) - self.sock = kwargs.pop("sock") super(ProcessEventHub, self).__init__(*args, **kwargs) def prepare_dbcon(self): @@ -289,9 +300,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub): code_name = self._code_name_mapping[code] if code_name == "event": return - if code_name == "heartbeat": - self.sock.sendall(b"processor") - return self._send_packet(self._code_name_mapping["heartbeat"]) return super()._handle_packet(code, packet_identifier, path, data) From 4fd403bf54a167ea6d0621554b0a9b6768ca2bfb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:29:38 +0100 Subject: [PATCH 180/434] added constants with topics to lib --- pype/ftrack/ftrack_server/lib.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index 
478bede6ef..e623cab8fb 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -28,6 +28,10 @@ from pypeapp import Logger from pype.ftrack.lib.custom_db_connector import DbConnector +TOPIC_STATUS_SERVER = "pype.event.server.status" +TOPIC_STATUS_SERVER_RESULT = "pype.event.server.status.result" + + def ftrack_events_mongo_settings(): host = None port = None From 37de60577809c2ace929f7dab880a95ddc0ed0c2 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:30:07 +0100 Subject: [PATCH 181/434] socket thread can use additional arguments to execute and -port arg was removed (not used) --- pype/ftrack/ftrack_server/socket_thread.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py index 8e217870ba..cb073d83a0 100644 --- a/pype/ftrack/ftrack_server/socket_thread.py +++ b/pype/ftrack/ftrack_server/socket_thread.py @@ -12,13 +12,14 @@ class SocketThread(threading.Thread): MAX_TIMEOUT = 35 - def __init__(self, name, port, filepath): + def __init__(self, name, port, filepath, additional_args=[]): super(SocketThread, self).__init__() self.log = Logger().get_logger("SocketThread", "Event Thread") self.setName(name) self.name = name self.port = port self.filepath = filepath + self.additional_args = additional_args self.sock = None self.subproc = None self.connection = None @@ -53,7 +54,12 @@ class SocketThread(threading.Thread): ) self.subproc = subprocess.Popen( - [sys.executable, self.filepath, "-port", str(self.port)] + [ + sys.executable, + self.filepath, + *self.additional_args, + str(self.port) + ] ) # Listen for incoming connections From 05929f2b02929b9652411e4f0b53d324f3a67b76 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:31:24 +0100 Subject: [PATCH 182/434] status get suprocess data ony if they are missing (why to collect same data for each action launch) --- pype/ftrack/ftrack_server/sub_event_info.py | 426 +++++++++----------- 1 file changed, 197 insertions(+), 229 deletions(-) diff --git a/pype/ftrack/ftrack_server/sub_event_info.py b/pype/ftrack/ftrack_server/sub_event_info.py index d63b6acadd..5a38c992f5 100644 --- a/pype/ftrack/ftrack_server/sub_event_info.py +++ b/pype/ftrack/ftrack_server/sub_event_info.py @@ -1,137 +1,189 @@ import os import sys -import copy +import json import signal import socket -import uuid -from datetime import datetime +import datetime import ftrack_api from ftrack_server import FtrackServer from pype.ftrack.ftrack_server.lib import ( - SocketSession, SocketBaseEventHub, + SocketSession, StatusEventHub, TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT ) from pypeapp import Logger log = Logger().get_logger("Event storer") -log.info(os.environ.get("FTRACK_EVENT_SUB_ID")) - - -class ObjectFactory: - session = None - sock = None - subprocess_id = os.environ["FTRACK_EVENT_SUB_ID"] - status_factory = None - - -def trigger_status_info(status_id=None, status=None): - if not status and not status_id: - log.warning( - "`status_id` or `status` must be specified to trigger action." 
- ) - return - - if not status: - status = ObjectFactory.status_factory[status_id] - - if not status: - return - - new_event_data = copy.deepcopy(action_data) - new_event_data.update({ - "selection": [] - }) - new_event_data["subprocess_id"] = ObjectFactory.subprocess_id - new_event_data["status_id"] = status.id - - new_event = ftrack_api.event.base.Event( - topic="ftrack.action.launch", - data=new_event_data, - source=status.source - ) - ObjectFactory.session.event_hub.publish(new_event) - - action_identifier = ( - "event.server.status" + ObjectFactory.subprocess_id + "event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"] ) - -# TODO add IP adress to label -# TODO add icon action_data = { "label": "Pype Admin", - "variant": "Event server Status", + "variant": "- Event server Status", "description": "Get Infromation about event server", "actionIdentifier": action_identifier, "icon": None } +class ObjectFactory: + session = None + status_factory = None + + class Status: default_item = { "type": "label", - "value": "Information not allowed." + "value": "Process info is not available at this moment." } + + def __init__(self, name, label, parent): + self.name = name + self.label = label or name + self.parent = parent + + self.info = None + self.last_update = None + + def update(self, info): + self.last_update = datetime.datetime.now() + self.info = info + + def get_delta_string(self, delta): + days, hours, minutes = ( + delta.days, delta.seconds // 3600, delta.seconds // 60 % 60 + ) + delta_items = [ + "{}d".format(days), + "{}h".format(hours), + "{}m".format(minutes) + ] + if not days: + delta_items.pop(0) + if not hours: + delta_items.pop(0) + delta_items.append("{}s".format(delta.seconds % 60)) + if not minutes: + delta_items.pop(0) + + return " ".join(delta_items) + + def get_items(self): + items = [] + last_update = "N/A" + if self.last_update: + delta = datetime.datetime.now() - self.last_update + last_update = "{} ago".format( + self.get_delta_string(delta) + ) + + last_update = "Updated: {}".format(last_update) + items.append({ + "type": "label", + "value": "#{}".format(self.label) + }) + items.append({ + "type": "label", + "value": "##{}".format(last_update) + }) + + if not self.info: + if self.info is None: + trigger_info_get() + items.append(self.default_item) + return items + + info = {} + for key, value in self.info.items(): + if key not in ["created_at:", "created_at"]: + info[key] = value + continue + + datetime_value = datetime.datetime.strptime( + value, "%Y.%m.%d %H:%M:%S" + ) + delta = datetime.datetime.now() - datetime_value + + running_for = self.get_delta_string(delta) + info["Started at"] = "{} [running: {}]".format(value, running_for) + + for key, value in info.items(): + items.append({ + "type": "label", + "value": "{}: {}".format(key, value) + }) + + return items + + +class StatusFactory: + note_item = { "type": "label", - "value": "Hit `submit` to refresh data." + "value": ( + "NOTE: Hit `submit` and uncheck all" + " checkers to refresh data." 
+ ) } splitter_item = { "type": "label", "value": "---" } - def __init__(self, source_info, parent): - self.id = str(uuid.uuid1()) - self.created = datetime.now() - self.parent = parent + def __init__(self, statuses={}): + self.statuses = [] + for status in statuses.items(): + self.create_status(*status) - self.source = source_info + def __getitem__(self, key): + return self.get(key) - self.main_process = None - self.storer = None - self.processor = None + def get(self, key, default=None): + for status in self.statuses: + if status.name == key: + return status + return default - def add_result(self, source, data): - if source.lower() == "storer": - self.storer = data - - elif source.lower() == "processor": - self.processor = data - - else: - self.main_process = data - - def filled(self): - # WARNING DEBUG PART!!!! + def is_filled(self): + for status in self.statuses: + if status.info is None: + return False return True - return ( - self.main_process is not None and - self.storer is not None and - self.processor is not None - ) - def get_items_from_dict(self, in_dict): - items = [] - for key, value in in_dict.items(): - items.append({ - "type": "label", - "value": "##{}".format(key) - }) - items.append({ - "type": "label", - "value": value - }) - return items + def create_status(self, name, label): + new_status = Status(name, label, self) + self.statuses.append(new_status) + + def process_event_result(self, event): + subprocess_id = event["data"].get("subprocess_id") + if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]: + return + + source = event["data"]["source"] + data = event["data"]["status_info"] + for status in self.statuses: + if status.name == source: + status.update(data) + break def bool_items(self): items = [] - name_labels = { - "shutdown_main": "Shutdown main process", - "reset_storer": "Reset storer", - "reset_processor": "Reset processor" - } + items.append({ + "type": "label", + "value": "#Restart process" + }) + items.append({ + "type": "label", + "value": ( + "WARNING: Main process may not restart" + " if does not run as a service!" 
+ ) + }) + + name_labels = {} + for status in self.statuses: + name_labels[status.name] = status.label + for name, label in name_labels.items(): items.append({ "type": "boolean", @@ -144,75 +196,14 @@ class Status: def items(self): items = [] items.append(self.note_item) - - items.append({"type": "label", "value": "Main process"}) - if not self.main_process: - items.append(self.default_item) - else: - items.extend( - self.get_items_from_dict(self.main_process) - ) - - items.append(self.splitter_item) - items.append({"type": "label", "value": "Storer process"}) - if not self.storer: - items.append(self.default_item) - else: - items.extend( - self.get_items_from_dict(self.storer) - ) - - items.append(self.splitter_item) - items.append({"type": "label", "value": "Processor process"}) - if not self.processor: - items.append(self.default_item) - else: - items.extend( - self.get_items_from_dict(self.processor) - ) - - items.append(self.splitter_item) items.extend(self.bool_items()) + for status in self.statuses: + items.append(self.splitter_item) + items.extend(status.get_items()) + return items - @property - def is_overtime(self): - time_delta = (datetime.now() - self.created).total_seconds() - return time_delta >= self.parent.max_delta_seconds - - -class StatusFactory: - max_delta_seconds = 30 - - def __init__(self): - self.statuses = {} - - def __getitem__(self, key): - return self.statuses.get(key) - - def create_status(self, source_info): - new_status = Status(source_info, self) - self.statuses[new_status.id] = new_status - return new_status - - def process_result(self, event): - subprocess_id = event["data"].get("subprocess_id") - if subprocess_id != ObjectFactory.subprocess_id: - return - - status_id = event["data"].get("status_id") - status = self.statuses[status_id] - if not status: - return - - source = event["data"]["source"] - data = event["data"]["status_info"] - - status.add_result(source, data) - if status.filled(): - trigger_status_info(status=status) - def server_activity_validate_user(event): """Validate user permissions to show server info.""" @@ -247,38 +238,6 @@ def server_activity_discover(event): return {"items": [action_data]} -def handle_filled_event(event): - subprocess_id = event["data"].get("subprocess_id") - if subprocess_id != ObjectFactory.subprocess_id: - return None - - status_id = event["data"].get("status_id") - status = ObjectFactory.status_factory[status_id] - if not status: - return None - - values = event.get("values") - if values: - log.info(values) - - title = "Event server - Status" - - event_data = copy.deepcopy(event["data"]) - event_data.update({ - "type": "widget", - "items": status.items(), - "title": title - }) - - ObjectFactory.session.event_hub.publish( - ftrack_api.event.base.Event( - topic="ftrack.action.trigger-user-interface", - data=event_data - ), - on_error='ignore' - ) - - def server_activity(event): session = ObjectFactory.session if session is None: @@ -289,35 +248,47 @@ def server_activity(event): "message": msg } - valid = server_activity_validate_user(event) - if not valid: + if not server_activity_validate_user(event): return { "success": False, "message": "You don't have permissions to see Event server status!" 
} - subprocess_id = event["data"].get("subprocess_id") - if subprocess_id is not None: - return handle_filled_event(event) + values = event["data"].get("values") or {} + is_checked = False + for value in values.values(): + if value: + is_checked = True + break - status = ObjectFactory.status_factory.create_status(event["source"]) + if not is_checked: + return { + "items": ObjectFactory.status_factory.items(), + "title": "Server current status" + } - event_data = { - "status_id": status.id, - "subprocess_id": ObjectFactory.subprocess_id - } + +def trigger_info_get(): + session = ObjectFactory.session session.event_hub.publish( ftrack_api.event.base.Event( topic=TOPIC_STATUS_SERVER, - data=event_data + data={"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"]} ), on_error="ignore" ) - return { - "success": True, - "message": "Collecting information (this may take > 20s)" - } + +def on_start(event): + session = ObjectFactory.session + source_id = event.get("source", {}).get("id") + if not source_id or source_id != session.event_hub.id: + return + + if session is None: + log.warning("Session is not set. Can't trigger Sync to avalon action.") + return True + trigger_info_get() def register(session): @@ -326,6 +297,7 @@ def register(session): "topic=ftrack.action.discover", server_activity_discover ) + session.event_hub.subscribe("topic=pype.status.started", on_start) status_launch_subscription = ( "topic=ftrack.action.launch and data.actionIdentifier={}" @@ -338,34 +310,51 @@ def register(session): session.event_hub.subscribe( "topic={}".format(TOPIC_STATUS_SERVER_RESULT), - ObjectFactory.status_factory.process_result + ObjectFactory.status_factory.process_event_result ) +def heartbeat(): + if ObjectFactory.status_factory.is_filled(): + return + + trigger_info_get() + + def main(args): port = int(args[-1]) + server_info = json.loads(args[-2]) # Create a TCP/IP socket sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # Connect the socket to the port where the server is listening server_address = ("localhost", port) - log.debug("Storer connected to {} port {}".format(*server_address)) + log.debug("Statuser connected to {} port {}".format(*server_address)) sock.connect(server_address) sock.sendall(b"CreatedStatus") # store socket connection object ObjectFactory.sock = sock - ObjectFactory.status_factory = StatusFactory() + statuse_names = { + "main": "Main process", + "storer": "Storer", + "processor": "Processor" + } + + ObjectFactory.status_factory = StatusFactory(statuse_names) + ObjectFactory.status_factory["main"].update(server_info) _returncode = 0 try: session = SocketSession( - auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub + auto_connect_event_hub=True, sock=sock, Eventhub=StatusEventHub ) ObjectFactory.session = session + session.event_hub.heartbeat_callbacks.append(heartbeat) register(session) server = FtrackServer("event") - log.debug("Launched Ftrack Event storer") + log.debug("Launched Ftrack Event statuser") + server.run_server(session, load_files=False) except Exception: @@ -388,24 +377,3 @@ if __name__ == "__main__": signal.signal(signal.SIGTERM, signal_handler) sys.exit(main(sys.argv)) - - -example_action_event = { - 'data': { - 'selection': [], - 'description': 'Test action2', - 'variant': None, - 'label': 'Test action2', - 'actionIdentifier': 'test.action2.3ceffe5e9acf40f8aa80603adebd0d06', - 'values': {}, - 'icon': None, - }, - 'topic': 'ftrack.action.launch', - 'sent': None, - 'source': { - 'id': 'eb67d186301c4cbbab73c1aee9b7c55d', - 'user': 
{'username': 'jakub.trllo', 'id': '2a8ae090-cbd3-11e8-a87a-0a580aa00121'} - }, - 'target': '', - 'in_reply_to_event': None -} From 1b1a78cb6ed79be18fcf89bd340c4e09528fda56 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:31:47 +0100 Subject: [PATCH 183/434] processor suprocess can send status information on ask event --- .../ftrack_server/sub_event_processor.py | 51 ++++++++++++++++++- 1 file changed, 50 insertions(+), 1 deletion(-) diff --git a/pype/ftrack/ftrack_server/sub_event_processor.py b/pype/ftrack/ftrack_server/sub_event_processor.py index 9c971ca916..2a3ad3e76d 100644 --- a/pype/ftrack/ftrack_server/sub_event_processor.py +++ b/pype/ftrack/ftrack_server/sub_event_processor.py @@ -1,13 +1,59 @@ +import os import sys import signal import socket +import datetime from ftrack_server import FtrackServer -from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub +from pype.ftrack.ftrack_server.lib import ( + SocketSession, ProcessEventHub, TOPIC_STATUS_SERVER +) +import ftrack_api from pypeapp import Logger log = Logger().get_logger("Event processor") +subprocess_started = datetime.datetime.now() + + +class SessionFactory: + session = None + + +def send_status(event): + subprocess_id = event["data"].get("subprocess_id") + if not subprocess_id: + return + + if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]: + return + + session = SessionFactory.session + if not session: + return + + new_event_data = { + "subprocess_id": subprocess_id, + "source": "processor", + "status_info": { + "created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S") + } + } + + new_event = ftrack_api.event.base.Event( + topic="pype.event.server.status.result", + data=new_event_data + ) + + session.event_hub.publish(new_event) + + +def register(session): + '''Registers the event, subscribing the discover and launch topics.''' + session.event_hub.subscribe( + "topic={}".format(TOPIC_STATUS_SERVER), send_status + ) + def main(args): port = int(args[-1]) @@ -24,6 +70,9 @@ def main(args): session = SocketSession( auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub ) + register(session) + SessionFactory.session = session + server = FtrackServer("event") log.debug("Launched Ftrack Event processor") server.run_server(session) From 2ff7b87956651c3343d195b56f0f871aaa4afee1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:32:02 +0100 Subject: [PATCH 184/434] storer can send status information on ask --- pype/ftrack/ftrack_server/sub_event_storer.py | 36 +++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/ftrack_server/sub_event_storer.py b/pype/ftrack/ftrack_server/sub_event_storer.py index dfe8e21654..b4b9b8a7ab 100644 --- a/pype/ftrack/ftrack_server/sub_event_storer.py +++ b/pype/ftrack/ftrack_server/sub_event_storer.py @@ -8,14 +8,15 @@ import pymongo import ftrack_api from ftrack_server import FtrackServer from pype.ftrack.ftrack_server.lib import ( + SocketSession, StorerEventHub, get_ftrack_event_mongo_info, - SocketSession, - StorerEventHub + TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT ) from pype.ftrack.lib.custom_db_connector import DbConnector from pypeapp import Logger log = Logger().get_logger("Event storer") +subprocess_started = datetime.datetime.now() class SessionFactory: @@ -138,11 +139,42 @@ def trigger_sync(event): ) +def send_status(event): + session = SessionFactory.session + if not session: + return + + subprocess_id = event["data"].get("subprocess_id") + if not subprocess_id: + return + + 
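
The guards above and below follow the same request/reply convention as sub_event_processor.send_status: ignore requests without an id or aimed at another event server instance, then publish a "status_info" payload back on the result topic. Reduced to its payload, the reply looks roughly like this (a sketch assuming an existing ftrack session; the helper name publish_status_reply is illustrative):

import os
import ftrack_api

def publish_status_reply(session, source_name, started_at):
    # source_name is "storer" or "processor"; started_at is a datetime
    session.event_hub.publish(
        ftrack_api.event.base.Event(
            topic="pype.event.server.status.result",
            data={
                "subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"],
                "source": source_name,
                "status_info": {
                    "created_at": started_at.strftime("%Y.%m.%d %H:%M:%S")
                }
            }
        ),
        on_error="ignore"
    )
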
if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]: + return + + new_event_data = { + "subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"], + "source": "storer", + "status_info": { + "created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S") + } + } + + new_event = ftrack_api.event.base.Event( + topic=TOPIC_STATUS_SERVER_RESULT, + data=new_event_data + ) + + session.event_hub.publish(new_event) + + def register(session): '''Registers the event, subscribing the discover and launch topics.''' install_db() session.event_hub.subscribe("topic=*", launch) session.event_hub.subscribe("topic=pype.storer.started", trigger_sync) + session.event_hub.subscribe( + "topic={}".format(TOPIC_STATUS_SERVER), send_status + ) def main(args): From 5433daf7b065eb7c16720009170b3400a5ee0fd5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 18:32:40 +0100 Subject: [PATCH 185/434] event server cli sent his infomation on status subprocess startup --- pype/ftrack/ftrack_server/event_server_cli.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py index b2c540e993..2dadb5da25 100644 --- a/pype/ftrack/ftrack_server/event_server_cli.py +++ b/pype/ftrack/ftrack_server/event_server_cli.py @@ -4,7 +4,10 @@ import signal import datetime import subprocess import socket +import json +import platform import argparse +import getpass import atexit import time import uuid @@ -233,6 +236,16 @@ def main_loop(ftrack_url): atexit.register( on_exit, processor_thread=processor_thread, storer_thread=storer_thread ) + + system_name, pc_name = platform.uname()[:2] + host_name = socket.gethostname() + main_info = { + "created_at": datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S"), + "Username": getpass.getuser(), + "Host Name": host_name, + "Host IP": socket.gethostbyname(host_name) + } + main_info_str = json.dumps(main_info) # Main loop while True: # Check if accessible Ftrack and Mongo url @@ -270,6 +283,7 @@ def main_loop(ftrack_url): printed_ftrack_error = False printed_mongo_error = False + # ====== STORER ======= # Run backup thread which does not requeire mongo to work if storer_thread is None: if storer_failed_count < max_fail_count: @@ -304,6 +318,7 @@ def main_loop(ftrack_url): storer_failed_count = 0 storer_last_failed = _storer_last_failed + # ====== PROCESSOR ======= if processor_thread is None: if processor_failed_count < max_fail_count: processor_thread = socket_thread.SocketThread( @@ -345,10 +360,12 @@ def main_loop(ftrack_url): processor_failed_count = 0 processor_last_failed = _processor_last_failed + # ====== STATUSER ======= if statuser_thread is None: if statuser_failed_count < max_fail_count: statuser_thread = socket_thread.SocketThread( - statuser_name, statuser_port, statuser_path + statuser_name, statuser_port, statuser_path, + [main_info_str] ) statuser_thread.start() From 2f85cdf0be4ed0b54481013ebc57c201dad9f444 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 7 Feb 2020 19:53:16 +0100 Subject: [PATCH 186/434] restarting is working, need to add comunication between main proces and status process --- pype/ftrack/ftrack_server/event_server_cli.py | 99 +++++++++++-------- pype/ftrack/ftrack_server/socket_thread.py | 56 ++++++++++- pype/ftrack/ftrack_server/sub_event_info.py | 35 +++++++ 3 files changed, 148 insertions(+), 42 deletions(-) diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py index 
2dadb5da25..19e889f77d 100644 --- a/pype/ftrack/ftrack_server/event_server_cli.py +++ b/pype/ftrack/ftrack_server/event_server_cli.py @@ -222,7 +222,7 @@ def main_loop(ftrack_url): # stop threads on exit # TODO check if works and args have thread objects! - def on_exit(processor_thread, storer_thread): + def on_exit(processor_thread, storer_thread, statuser_thread): if processor_thread is not None: processor_thread.stop() processor_thread.join() @@ -233,8 +233,16 @@ def main_loop(ftrack_url): storer_thread.join() storer_thread = None + if statuser_thread is not None: + statuser_thread.stop() + statuser_thread.join() + statuser_thread = None + atexit.register( - on_exit, processor_thread=processor_thread, storer_thread=storer_thread + on_exit, + processor_thread=processor_thread, + storer_thread=storer_thread, + statuser_thread=statuser_thread ) system_name, pc_name = platform.uname()[:2] @@ -283,6 +291,51 @@ def main_loop(ftrack_url): printed_ftrack_error = False printed_mongo_error = False + # ====== STATUSER ======= + if statuser_thread is None: + if statuser_failed_count < max_fail_count: + statuser_thread = socket_thread.StatusSocketThread( + statuser_name, statuser_port, statuser_path, + [main_info_str] + ) + statuser_thread.start() + + elif statuser_failed_count == max_fail_count: + print(( + "Statuser failed {}times in row" + " I'll try to run again {}s later" + ).format(str(max_fail_count), str(wait_time_after_max_fail))) + statuser_failed_count += 1 + + elif (( + datetime.datetime.now() - statuser_last_failed + ).seconds > wait_time_after_max_fail): + statuser_failed_count = 0 + + # If thread failed test Ftrack and Mongo connection + elif not statuser_thread.isAlive(): + statuser_thread.join() + statuser_thread = None + ftrack_accessible = False + mongo_accessible = False + + _processor_last_failed = datetime.datetime.now() + delta_time = ( + _processor_last_failed - statuser_last_failed + ).seconds + + if delta_time < min_fail_seconds: + statuser_failed_count += 1 + else: + statuser_failed_count = 0 + statuser_last_failed = _processor_last_failed + + elif statuser_thread.stop_subprocess: + print("Main process was stopped by action") + on_exit(processor_thread, storer_thread, statuser_thread) + os.kill(os.getpid(), signal.SIGTERM) + return 1 + # ====== STORER ======= # Run backup thread which does not requeire mongo to work if storer_thread is None: @@ -291,6 +344,7 @@ def main_loop(ftrack_url): storer_name, storer_port, storer_path ) storer_thread.start() + elif storer_failed_count == max_fail_count: print(( "Storer failed {}times I'll try to run again {}s later" @@ -360,44 +414,9 @@ def main_loop(ftrack_url): processor_failed_count = 0 processor_last_failed = _processor_last_failed - # ====== STATUSER ======= - if statuser_thread is None: - if statuser_failed_count < max_fail_count: - statuser_thread = socket_thread.SocketThread( - statuser_name, statuser_port, statuser_path, - [main_info_str] - ) - statuser_thread.start() - - elif statuser_failed_count == max_fail_count: - print(( - "Statuser failed {}times in row" - " I'll try to run again {}s later" - ).format(str(max_fail_count), str(wait_time_after_max_fail))) - statuser_failed_count += 1 - - elif (( - datetime.datetime.now() - statuser_last_failed - ).seconds > wait_time_after_max_fail): - statuser_failed_count = 0 - - # If thread failed test Ftrack and Mongo connection - elif not statuser_thread.isAlive(): - statuser_thread.join() - statuser_thread = None - ftrack_accessible = False - mongo_accessible = False - - 
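
The block being moved around here is one instance of the watchdog policy main_loop applies to all three worker threads: restart a dead thread, count failures that happen too close together, and pause for a while once max_fail_count is reached. The same policy in isolation, with simplified names and timings (an illustrative sketch, not code from the repository):

import datetime
import time

def supervise(make_thread, max_fail_count=3, min_fail_seconds=5,
              wait_after_max_fail=10):
    thread = None
    failed_count = 0
    last_failed = datetime.datetime.now()
    while True:
        if thread is None:
            if failed_count < max_fail_count:
                thread = make_thread()
                thread.start()
            elif failed_count == max_fail_count:
                failed_count += 1  # report once, then wait out the backoff
            elif (datetime.datetime.now()
                  - last_failed).seconds > wait_after_max_fail:
                failed_count = 0
        elif not thread.is_alive():
            thread.join()
            thread = None
            now = datetime.datetime.now()
            # rapid deaths count against the thread; slow ones reset the count
            if (now - last_failed).seconds < min_fail_seconds:
                failed_count += 1
            else:
                failed_count = 0
            last_failed = now
        time.sleep(1)
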
_processor_last_failed = datetime.datetime.now() - delta_time = ( - _processor_last_failed - statuser_last_failed - ).seconds - - if delta_time < min_fail_seconds: - statuser_failed_count += 1 - else: - statuser_failed_count = 0 - statuser_last_failed = _processor_last_failed + if statuser_thread is not None: + statuser_thread.set_process("storer", storer_thread) + statuser_thread.set_process("processor", processor_thread) time.sleep(1) diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py index cb073d83a0..cbe4f9dd8b 100644 --- a/pype/ftrack/ftrack_server/socket_thread.py +++ b/pype/ftrack/ftrack_server/socket_thread.py @@ -3,6 +3,7 @@ import sys import time import socket import threading +import traceback import subprocess from pypeapp import Logger @@ -14,12 +15,13 @@ class SocketThread(threading.Thread): def __init__(self, name, port, filepath, additional_args=[]): super(SocketThread, self).__init__() - self.log = Logger().get_logger("SocketThread", "Event Thread") + self.log = Logger().get_logger(self.__class__.__name__) self.setName(name) self.name = name self.port = port self.filepath = filepath self.additional_args = additional_args + self.sock = None self.subproc = None self.connection = None @@ -59,7 +61,8 @@ class SocketThread(threading.Thread): self.filepath, *self.additional_args, str(self.port) - ] + ], + stdin=subprocess.PIPE ) # Listen for incoming connections @@ -133,3 +136,52 @@ class SocketThread(threading.Thread): if data == b"MongoError": self.mongo_error = True connection.sendall(data) + + +class StatusSocketThread(SocketThread): + process_name_mapping = { + b"RestartS": "storer", + b"RestartP": "processor", + b"RestartM": "main" + } + + def __init__(self, *args, **kwargs): + self.process_threads = {} + self.stop_subprocess = False + super(StatusSocketThread, self).__init__(*args, **kwargs) + + def set_process(self, process_name, thread): + try: + if not self.subproc: + self.process_threads[process_name] = None + return + + if ( + process_name in self.process_threads and + self.process_threads[process_name] == thread + ): + return + + self.process_threads[process_name] = thread + self.subproc.stdin.write( + str.encode("reset:{}".format(process_name)) + ) + self.subproc.stdin.flush() + + except Exception: + print("Could not set thread in StatusSocketThread") + traceback.print_exception(*sys.exc_info()) + + def _handle_data(self, connection, data): + if not data: + return + + process_name = self.process_name_mapping.get(data) + if process_name: + if process_name == "main": + self.stop_subprocess = True + else: + subp = self.process_threads.get(process_name) + if subp: + subp.stop() + connection.sendall(data) diff --git a/pype/ftrack/ftrack_server/sub_event_info.py b/pype/ftrack/ftrack_server/sub_event_info.py index 5a38c992f5..a0c2564e10 100644 --- a/pype/ftrack/ftrack_server/sub_event_info.py +++ b/pype/ftrack/ftrack_server/sub_event_info.py @@ -1,6 +1,8 @@ import os import sys import json +import time +import threading import signal import socket import datetime @@ -29,6 +31,7 @@ action_data = { class ObjectFactory: session = None status_factory = None + checker_thread = None class Status: @@ -267,6 +270,17 @@ def server_activity(event): "title": "Server current status" } + session = ObjectFactory.session + if values["main"]: + session.event_hub.sock.sendall(b"RestartM") + return + + if values["storer"]: + session.event_hub.sock.sendall(b"RestartS") + + if values["processor"]: + session.event_hub.sock.sendall(b"RestartP") 
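
Restart requests travel in the opposite direction from status data: the action handler above writes a raw token (b"RestartS", b"RestartP" or b"RestartM") to its socket, StatusSocketThread._handle_data in the parent maps the token back to a worker thread and stops it, and b"RestartM" flips stop_subprocess so main_loop shuts everything down. The mapping idea, condensed into a hypothetical dispatcher:

RESTART_TOKENS = {
    b"RestartS": "storer",
    b"RestartP": "processor",
    b"RestartM": "main",
}

def dispatch_restart(data, process_threads):
    # Returns the resolved name; "main" restarts are left to the caller.
    process_name = RESTART_TOKENS.get(data)
    if process_name and process_name != "main":
        thread = process_threads.get(process_name)
        if thread is not None:
            thread.stop()
    return process_name
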
+ def trigger_info_get(): session = ObjectFactory.session @@ -367,13 +381,34 @@ def main(args): return _returncode +class OutputChecker(threading.Thread): + read_input = True + + def run(self): + while self.read_input: + line = sys.stdin.readlines() + log.info(str(line)) + # for line in sys.stdin.readlines(): + # log.info(str(line)) + log.info("alive-end") + time.sleep(0.5) + + def stop(self): + self.read_input = False + + if __name__ == "__main__": # Register interupt signal def signal_handler(sig, frame): print("You pressed Ctrl+C. Process ended.") + ObjectFactory.checker_thread.stop() sys.exit(0) signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) + checker_thread = OutputChecker() + ObjectFactory.checker_thread = checker_thread + checker_thread.start() + sys.exit(main(sys.argv)) From 3e6ce6c1644fbdd63deece2bb756b4705ab39f58 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 7 Feb 2020 20:48:38 +0100 Subject: [PATCH 187/434] feat(nuke): setting colorspace to write and Reads from presets --- pype/nuke/lib.py | 101 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 3 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 6d6e7de1b2..a7f1b64eec 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -645,15 +645,105 @@ class WorkfileSettings(object): write_dict (dict): nuke write node as dictionary ''' - # TODO: complete this function so any write node in # scene will have fixed colorspace following presets for the project if not isinstance(write_dict, dict): msg = "set_root_colorspace(): argument should be dictionary" - nuke.message(msg) log.error(msg) return - log.debug("__ set_writes_colorspace(): {}".format(write_dict)) + from avalon.nuke import get_avalon_knob_data + + for node in nuke.allNodes(): + + if node.Class() in ["Viewer", "Dot"]: + continue + + # get data from avalon knob + avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"]) + + if not avalon_knob_data: + continue + + if avalon_knob_data["id"] != "pyblish.avalon.instance": + continue + + # establish families + families = [avalon_knob_data["family"]] + if avalon_knob_data.get("families"): + families.append(avalon_knob_data.get("families")) + + # except disabled nodes but exclude backdrops in test + for fmly, knob in write_dict.items(): + write = None + if (fmly in families): + # Add all nodes in group instances. 
+ if node.Class() == "Group":
+ node.begin()
+ for x in nuke.allNodes():
+ if x.Class() == "Write":
+ write = x
+ node.end()
+ elif node.Class() == "Write":
+ write = node
+ else:
+ log.warning("Wrong write node Class")
+
+ write["colorspace"].setValue(str(knob["colorspace"]))
+ log.info(
+ "Setting `{0}` to `{1}`".format(
+ write.name(),
+ knob["colorspace"]))
+
+ def set_reads_colorspace(self, reads):
+ """ Setting colorspace to Read nodes
+
+ Looping through all Read nodes and trying to set colorspace
+ based on regex rules in presets
+ """
+ changes = dict()
+ for n in nuke.allNodes():
+ file = nuke.filename(n)
+ if not n.Class() == "Read":
+ continue
+
+ # load nuke presets for Read's colorspace
+ read_clrs_presets = get_colorspace_preset().get(
+ "nuke", {}).get("read", {})
+
+ # check if any colorspace preset for read is matching
+ preset_clrsp = next((read_clrs_presets[k]
+ for k in read_clrs_presets
+ if bool(re.search(k, file))),
+ None)
+ log.debug(preset_clrsp)
+ if preset_clrsp is not None:
+ current = n["colorspace"].value()
+ future = str(preset_clrsp)
+ if current != future:
+ changes.update({
+ n.name(): {
+ "from": current,
+ "to": future
+ }
+ })
+ log.debug(changes)
+ if changes:
+ msg = "Read nodes are not set to correct colorspace:\n\n"
+ for nname, knobs in changes.items():
+ msg += str(" - node: '{0}' is now '{1}' "
+ "but should be '{2}'\n").format(
+ nname, knobs["from"], knobs["to"]
+ )
+
+ msg += "\nWould you like to change it?"
+
+ if nuke.ask(msg):
+ for nname, knobs in changes.items():
+ n = nuke.toNode(nname)
+ n["colorspace"].setValue(knobs["to"])
+ log.info(
+ "Setting `{0}` to `{1}`".format(
+ nname,
+ knobs["to"]))

 def set_colorspace(self):
 ''' Setting colorspace following presets
@@ -671,6 +761,7 @@ class WorkfileSettings(object):
 msg = "set_colorspace(): missing `viewer` settings in template"
 nuke.message(msg)
 log.error(msg)
+
 try:
 self.set_writes_colorspace(nuke_colorspace["write"])
 except AttributeError:
@@ -678,6 +769,10 @@ class WorkfileSettings(object):
 nuke.message(msg)
 log.error(msg)

+ reads = nuke_colorspace.get("read")
+ if reads:
+ self.set_reads_colorspace(reads)
+
 try:
 for key in nuke_colorspace:
 log.debug("Preset's colorspace key: {}".format(key))
From 1a84b605a162ab381a459e9421b1d0e3e32677ab Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 7 Feb 2020 20:49:07 +0100
Subject: [PATCH 188/434] fix(nuke, nks): remove obsolete `handles`

---
 pype/plugins/nukestudio/publish/collect_clips.py | 1 -
 pype/plugins/nukestudio/publish/collect_effects.py | 12 +++++++++---
 pype/plugins/nukestudio/publish/collect_handles.py | 4 ----
 .../nukestudio/publish/collect_hierarchy_context.py | 1 -
 pype/plugins/nukestudio/publish/collect_plates.py | 1 -
 pype/plugins/nukestudio/publish/collect_reviews.py | 8 +++++---
 .../nukestudio/publish/collect_tag_handles.py | 12 +++++++-----
 pype/plugins/nukestudio/publish/extract_effects.py | 11 +++++++----
 8 files changed, 28 insertions(+), 22 deletions(-)

diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index 3759d50f6a..4525b4947f 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -105,7 +105,6 @@ class CollectClips(api.ContextPlugin):
 "asset": asset,
 "family": "clip",
 "families": [],
- "handles": 0,
 "handleStart": projectdata.get("handleStart", 0),
 "handleEnd": projectdata.get("handleEnd", 0),
 "version": int(version)})
diff --git a/pype/plugins/nukestudio/publish/collect_effects.py
b/pype/plugins/nukestudio/publish/collect_effects.py index 0aee0adf2e..55ff849c88 100644 --- a/pype/plugins/nukestudio/publish/collect_effects.py +++ b/pype/plugins/nukestudio/publish/collect_effects.py @@ -11,7 +11,9 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin): def process(self, instance): - self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset"))) + self.log.debug( + "Finding soft effect for subset: `{}`".format( + instance.data.get("subset"))) # taking active sequence subset = instance.data.get("subset") @@ -41,8 +43,12 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin): if len(instance.data.get("effectTrackItems", {}).keys()) > 0: instance.data["families"] += ["lut"] - self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys())) - self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {}))) + self.log.debug( + "effects.keys: {}".format( + instance.data.get("effectTrackItems", {}).keys())) + self.log.debug( + "effects: {}".format( + instance.data.get("effectTrackItems", {}))) def add_effect(self, instance, track_index, item): track = item.parentTrack().name() diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py index 8da83e715b..28f502d846 100644 --- a/pype/plugins/nukestudio/publish/collect_handles.py +++ b/pype/plugins/nukestudio/publish/collect_handles.py @@ -24,7 +24,6 @@ class CollectClipHandles(api.ContextPlugin): continue # get handles - handles = int(instance.data["handles"]) handle_start = int(instance.data["handleStart"]) handle_end = int(instance.data["handleEnd"]) @@ -38,19 +37,16 @@ class CollectClipHandles(api.ContextPlugin): self.log.debug("Adding to shared assets: `{}`".format( instance.data["name"])) asset_shared.update({ - "handles": handles, "handleStart": handle_start, "handleEnd": handle_end }) - for instance in filtered_instances: if not instance.data.get("main") and not instance.data.get("handleTag"): self.log.debug("Synchronize handles on: `{}`".format( instance.data["name"])) name = instance.data["asset"] s_asset_data = assets_shared.get(name) - instance.data["handles"] = s_asset_data.get("handles", 0) instance.data["handleStart"] = s_asset_data.get( "handleStart", 0 ) diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 5f29837d80..5085b9719e 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -263,7 +263,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # get custom attributes of the shot if instance.data.get("main"): in_info['custom_attributes'] = { - 'handles': int(instance.data.get('handles', 0)), "handleStart": handle_start, "handleEnd": handle_end, "frameStart": instance.data["frameStart"], diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index 70f0f7407e..b98eccce7f 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -134,7 +134,6 @@ class CollectPlatesData(api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "colorspaceScript": instance.context.data["colorspace"], "families": [f for f in families if 'ftrack' not in f], diff --git 
a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index f9032b2ca4..f223e5ca65 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -125,7 +125,7 @@ class CollectReviews(api.InstancePlugin): thumb_path, format='png' ) - + self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"])) self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) @@ -145,7 +145,10 @@ class CollectReviews(api.InstancePlugin): item = instance.data["item"] transfer_data = [ - "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track", "version" + "handleStart", "handleEnd", "sourceIn", "sourceOut", + "frameStart", "frameEnd", "sourceInH", "sourceOutH", + "clipIn", "clipOut", "clipInH", "clipOutH", "asset", + "track", "version" ] version_data = dict() @@ -154,7 +157,6 @@ class CollectReviews(api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "families": instance.data["families"], "subset": instance.data["subset"], diff --git a/pype/plugins/nukestudio/publish/collect_tag_handles.py b/pype/plugins/nukestudio/publish/collect_tag_handles.py index 929f5e3b68..a6a63faea9 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_handles.py +++ b/pype/plugins/nukestudio/publish/collect_tag_handles.py @@ -38,7 +38,9 @@ class CollectClipTagHandles(api.ContextPlugin): # gets arguments if there are any t_args = t_metadata.get("tag.args", "") - assert t_args, self.log.error("Tag with Handles is missing Args. Use only handle start/end") + assert t_args, self.log.error( + "Tag with Handles is missing Args. 
" + "Use only handle start/end") t_args = json.loads(t_args.replace("'", "\"")) # add in start @@ -55,8 +57,8 @@ class CollectClipTagHandles(api.ContextPlugin): # adding handles to asset_shared on context if instance.data.get("handleEnd"): - assets_shared_a["handleEnd"] = instance.data["handleEnd"] + assets_shared_a[ + "handleEnd"] = instance.data["handleEnd"] if instance.data.get("handleStart"): - assets_shared_a["handleStart"] = instance.data["handleStart"] - if instance.data.get("handles"): - assets_shared_a["handles"] = instance.data["handles"] + assets_shared_a[ + "handleStart"] = instance.data["handleStart"] diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py index 15d2a80a55..5e2721aa8e 100644 --- a/pype/plugins/nukestudio/publish/extract_effects.py +++ b/pype/plugins/nukestudio/publish/extract_effects.py @@ -6,6 +6,7 @@ import pyblish.api import tempfile from avalon import io, api + class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): """Collect video tracks effects into context.""" @@ -17,9 +18,12 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): item = instance.data["item"] effects = instance.data.get("effectTrackItems") - instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]] + instance.data["families"] = [f for f in instance.data.get( + "families", []) if f not in ["lut"]] - self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"])) + self.log.debug( + "__ instance.data[families]: `{}`".format( + instance.data["families"])) # skip any without effects if not effects: @@ -102,7 +106,6 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "colorspaceScript": instance.context.data["colorspace"], "families": ["plate", "lut"], @@ -132,7 +135,7 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): def copy_linked_files(self, effect, dst_dir): for k, v in effect["node"].items(): - if k in "file" and v is not '': + if k in "file" and v != '': base_name = os.path.basename(v) dst = os.path.join(dst_dir, base_name).replace("\\", "/") From 67306bfb66f8505cbfe068bea7f11e9cbc9e36f3 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Sat, 8 Feb 2020 01:25:23 +0100 Subject: [PATCH 189/434] fixed validators to support multiple cameras, deleting obsolete collector --- .../maya/publish/collect_renderlayers.py | 202 ------------------ .../publish/validate_render_single_camera.py | 34 ++- .../maya/publish/validate_rendersettings.py | 71 ++++-- 3 files changed, 85 insertions(+), 222 deletions(-) delete mode 100644 pype/plugins/maya/publish/collect_renderlayers.py diff --git a/pype/plugins/maya/publish/collect_renderlayers.py b/pype/plugins/maya/publish/collect_renderlayers.py deleted file mode 100644 index 0012b28ac9..0000000000 --- a/pype/plugins/maya/publish/collect_renderlayers.py +++ /dev/null @@ -1,202 +0,0 @@ -from maya import cmds - -import pyblish.api - -from avalon import maya, api -import pype.maya.lib as lib - - -class CollectMayaRenderlayers(pyblish.api.ContextPlugin): - """Gather instances by active render layers""" - - order = pyblish.api.CollectorOrder + 0.01 - hosts = ["maya"] - label = "Render Layers" - active = False - - def process(self, context): - - asset = api.Session["AVALON_ASSET"] - filepath = context.data["currentFile"].replace("\\", "/") - - # Get render globals node - 
try: - render_globals = cmds.ls("renderglobalsMain")[0] - for instance in context: - self.log.debug(instance.name) - if instance.data['family'] == 'workfile': - instance.data['publish'] = True - except IndexError: - self.log.info("Skipping renderlayer collection, no " - "renderGlobalsDefault found..") - return - # Get all valid renderlayers - # This is how Maya populates the renderlayer display - rlm_attribute = "renderLayerManager.renderLayerId" - connected_layers = cmds.listConnections(rlm_attribute) or [] - valid_layers = set(connected_layers) - - # Get all renderlayers and check their state - renderlayers = [i for i in cmds.ls(type="renderLayer") if - cmds.getAttr("{}.renderable".format(i)) and not - cmds.referenceQuery(i, isNodeReferenced=True)] - - # Sort by displayOrder - def sort_by_display_order(layer): - return cmds.getAttr("%s.displayOrder" % layer) - - renderlayers = sorted(renderlayers, key=sort_by_display_order) - - for layer in renderlayers: - - # Check if layer is in valid (linked) layers - if layer not in valid_layers: - self.log.warning("%s is invalid, skipping" % layer) - continue - - if layer.endswith("defaultRenderLayer"): - continue - else: - # Remove Maya render setup prefix `rs_` - layername = layer.split("rs_", 1)[-1] - - # Get layer specific settings, might be overrides - data = { - "subset": layername, - "setMembers": layer, - "publish": True, - "frameStart": self.get_render_attribute("startFrame", - layer=layer), - "frameEnd": self.get_render_attribute("endFrame", - layer=layer), - "byFrameStep": self.get_render_attribute("byFrameStep", - layer=layer), - "renderer": self.get_render_attribute("currentRenderer", - layer=layer), - - # instance subset - "family": "Render Layers", - "families": ["renderlayer"], - "asset": asset, - "time": api.time(), - "author": context.data["user"], - - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath - } - - # Apply each user defined attribute as data - for attr in cmds.listAttr(layer, userDefined=True) or list(): - try: - value = cmds.getAttr("{}.{}".format(layer, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. - value = None - - data[attr] = value - - # Include (optional) global settings - # TODO(marcus): Take into account layer overrides - # Get global overrides and translate to Deadline values - overrides = self.parse_options(render_globals) - data.update(**overrides) - - # Define nice label - label = "{0} ({1})".format(layername, data["asset"]) - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) - - instance = context.create_instance(layername) - instance.data["label"] = label - instance.data.update(data) - - def get_render_attribute(self, attr, layer): - return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), - layer=layer) - - def parse_options(self, render_globals): - """Get all overrides with a value, skip those without - - Here's the kicker. These globals override defaults in the submission - integrator, but an empty value means no overriding is made. - Otherwise, Frames would override the default frames set under globals. 
-
- Args:
- render_globals (str): collection of render globals
-
- Returns:
- dict: only overrides with values
- """
-
- attributes = maya.read(render_globals)
-
- options = {"renderGlobals": {}}
- options["renderGlobals"]["Priority"] = attributes["priority"]
-
- # Check for specific pools
- pool_a, pool_b = self._discover_pools(attributes)
- options["renderGlobals"].update({"Pool": pool_a})
- if pool_b:
- options["renderGlobals"].update({"SecondaryPool": pool_b})
-
- legacy = attributes["useLegacyRenderLayers"]
- options["renderGlobals"]["UseLegacyRenderLayers"] = legacy
-
- # Machine list
- machine_list = attributes["machineList"]
- if machine_list:
- key = "Whitelist" if attributes["whitelist"] else "Blacklist"
- options['renderGlobals'][key] = machine_list
-
- # Suspend publish job
- state = "Suspended" if attributes["suspendPublishJob"] else "Active"
- options["publishJobState"] = state
-
- chunksize = attributes.get("framesPerTask", 1)
- options["renderGlobals"]["ChunkSize"] = chunksize
-
- # Override frames should be False if extendFrames is False. This is
- # to ensure it doesn't go off doing crazy unpredictable things
- override_frames = False
- extend_frames = attributes.get("extendFrames", False)
- if extend_frames:
- override_frames = attributes.get("overrideExistingFrame", False)
-
- options["extendFrames"] = extend_frames
- options["overrideExistingFrame"] = override_frames
-
- maya_render_plugin = "MayaBatch"
- if not attributes.get("useMayaBatch", True):
- maya_render_plugin = "MayaCmd"
-
- options["mayaRenderPlugin"] = maya_render_plugin
-
- return options
-
- def _discover_pools(self, attributes):
-
- pool_a = None
- pool_b = None
-
- # Check for specific pools
- pool_b = []
- if "primaryPool" in attributes:
- pool_a = attributes["primaryPool"]
- if "secondaryPool" in attributes:
- pool_b = attributes["secondaryPool"]
-
- else:
- # Backwards compatibility
- pool_str = attributes.get("pools", None)
- if pool_str:
- pool_a, pool_b = pool_str.split(";")
-
- # Ensure empty entry token is caught
- if pool_b == "-":
- pool_b = None
-
- return pool_a, pool_b
diff --git a/pype/plugins/maya/publish/validate_render_single_camera.py b/pype/plugins/maya/publish/validate_render_single_camera.py
index b8561a69c9..51c5f64c86 100644
--- a/pype/plugins/maya/publish/validate_render_single_camera.py
+++ b/pype/plugins/maya/publish/validate_render_single_camera.py
@@ -1,17 +1,26 @@
+import re
+
 import pyblish.api

 import pype.api
 import pype.maya.action
+from maya import cmds
+
+
+ImagePrefixes = {
+ 'mentalray': 'defaultRenderGlobals.imageFilePrefix',
+ 'vray': 'vraySettings.fileNamePrefix',
+ 'arnold': 'defaultRenderGlobals.imageFilePrefix',
+ 'renderman': 'defaultRenderGlobals.imageFilePrefix',
+ 'redshift': 'defaultRenderGlobals.imageFilePrefix'
+}
+

 class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
- """Only one camera may be renderable in a layer.
-
- Currently the pipeline supports only a single camera per layer.
- This is because when multiple cameras are rendered the output files
- automatically get different names because the render token
- is not in the output path. As such the output files conflict with how
- our pipeline expects the output.
+ """Validate renderable camera count for layer and <Camera> token.
+
+ Pipeline supports multiple renderable cameras per layer, but image
+ prefix must contain <camera> token.
""" order = pype.api.ValidateContentsOrder @@ -21,6 +30,8 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): "vrayscene"] actions = [pype.maya.action.SelectInvalidAction] + R_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) + def process(self, instance): """Process all the cameras in the instance""" invalid = self.get_invalid(instance) @@ -31,8 +42,17 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): def get_invalid(cls, instance): cameras = instance.data.get("cameras", []) + renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + file_prefix = cmds.getAttr(ImagePrefixes[renderer]) if len(cameras) > 1: + if re.search(cls.R_CAMERA_TOKEN, file_prefix): + # if there is token in prefix and we have more then + # 1 camera, all is ok. + return cls.log.error("Multiple renderable cameras found for %s: %s " % (instance.data["setMembers"], cameras)) return [instance.data["setMembers"]] + cameras diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py index 7bf44710e2..b74199352a 100644 --- a/pype/plugins/maya/publish/validate_rendersettings.py +++ b/pype/plugins/maya/publish/validate_rendersettings.py @@ -1,4 +1,5 @@ import os +import re from maya import cmds, mel import pymel.core as pm @@ -11,9 +12,13 @@ import pype.maya.lib as lib class ValidateRenderSettings(pyblish.api.InstancePlugin): """Validates the global render settings - * File Name Prefix must be as followed: - * vray: maya/// - * default: maya///_ + * File Name Prefix must start with: `maya/` + all other token are customizable but sane values are: + + `maya///_` + + token is supported also, usefull for multiple renderable + cameras per render layer. * Frame Padding must be: * default: 4 @@ -35,16 +40,30 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): families = ["renderlayer"] actions = [pype.api.RepairAction] + ImagePrefixes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'defaultRenderGlobals.imageFilePrefix', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' + } + + R_AOV_TOKEN = re.compile( + r'%a||', re.IGNORECASE) + R_LAYER_TOKEN = re.compile( + r'%l||', re.IGNORECASE) + R_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) + R_SCENE_TOKEN = re.compile(r'%s|', re.IGNORECASE) + DEFAULT_PADDING = 4 - RENDERER_PREFIX = {"vray": "maya///"} + VRAY_PREFIX = "maya///" DEFAULT_PREFIX = "maya///_" def process(self, instance): invalid = self.get_invalid(instance) - if invalid: - raise ValueError("Invalid render settings found for '%s'!" 
-                             % instance.name)
+        assert invalid is False, ("Invalid render settings "
+                                  "found for '{}'!".format(instance.name))
 
     @classmethod
     def get_invalid(cls, instance):
@@ -53,10 +72,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
 
         renderer = instance.data['renderer']
         layer = instance.data['setMembers']
+        cameras = instance.data.get("cameras", [])
 
         # Get the node attributes for current renderer
         attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default'])
-        prefix = lib.get_attr_in_layer("{node}.{prefix}".format(**attrs),
+        prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer],
                                        layer=layer)
         padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs),
                                         layer=layer)
@@ -68,12 +88,37 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
             cls.log.error("Animation needs to be enabled. Use the same "
                           "frame for start and end to render single frame")
 
-        fname_prefix = cls.get_prefix(renderer)
-
-        if prefix != fname_prefix:
+        if not prefix.lower().startswith("maya/"):
             invalid = True
-            cls.log.error("Wrong file name prefix: %s (expected: %s)"
-                          % (prefix, fname_prefix))
+            cls.log.error("Wrong image prefix [ {} ] - "
+                          "doesn't start with: 'maya/'".format(prefix))
+
+        if not re.search(cls.R_LAYER_TOKEN, prefix):
+            invalid = True
+            cls.log.error("Wrong image prefix [ {} ] - "
+                          "doesn't have: '<layer>' or "
+                          "'<renderlayer>' token".format(prefix))
+
+        if not re.search(cls.R_AOV_TOKEN, prefix):
+            invalid = True
+            cls.log.error("Wrong image prefix [ {} ] - "
+                          "doesn't have: '<aov>' or "
+                          "'<renderpass>' token".format(prefix))
+
+        if len(cameras) > 1:
+            if not re.search(cls.R_CAMERA_TOKEN, prefix):
+                invalid = True
+                cls.log.error("Wrong image prefix [ {} ] - "
+                              "doesn't have: '<camera>' token".format(prefix))
+
+        if renderer == "vray":
+            if prefix.lower() != cls.VRAY_PREFIX.lower():
+                cls.log.warning("warning: prefix differs from "
+                                "recommended {}".format(cls.VRAY_PREFIX))
+        else:
+            if prefix.lower() != cls.DEFAULT_PREFIX.lower():
+                cls.log.warning("warning: prefix differs from "
+                                "recommended {}".format(cls.DEFAULT_PREFIX))
 
         if padding != cls.DEFAULT_PADDING:
             invalid = True

From 10853e1ade753801109009d0497b389533419316 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Sat, 8 Feb 2020 11:26:43 +0100
Subject: [PATCH 190/434] process information is now refreshed by the main process

---
 pype/ftrack/ftrack_server/socket_thread.py  |  2 +-
 pype/ftrack/ftrack_server/sub_event_info.py | 39 ++++++++++++++-------
 2 files changed, 27 insertions(+), 14 deletions(-)

diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py
index cbe4f9dd8b..942965f9e2 100644
--- a/pype/ftrack/ftrack_server/socket_thread.py
+++ b/pype/ftrack/ftrack_server/socket_thread.py
@@ -164,7 +164,7 @@ class StatusSocketThread(SocketThread):
                 self.process_threads[process_name] = thread
 
             self.subproc.stdin.write(
-                str.encode("reset:{}".format(process_name))
+                str.encode("reset:{}\r\n".format(process_name))
            )
             self.subproc.stdin.flush()
 
diff --git a/pype/ftrack/ftrack_server/sub_event_info.py b/pype/ftrack/ftrack_server/sub_event_info.py
index a0c2564e10..4c94513eae 100644
--- a/pype/ftrack/ftrack_server/sub_event_info.py
+++ b/pype/ftrack/ftrack_server/sub_event_info.py
@@ -32,6 +32,7 @@ class ObjectFactory:
     session = None
     status_factory = None
     checker_thread = None
+    last_trigger = None
 
 
 class Status:
@@ -124,8 +125,8 @@ class StatusFactory:
         note_item = {
             "type": "label",
             "value": (
-                "NOTE: Hit `submit` and uncheck all"
-                " checkers to refresh data."
+                "HINT: To refresh data uncheck"
+                " all checkboxes and hit `Submit` button."
             )
         }
         splitter_item = {
@@ -164,9 +165,13 @@ class StatusFactory:
 
         source = event["data"]["source"]
         data = event["data"]["status_info"]
+
+        self.update_status_info(source, data)
+
+    def update_status_info(self, process_name, info):
         for status in self.statuses:
-            if status.name == source:
-                status.update(data)
+            if status.name == process_name:
+                status.update(info)
                 break
 
     def bool_items(self):
@@ -178,7 +183,7 @@ class StatusFactory:
             items.append({
                 "type": "label",
                 "value": (
-                    "WARNING: Main process may not restart"
+                    "WARNING: Main process may shut down when checked"
                     " if does not run as a service!"
                 )
             })
@@ -283,6 +288,11 @@ def server_activity(event):
 
 
 def trigger_info_get():
+    if ObjectFactory.last_trigger:
+        delta = datetime.datetime.now() - ObjectFactory.last_trigger
+        # timedelta.seconds is an attribute, not a method
+        if delta.seconds < 5:
+            return
+
     session = ObjectFactory.session
     session.event_hub.publish(
         ftrack_api.event.base.Event(
@@ -352,8 +362,8 @@ def main(args):
 
     statuse_names = {
         "main": "Main process",
-        "storer": "Storer",
-        "processor": "Processor"
+        "storer": "Event Storer",
+        "processor": "Event Processor"
     }
 
     ObjectFactory.status_factory = StatusFactory(statuse_names)
@@ -386,12 +396,15 @@ class OutputChecker(threading.Thread):
 
     def run(self):
         while self.read_input:
-            line = sys.stdin.readlines()
-            log.info(str(line))
-            # for line in sys.stdin.readlines():
-            #     log.info(str(line))
-            log.info("alive-end")
-            time.sleep(0.5)
+            for line in sys.stdin:
+                line = line.rstrip().lower()
+                if not line.startswith("reset:"):
+                    continue
+                process_name = line.replace("reset:", "")
+
+                ObjectFactory.status_factory.update_status_info(
+                    process_name, None
+                )
 
     def stop(self):
         self.read_input = False

From 49f9dbf4183f057ab2f0ad16fe4b0909de55eef1 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Sat, 8 Feb 2020 11:28:33 +0100
Subject: [PATCH 191/434] renamed sub_event_info to sub_event_status

---
 pype/ftrack/ftrack_server/event_server_cli.py                | 2 +-
 .../ftrack_server/{sub_event_info.py => sub_event_status.py} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename pype/ftrack/ftrack_server/{sub_event_info.py => sub_event_status.py} (100%)

diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py
index 19e889f77d..90c7c566fc 100644
--- a/pype/ftrack/ftrack_server/event_server_cli.py
+++ b/pype/ftrack/ftrack_server/event_server_cli.py
@@ -209,7 +209,7 @@ def main_loop(ftrack_url):
 
     statuser_name = "StorerThread"
     statuser_port = 10021
-    statuser_path = "{}/sub_event_info.py".format(file_path)
+    statuser_path = "{}/sub_event_status.py".format(file_path)
     statuser_thread = None
     statuser_last_failed = datetime.datetime.now()
     statuser_failed_count = 0

diff --git a/pype/ftrack/ftrack_server/sub_event_info.py b/pype/ftrack/ftrack_server/sub_event_status.py
similarity index 100%
rename from pype/ftrack/ftrack_server/sub_event_info.py
rename to pype/ftrack/ftrack_server/sub_event_status.py

From e9c4ec7fee46b87a067efc9a7566a09f071a4ea3 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Sat, 8 Feb 2020 11:30:38 +0100
Subject: [PATCH 192/434] label has IP address of server

---
 pype/ftrack/ftrack_server/sub_event_status.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pype/ftrack/ftrack_server/sub_event_status.py b/pype/ftrack/ftrack_server/sub_event_status.py
index 4c94513eae..8dc176a091 100644
--- a/pype/ftrack/ftrack_server/sub_event_status.py
+++ b/pype/ftrack/ftrack_server/sub_event_status.py
@@ -1,7 +1,6 @@
import os import sys import json -import time import threading import signal import socket @@ -19,9 +18,10 @@ log = Logger().get_logger("Event storer") action_identifier = ( "event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"] ) +host_ip = socket.gethostbyname(socket.gethostname()) action_data = { "label": "Pype Admin", - "variant": "- Event server Status", + "variant": "- Event server Status ({})".format(host_ip), "description": "Get Infromation about event server", "actionIdentifier": action_identifier, "icon": None From 4e85279771711e794330d414537381be9025a4b6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Sat, 8 Feb 2020 12:01:04 +0100 Subject: [PATCH 193/434] added icon to status action --- pype/ftrack/ftrack_server/sub_event_status.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/ftrack_server/sub_event_status.py b/pype/ftrack/ftrack_server/sub_event_status.py index 8dc176a091..1a15a1f28d 100644 --- a/pype/ftrack/ftrack_server/sub_event_status.py +++ b/pype/ftrack/ftrack_server/sub_event_status.py @@ -12,7 +12,7 @@ from pype.ftrack.ftrack_server.lib import ( SocketSession, StatusEventHub, TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT ) -from pypeapp import Logger +from pypeapp import Logger, config log = Logger().get_logger("Event storer") action_identifier = ( @@ -24,7 +24,16 @@ action_data = { "variant": "- Event server Status ({})".format(host_ip), "description": "Get Infromation about event server", "actionIdentifier": action_identifier, - "icon": None + "icon": "{}/ftrack/action_icons/PypeAdmin.svg".format( + os.environ.get( + "PYPE_STATICS_SERVER", + "http://localhost:{}".format( + config.get_presets().get("services", {}).get( + "rest_api", {} + ).get("default_port", 8021) + ) + ) + ) } From 6e788a8bec874c03cfa88c4ab4fadbfa7103606b Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Mon, 10 Feb 2020 15:53:23 +0100 Subject: [PATCH 194/434] fixed families for review and ftrack --- .../global/publish/collect_filesequences.py | 2 +- pype/plugins/global/publish/submit_publish_job.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 947d055d9b..0bab57d6eb 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -491,7 +491,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): { "name": new_subset_name, "family": 'render', - "families": ['render'], + "families": data["metadata"]["families"], "subset": new_subset_name, "asset": data.get( "asset", api.Session["AVALON_ASSET"]), diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 6fc60dc9ff..8877844671 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -297,6 +297,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): source = os.path.join("{root}", relative_path).replace("\\", "/") regex = None + families = ["render"] if data.get("expectedFiles"): representations = [] cols, rem = clique.assemble(data.get("expectedFiles")) @@ -326,11 +327,20 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "stagingDir": os.path.dirname(list(c)[0]), "anatomy_template": "render", "fps": context.data.get("fps", None), - "tags": ["review"] if preview else [], + "tags": ["review", "preview"] if preview else [], } 
representations.append(rep) + # if we have one representation with preview tag + # flag whole instance for review and for ftrack + if preview: + if "ftrack" not in families: + if os.environ.get("FTRACK_SERVER"): + families.append("ftrack") + if "review" not in families: + families.append("review") + for r in rem: ext = r.split(".")[-1] rep = { @@ -364,7 +374,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "frameStart": start, "frameEnd": end, "fps": context.data.get("fps", None), - "families": ["render"], + "families": families, "source": source, "user": context.data["user"], "version": context.data["version"], From 350d1ca7d038c8cc75619915262fb2a7fdff4be0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 10 Feb 2020 16:41:58 +0100 Subject: [PATCH 195/434] implemented action that remove files from disk for paticulat asset version except lastest `x` versions --- .../actions/action_delete_old_versions.py | 481 ++++++++++++++++++ 1 file changed, 481 insertions(+) create mode 100644 pype/ftrack/actions/action_delete_old_versions.py diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py new file mode 100644 index 0000000000..126c9a5e24 --- /dev/null +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -0,0 +1,481 @@ +import os +import collections +import uuid + +import clique +from pymongo import UpdateOne + +from pype.ftrack import BaseAction +from pype.ftrack.lib.io_nonsingleton import DbConnector + +import avalon.pipeline + + +class DeleteOldVersions(BaseAction): + + identifier = "delete.old.versions" + label = "Pype Admin" + variant = "- Delete old versions" + description = ( + "Delete files from older publishes so project can be" + " archived with only lates versions." + ) + + dbcon = DbConnector() + + inteface_title = "Choose your preferences" + splitter_item = {"type": "label", "value": "---"} + sequence_splitter = "__sequence_splitter__" + + def discover(self, session, entities, event): + ''' Validation ''' + selection = event["data"].get("selection") or [] + for entity in selection: + entity_type = (entity.get("entityType") or "").lower() + if entity_type == "assetversion": + return True + return False + + def interface(self, session, entities, event): + items = [] + root = os.environ.get("AVALON_PROJECTS") + if not root: + msg = "Root path to projects is not set." + items.append({ + "type": "label", + "value": "ERROR: {}".format(msg) + }) + self.show_interface( + items=items, title=self.inteface_title, event=event + ) + return { + "success": False, + "message": msg + } + + elif not os.path.exists(root): + msg = "Root path does not exists \"{}\".".format(str(root)) + items.append({ + "type": "label", + "value": "ERROR: {}".format(msg) + }) + self.show_interface( + items=items, title=self.inteface_title, event=event + ) + return { + "success": False, + "message": msg + } + + values = event["data"].get("values") + if values: + versions_count = int(values["last_versions_count"]) + if versions_count >= 1: + return + items.append({ + "type": "label", + "value": ( + "# You have to keep at least 1 version!" + ) + }) + + items.append({ + "type": "label", + "value": ( + "WARNING: This will remove published files of older" + " versions from disk so we don't recommend use" + " this action on \"live\" project." 
+ ) + }) + + items.append(self.splitter_item) + + # How many versions to keep + items.append({ + "type": "label", + "value": "## Choose how many versions you want to keep:" + }) + items.append({ + "type": "label", + "value": ( + "NOTE: We do recommend to keep 2" + " versions (even if default is 1)." + ) + }) + items.append({ + "type": "number", + "name": "last_versions_count", + "label": "Versions", + "value": 1 + }) + + items.append(self.splitter_item) + + items.append({ + "type": "label", + "value": ( + "## Remove publish folder even if there" + " are other than published files:" + ) + }) + items.append({ + "type": "label", + "value": ( + "WARNING: This may remove more than you want." + ) + }) + items.append({ + "type": "boolean", + "name": "force_delete_publish_folder", + "label": "Are you sure?", + "value": True + }) + + return { + "items": items, + "title": self.inteface_title + } + + def launch(self, session, entities, event): + values = event["data"].get("values") + if not values: + return + + versions_count = int(values["last_versions_count"]) + force_to_remove = values["force_delete_publish_folder"] + + _val1 = "OFF" + if force_to_remove: + _val1 = "ON" + + _val3 = "s" + if versions_count == 1: + _val3 = "" + + self.log.debug(( + "Process started. Force to delete publish folder is set to [{0}]" + " and will keep {1} latest version{2}." + ).format(_val1, versions_count, _val3)) + + self.dbcon.install() + + project = None + avalon_asset_names = [] + asset_versions_by_parent_id = collections.defaultdict(list) + subset_names_by_asset_name = collections.defaultdict(list) + + for entity in entities: + parent_ent = entity["asset"]["parent"] + parent_ftrack_id = parent_ent["id"] + parent_name = parent_ent["name"] + + if parent_name not in avalon_asset_names: + avalon_asset_names.append(parent_name) + + # Group asset versions by parent entity + asset_versions_by_parent_id[parent_ftrack_id].append(entity) + + # Get project + if project is None: + project = parent_ent["project"] + + # Collect subset names per asset + subset_name = entity["asset"]["name"] + subset_names_by_asset_name[parent_name].append(subset_name) + + # Set Mongo collection + project_name = project["full_name"] + self.dbcon.Session["AVALON_PROJECT"] = project_name + self.log.debug("Project is set to {}".format(project_name)) + + # Get Assets from avalon database + assets = list(self.dbcon.find({ + "type": "asset", + "name": {"$in": avalon_asset_names} + })) + asset_id_to_name_map = { + asset["_id"]: asset["name"] for asset in assets + } + asset_ids = list(asset_id_to_name_map.keys()) + + self.log.debug("Collected assets ({})".format(len(asset_ids))) + + # Get Subsets + subsets = list(self.dbcon.find({ + "type": "subset", + "parent": {"$in": asset_ids} + })) + subsets_by_id = {} + subset_ids = [] + for subset in subsets: + asset_id = subset["parent"] + asset_name = asset_id_to_name_map[asset_id] + available_subsets = subset_names_by_asset_name[asset_name] + + if subset["name"] not in available_subsets: + continue + + subset_ids.append(subset["_id"]) + subsets_by_id[subset["_id"]] = subset + + self.log.debug("Collected subsets ({})".format(len(subset_ids))) + + # Get Versions + versions = list(self.dbcon.find({ + "type": "version", + "parent": {"$in": subset_ids} + })) + + versions_by_parent = collections.defaultdict(list) + for ent in versions: + versions_by_parent[ent["parent"]].append(ent) + + def sort_func(ent): + return int(ent["name"]) + + last_versions_by_parent = collections.defaultdict(list) + all_last_versions = [] 
+ for parent_id, _versions in versions_by_parent.items(): + for idx, version in enumerate( + sorted(_versions, key=sort_func, reverse=True) + ): + if idx >= versions_count: + break + last_versions_by_parent[parent_id].append(version) + all_last_versions.append(version) + + self.log.debug("Collected versions ({})".format(len(versions))) + + # Filter latest versions + for version in all_last_versions: + versions.remove(version) + + # Filter already deleted versions + versions_to_pop = [] + for version in versions: + version_tags = version["data"].get("tags") + if version_tags and "deleted" in version_tags: + versions_to_pop.append(version) + + for version in versions_to_pop: + subset = subsets_by_id[version["parent"]] + asset_id = subset["parent"] + asset_name = asset_id_to_name_map[asset_id] + msg = "Asset: \"{}\" | Subset: \"{}\" | Version: \"{}\"".format( + asset_name, subset["name"], version["name"] + ) + self.log.warning(( + "Skipping version. Already tagged as `deleted`. < {} >" + ).format(msg)) + versions.remove(version) + + version_ids = [ent["_id"] for ent in versions] + + self.log.debug( + "Filtered versions to delete ({})".format(len(version_ids)) + ) + + if not version_ids: + msg = "Skipping processing. Nothing to delete." + self.log.debug(msg) + return { + "success": True, + "message": msg + } + + repres = list(self.dbcon.find({ + "type": "representation", + "parent": {"$in": version_ids} + })) + + self.log.debug( + "Collected representations to remove ({})".format(len(repres)) + ) + + dir_paths = {} + file_paths_by_dir = collections.defaultdict(list) + for repre in repres: + file_path, seq_path = self.path_from_represenation(repre) + if file_path is None: + self.log.warning(( + "Could not format path for represenation \"{}\"" + ).format(str(repre))) + continue + + dir_path = os.path.dirname(file_path) + dir_id = None + for _dir_id, _dir_path in dir_paths.items(): + if _dir_path == dir_path: + dir_id = _dir_id + break + + if dir_id is None: + dir_id = uuid.uuid4() + dir_paths[dir_id] = dir_path + + file_paths_by_dir[dir_id].append([file_path, seq_path]) + + dir_ids_to_pop = [] + for dir_id, dir_path in dir_paths.items(): + if os.path.exists(dir_path): + continue + + dir_ids_to_pop.append(dir_id) + + # Pop dirs from both dictionaries + for dir_id in dir_ids_to_pop: + dir_paths.pop(dir_id) + paths = file_paths_by_dir.pop(dir_id) + # TODO report of missing directories? + paths_msg = ", ".join([ + "'{}'".format(path[0].replace("\\", "/")) for path in paths + ]) + self.log.warning(( + "Folder does not exist. 
Deleting its files skipped: {}"
+            ).format(paths_msg))
+
+        if force_to_remove:
+            self.delete_whole_dir_paths(dir_paths.values())
+        else:
+            self.delete_only_repre_files(dir_paths, file_paths_by_dir)
+
+        mongo_changes_bulk = []
+        for version in versions:
+            orig_version_tags = version["data"].get("tags") or []
+            version_tags = [tag for tag in orig_version_tags]
+            if "deleted" not in version_tags:
+                version_tags.append("deleted")
+
+            if version_tags == orig_version_tags:
+                continue
+
+            filter = {"_id": version["_id"]}
+            update_data = {"$set": {"data.tags": version_tags}}
+            mongo_changes_bulk.append(UpdateOne(filter, update_data))
+
+        if mongo_changes_bulk:
+            self.dbcon.bulk_write(mongo_changes_bulk)
+
+        self.dbcon.uninstall()
+
+        return True
+
+    def delete_whole_dir_paths(self, dir_paths):
+        for dir_path in dir_paths:
+            # Delete all files and folders in dir path
+            for root, dirs, files in os.walk(dir_path, topdown=False):
+                for name in files:
+                    os.remove(os.path.join(root, name))
+
+                for name in dirs:
+                    os.rmdir(os.path.join(root, name))
+
+            # Delete even the folder and its parent folders if they are empty
+            while True:
+                if not os.path.exists(dir_path):
+                    dir_path = os.path.dirname(dir_path)
+                    continue
+
+                if len(os.listdir(dir_path)) != 0:
+                    break
+
+                os.rmdir(os.path.join(dir_path))
+
+    def delete_only_repre_files(self, dir_paths, file_paths):
+        for dir_id, dir_path in dir_paths.items():
+            dir_files = os.listdir(dir_path)
+            collections, remainders = clique.assemble(dir_files)
+            for file_path, seq_path in file_paths[dir_id]:
+                file_path_base = os.path.split(file_path)[1]
+                # Just remove file if `frame` key was not in context or
+                # filled path is in remainders (single file sequence)
+                if not seq_path or file_path_base in remainders:
+                    if not os.path.exists(file_path):
+                        self.log.warning(
+                            "File was not found: {}".format(file_path)
+                        )
+                        continue
+                    os.remove(file_path)
+                    self.log.debug("Removed file: {}".format(file_path))
+                    remainders.remove(file_path_base)
+                    continue
+
+                seq_path_base = os.path.split(seq_path)[1]
+                head, tail = seq_path_base.split(self.sequence_splitter)
+
+                final_col = None
+                for collection in collections:
+                    if head != collection.head or tail != collection.tail:
+                        continue
+                    final_col = collection
+                    break
+
+                if final_col is not None:
+                    # Fill full path to head
+                    final_col.head = os.path.join(dir_path, final_col.head)
+                    for _file_path in final_col:
+                        if os.path.exists(_file_path):
+                            os.remove(_file_path)
+                    _seq_path = final_col.format("{head}{padding}{tail}")
+                    self.log.debug("Removed files: {}".format(_seq_path))
+                    collections.remove(final_col)
+
+                elif os.path.exists(file_path):
+                    os.remove(file_path)
+                    self.log.debug("Removed file: {}".format(file_path))
+
+                else:
+                    self.log.warning(
+                        "File was not found: {}".format(file_path)
+                    )
+
+        # Delete as many empty parent folders as possible
+        for dir_path in dir_paths.values():
+            while True:
+                if not os.path.exists(dir_path):
+                    dir_path = os.path.dirname(dir_path)
+                    continue
+
+                if len(os.listdir(dir_path)) != 0:
+                    break
+
+                self.log.debug("Removed folder: {}".format(dir_path))
+                os.rmdir(dir_path)
+
+    def path_from_represenation(self, representation):
+        try:
+            template = representation["data"]["template"]
+
+        except KeyError:
+            return (None, None)
+
+        root = os.environ["AVALON_PROJECTS"]
+        if not root:
+            return (None, None)
+
+        sequence_path = None
+        try:
+            context = representation["context"]
+            context["root"] = root
+            path = avalon.pipeline.format_template_with_optional_keys(
+                context, template
+            )
+            if "frame" in context:
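+                # the frame slot gets a placeholder splitter string so
+                # delete_only_repre_files() can split the formatted name
+                # back into sequence head and tail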
context["frame"] = self.sequence_splitter + sequence_path = os.path.normpath( + avalon.pipeline.format_template_with_optional_keys( + context, template + ) + ) + + except KeyError: + # Template references unavailable data + return (None, None) + + return (os.path.normpath(path), sequence_path) + + +def register(session, plugins_presets={}): + '''Register plugin. Called when used as an plugin.''' + + PrepareForArchivation(session, plugins_presets).register() From 0401322e167b909dfcf0194755ca76350e235fec Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Mon, 10 Feb 2020 17:07:14 +0100 Subject: [PATCH 196/434] validator for ass relative texture paths --- .../publish/validate_ass_relative_paths.py | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 pype/plugins/maya/publish/validate_ass_relative_paths.py diff --git a/pype/plugins/maya/publish/validate_ass_relative_paths.py b/pype/plugins/maya/publish/validate_ass_relative_paths.py new file mode 100644 index 0000000000..b5e16103ad --- /dev/null +++ b/pype/plugins/maya/publish/validate_ass_relative_paths.py @@ -0,0 +1,79 @@ +import os +import types + +import maya.cmds as cmds + +import pyblish.api +import pype.api +import pype.maya.action + + +class ValidateAssRelativePaths(pyblish.api.InstancePlugin): + """Ensure exporting ass file has set relative texture paths""" + + order = pype.api.ValidateContentsOrder + hosts = ['maya'] + families = ['ass'] + label = "Validate ASS has relative texture paths" + actions = [pype.api.RepairAction] + + def process(self, instance): + # we cannot ask this until user open render settings as + # `defaultArnoldRenderOptions` doesn't exists + try: + relative_texture = cmds.getAttr( + "defaultArnoldRenderOptions.absolute_procedural_paths") + texture_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.tspath" + ) + except ValueError: + assert False, ("Can not validate, render setting were not opened " + "yet so Arnold setting cannot be validate") + + scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True)) + scene_name, _ = os.path.splitext(scene_basename) + project_root = "{}{}{}{}".format( + os.environ.get("AVALON_PROJECTS"), + os.path.sep, + os.environ.get("AVALON_PROJECT"), + os.pathsep + ) + assert self.maya_is_true(relative_texture) is not True, \ + ("Texture path are set to be absolute") + + texture_search_path.replace("\\", "/") + assert project_root in texture_search_path, \ + ("Project root is not in texture_search_path") + + @classmethod + def repair(cls, instance): + texture_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.tspath" + ) + project_root = "{}{}{}{}".format( + os.environ.get("AVALON_PROJECTS"), + os.path.sep, + os.environ.get("AVALON_PROJECT"), + os.pathsep + ) + + project_root = project_root.replace("\\", "/") + cmds.setAttr("defaultArnoldRenderOptions.tspath", + project_root + texture_search_path, + type="string") + cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths", + False) + + def maya_is_true(self, attr_val): + """ + Whether a Maya attr evaluates to True. + When querying an attribute value from an ambiguous object the + Maya API will return a list of values, which need to be properly + handled to evaluate properly. 
+ """ + if isinstance(attr_val, types.BooleanType): + return attr_val + elif isinstance(attr_val, (types.ListType, types.GeneratorType)): + return any(attr_val) + else: + return bool(attr_val) From 765ec59d7b47238bfc0579c3d2baaf14880f8a7e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 10 Feb 2020 17:07:56 +0100 Subject: [PATCH 197/434] added roles and icon to action --- pype/ftrack/actions/action_delete_old_versions.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index 126c9a5e24..46f3e60d77 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -20,6 +20,10 @@ class DeleteOldVersions(BaseAction): "Delete files from older publishes so project can be" " archived with only lates versions." ) + role_list = ["Pypeclub", "Project Manager", "Administrator"] + icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format( + os.environ.get('PYPE_STATICS_SERVER', '') + ) dbcon = DbConnector() From 1cdc9595515e43d5dab43bfa965b9b37ea2ac184 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 10 Feb 2020 19:41:51 +0100 Subject: [PATCH 198/434] allow for aov_filter preset override --- pype/plugins/global/publish/submit_publish_job.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 8877844671..ece6b3660b 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -148,7 +148,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families = ["render.farm", "renderlayer", "imagesequence"] - aov_filter = [AOVFilter("maya", ["beauty"])] + aov_filter = {"maya": ["beauty"]} enviro_filter = [ "PATH", @@ -307,9 +307,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): preview = False # if filtered aov name is found in filename, toggle it for # preview video renderin - for filter in self.aov_filter: - if os.environ.get("AVALON_APP", "") == filter.app: - for aov in filter.aov: + for app in self.aov_filter: + if os.environ.get("AVALON_APP", "") == app: + for aov in self.aov_filter[app]: if re.match( r".+(?:\.|_)({})(?:\.|_).*".format(aov), list(c)[0] From 2da3de670eda9c2cf9e6fc679ca19c291d1e8128 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 10 Feb 2020 22:44:29 +0100 Subject: [PATCH 199/434] add version --- pype/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/__init__.py b/pype/__init__.py index 91b72d7de5..89c653bf6f 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -9,7 +9,7 @@ from pypeapp import config import logging log = logging.getLogger(__name__) -__version__ = "2.3.0" +__version__ = "2.5.0" PACKAGE_DIR = os.path.dirname(__file__) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") From 62b3f852f1d65286a650ac37fc0f8d479436d440 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 11 Feb 2020 14:01:56 +0100 Subject: [PATCH 200/434] use `get_path_to_ffmpeg` which checks if any of filled ffmpeg paths in FFMPEG_PATH exists --- pype/lib.py | 9 +++++++++ pype/plugins/global/publish/extract_jpeg.py | 3 ++- pype/plugins/global/publish/extract_review.py | 5 ++--- pype/plugins/global/publish/extract_review_slate.py | 3 ++- pype/plugins/global/publish/validate_ffmpeg_installed.py | 6 ++++-- .../standalonepublisher/publish/extract_review.py | 3 ++- 
.../standalonepublisher/publish/extract_thumbnail.py | 3 ++- pype/scripts/otio_burnin.py | 6 +++--- pype/standalonepublish/widgets/widget_drop_frame.py | 3 ++- 9 files changed, 28 insertions(+), 13 deletions(-) diff --git a/pype/lib.py b/pype/lib.py index f26395d930..9cde3bd3af 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -13,6 +13,15 @@ import avalon log = logging.getLogger(__name__) +def get_path_to_ffmpeg(): + paths = os.environ.get("FFMPEG_PATH") or "" + path_items = paths.split(os.pathsep) + for item in path_items: + item = os.path.normpath(item) + if os.path.exists(item): + return item + return "" + # Special naming case for subprocess since its a built-in method. def _subprocess(*args, **kwargs): """Convenience method for getting output errors for subprocess.""" diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 4978649ba2..6a0d7905b0 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -3,6 +3,7 @@ import os import pyblish.api import clique import pype.api +import pype.lib class ExtractJpegEXR(pyblish.api.InstancePlugin): @@ -67,7 +68,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): jpeg_items = [] jpeg_items.append( - os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + os.path.join(pype.lib.get_path_to_ffmpeg(), "ffmpeg")) # override file if already exists jpeg_items.append("-y") # use same input args like with mov diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 2e79d86c38..5895a8c423 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -313,9 +313,8 @@ class ExtractReview(pyblish.api.InstancePlugin): mov_args = [ os.path.join( - os.environ.get( - "FFMPEG_PATH", - ""), "ffmpeg"), + pype.lib.get_path_to_ffmpeg(), "ffmpeg" + ), " ".join(input_args), " ".join(output_args) ] diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 9a720b77a9..5d4990a027 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -1,5 +1,6 @@ import os import pype.api +import pype.lib import pyblish @@ -21,7 +22,7 @@ class ExtractReviewSlate(pype.api.Extractor): suffix = "_slate" slate_path = inst_data.get("slateFrame") - ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg") + ffmpeg_path = os.path.join(pype.lib.get_path_to_ffmpeg(), "ffmpeg") to_width = 1920 to_height = 1080 diff --git a/pype/plugins/global/publish/validate_ffmpeg_installed.py b/pype/plugins/global/publish/validate_ffmpeg_installed.py index df7c330e95..643e0f1821 100644 --- a/pype/plugins/global/publish/validate_ffmpeg_installed.py +++ b/pype/plugins/global/publish/validate_ffmpeg_installed.py @@ -1,6 +1,7 @@ import pyblish.api import os import subprocess +import pype.lib try: import os.errno as errno except ImportError: @@ -28,9 +29,10 @@ class ValidateFfmpegInstallef(pyblish.api.Validator): def process(self, instance): self.log.info("ffmpeg path: `{}`".format( - os.environ.get("FFMPEG_PATH", ""))) + pype.lib.get_path_to_ffmpeg() + )) if self.is_tool( os.path.join( - os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False: + pype.lib.get_path_to_ffmpeg(), "ffmpeg")) is False: self.log.error("ffmpeg not found in PATH") raise RuntimeError('ffmpeg not installed.') diff --git a/pype/plugins/standalonepublisher/publish/extract_review.py 
b/pype/plugins/standalonepublisher/publish/extract_review.py index f06d9bcde0..29e1fcaac0 100644 --- a/pype/plugins/standalonepublisher/publish/extract_review.py +++ b/pype/plugins/standalonepublisher/publish/extract_review.py @@ -4,6 +4,7 @@ import tempfile import pyblish.api import clique import pype.api +import pype.lib class ExtractReviewSP(pyblish.api.InstancePlugin): @@ -148,7 +149,7 @@ class ExtractReviewSP(pyblish.api.InstancePlugin): # output filename output_args.append(full_output_path) - ffmpeg_path = os.getenv("FFMPEG_PATH", "") + ffmpeg_path = pype.lib.get_path_to_ffmpeg() if ffmpeg_path: ffmpeg_path += "/ffmpeg" else: diff --git a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py index 69a2e0fdad..b752419a35 100644 --- a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py +++ b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py @@ -3,6 +3,7 @@ import tempfile import subprocess import pyblish.api import pype.api +import pype.lib class ExtractThumbnailSP(pyblish.api.InstancePlugin): @@ -73,7 +74,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): config_data.get("__default__", {}) ) - ffmpeg_path = os.getenv("FFMPEG_PATH", "") + ffmpeg_path = pype.lib.get_path_to_ffmpeg() if ffmpeg_path: ffmpeg_path += "/ffmpeg" else: diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index f128352974..1d589916e9 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -5,14 +5,14 @@ import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins from pypeapp.lib import config from pype import api as pype -from subprocess import Popen, PIPE +import pype.lib # FFmpeg in PATH is required log = pype.Logger().get_logger("BurninWrapper", "burninwrap") -ffmpeg_path = os.environ.get("FFMPEG_PATH") +ffmpeg_path = pype.lib.get_path_to_ffmpeg() if ffmpeg_path and os.path.exists(ffmpeg_path): # add separator "/" or "\" to be prepared for next part ffmpeg_path += os.path.sep @@ -267,7 +267,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): command = self.command(output=output, args=args, overwrite=overwrite) - proc = Popen(command, shell=True) + proc = subprocess.Popen(command, shell=True) proc.communicate() if proc.returncode != 0: raise RuntimeError("Failed to render '%s': %s'" diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py index 73b9f0e179..aa3335fb78 100644 --- a/pype/standalonepublish/widgets/widget_drop_frame.py +++ b/pype/standalonepublish/widgets/widget_drop_frame.py @@ -4,6 +4,7 @@ import json import clique import subprocess from pypeapp import config +import pype.lib from . import QtWidgets, QtCore from . 
import DropEmpty, ComponentsList, ComponentItem @@ -224,7 +225,7 @@ class DropDataFrame(QtWidgets.QFrame): self._process_data(data) def load_data_with_probe(self, filepath): - ffprobe_path = os.getenv("FFMPEG_PATH", "") + ffprobe_path = pype.lib.get_path_to_ffmpeg() if ffprobe_path: ffprobe_path += '/ffprobe' else: From 52b41212064898fac697cf60dbf6a4f957f60072 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 11 Feb 2020 14:20:31 +0100 Subject: [PATCH 201/434] fix(nk): was causing troubles with linux workstations --- pype/nuke/lib.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index a7f1b64eec..c64dc0b828 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -196,7 +196,7 @@ def format_anatomy(data): "root": api.Session["AVALON_PROJECTS"], "subset": data["avalon"]["subset"], "asset": data["avalon"]["asset"], - "task": api.Session["AVALON_TASK"].lower(), + "task": api.Session["AVALON_TASK"], "family": data["avalon"]["family"], "project": {"name": project_document["name"], "code": project_document["data"].get("code", '')}, @@ -1070,7 +1070,7 @@ class BuildWorkfile(WorkfileSettings): "project": {"name": self._project["name"], "code": self._project["data"].get("code", '')}, "asset": self._asset or os.environ["AVALON_ASSET"], - "task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(), + "task": kwargs.get("task") or api.Session["AVALON_TASK"], "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(), "version": kwargs.get("version", {}).get("name", 1), "user": getpass.getuser(), From c0584eded70c2d63ab4be82484089e263bf15988 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 11 Feb 2020 18:49:40 +0100 Subject: [PATCH 202/434] integrate new will remove old representations if republishing version and set new repres IDs to those previous --- pype/plugins/global/publish/integrate_new.py | 24 +++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 7d95534897..4499445e6e 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -207,6 +207,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): 'parent': subset["_id"], 'name': next_version }) + existing_repres = None if existing_version is None: version_id = io.insert_one(version).inserted_id else: @@ -217,6 +218,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): }, {'$set': version} ) version_id = existing_version['_id'] + existing_repres = {repre["name"]: repre for repre in io.find({ + "type": "representation", + "parent": version_id + })} + instance.data['version'] = version['name'] # Write to disk @@ -249,6 +255,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if 'transfers' not in instance.data: instance.data['transfers'] = [] + new_repre_names = [] for idx, repre in enumerate(instance.data["representations"]): # Collection @@ -419,8 +426,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): continue repre_context[key] = template_data[key] + repre_name = repre['name'] + new_repre_names.append(repre_name) + # Use previous + if existing_repres and repre_name in existing_repres: + repre_id = existing_repres[repre_name]["_id"] + else: + repre_id = io.ObjectId() + representation = { - "_id": io.ObjectId(), + "_id": repre_id, "schema": "pype:representation-2.0", "type": "representation", "parent": version_id, @@ -446,6 +461,13 @@ class 
IntegrateAssetNew(pyblish.api.InstancePlugin): representations.append(representation) self.log.debug("__ representations: {}".format(representations)) + # Remove old representations if there are any (before insertion of new) + if existing_repres: + repre_ids_to_remove = [] + for repre in existing_repres.values(): + repre_ids_to_remove.append(repre["_id"]) + io.delete_many({"_id": {"$in": repre_ids_to_remove}}) + self.log.debug("__ representations: {}".format(representations)) for rep in instance.data["representations"]: self.log.debug("__ represNAME: {}".format(rep['name'])) From 85ba7f17f494a4324c0be113fff563a9edf9d597 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 12 Feb 2020 11:17:08 +0100 Subject: [PATCH 203/434] representations are not deleted but their type changes to archived_representations and their id is changed --- pype/plugins/global/publish/integrate_new.py | 39 +++++++++++++++++--- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 4499445e6e..c8e6a0188e 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -4,6 +4,8 @@ import logging import sys import clique import errno + +from pymongo import DeleteOne, InsertOne import pyblish.api from avalon import api, io from avalon.vendor import filelink @@ -207,21 +209,48 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): 'parent': subset["_id"], 'name': next_version }) - existing_repres = None + if existing_version is None: version_id = io.insert_one(version).inserted_id else: + # Update version data io.update_many({ 'type': 'version', 'parent': subset["_id"], 'name': next_version - }, {'$set': version} - ) + }, { + '$set': version + }) version_id = existing_version['_id'] - existing_repres = {repre["name"]: repre for repre in io.find({ + + # Find representations of existing version and archive them + current_repres = list(io.find({ "type": "representation", "parent": version_id - })} + })) + bulk_writes = [] + for repre in current_repres: + # Representation must change type, + # `_id` must be stored to other key and replaced with new + # - that is because new representations should have same ID + repre_id = repre["_id"] + bulk_writes.append(DeleteOne({"_id": repre_id})) + + repre["orig_id"] = repre_id + repre["_id"] = io.ObjectId() + repre["type"] = "archived_representation" + bulk_writes.append(InsertOne(repre)) + + # bulk updates + if bulk_writes: + io._database[io.Session["AVALON_PROJECT"]].bulk_write( + bulk_writes + ) + + existing_repres = list(io.find({ + "parent": version_id, + "type": "archived_representation" + })) instance.data['version'] = version['name'] From 7f49ed9fb3e353ce0be37c41d70a3da45d368ebb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 12 Feb 2020 11:17:27 +0100 Subject: [PATCH 204/434] check of existing representations was updated --- pype/plugins/global/publish/integrate_new.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index c8e6a0188e..b5b6b10aa2 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -411,7 +411,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if not dst_start_frame: dst_start_frame = dst_padding - dst = "{0}{1}{2}".format( dst_head, dst_start_frame, @@ -457,10 +456,17 @@ class 
IntegrateAssetNew(pyblish.api.InstancePlugin): repre_name = repre['name'] new_repre_names.append(repre_name) - # Use previous - if existing_repres and repre_name in existing_repres: - repre_id = existing_repres[repre_name]["_id"] - else: + + # Use previous representation's id if there are any + repre_id = None + for _repre in existing_repres: + # NOTE should we check lowered names? + if repre_name == _repre["name"]: + repre_id = _repre["orig_id"] + break + + # Create new id if existing representations does not match + if repre_id is None: repre_id = io.ObjectId() representation = { From 0b1451db3f770a245c9606770e9465ab184909ed Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 12 Feb 2020 13:49:34 +0100 Subject: [PATCH 205/434] fixed variable naming --- pype/ftrack/events/event_sync_to_avalon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index eef24a186d..49ac50c1db 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -1643,7 +1643,7 @@ class SyncToAvalonEvent(BaseEvent): new_name, "task", schema_patterns=self.regex_schemas ) if not passed_regex: - self.regex_failed.append(ent_infos["entityId"]) + self.regex_failed.append(ent_info["entityId"]) continue if new_name not in self.task_changes_by_avalon_id[mongo_id]: From e6ba0dea0884e5477aca517a30b259a992ef44ee Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 15:35:05 +0100 Subject: [PATCH 206/434] fix(nk): didnt create backdrop string problem --- pype/plugins/nuke/create/create_backdrop.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nuke/create/create_backdrop.py b/pype/plugins/nuke/create/create_backdrop.py index 2016c66095..8609117a0d 100644 --- a/pype/plugins/nuke/create/create_backdrop.py +++ b/pype/plugins/nuke/create/create_backdrop.py @@ -2,6 +2,7 @@ from avalon.nuke.pipeline import Creator from avalon.nuke import lib as anlib import nuke + class CreateBackdrop(Creator): """Add Publishable Backdrop""" @@ -35,8 +36,8 @@ class CreateBackdrop(Creator): return instance else: - msg = "Please select nodes you " - "wish to add to a container" + msg = str("Please select nodes you " + "wish to add to a container") self.log.error(msg) nuke.message(msg) return From 91aaa4058335d0bbb7d21f6a202f8c36f287dc79 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 15:35:42 +0100 Subject: [PATCH 207/434] fix(nk): not related code in script --- pype/plugins/nuke/load/load_backdrop.py | 71 ------------------------- 1 file changed, 71 deletions(-) diff --git a/pype/plugins/nuke/load/load_backdrop.py b/pype/plugins/nuke/load/load_backdrop.py index 07a6724771..04cff311d1 100644 --- a/pype/plugins/nuke/load/load_backdrop.py +++ b/pype/plugins/nuke/load/load_backdrop.py @@ -240,77 +240,6 @@ class LoadBackdropNodes(api.Loader): return update_container(GN, data_imprint) - def connect_active_viewer(self, group_node): - """ - Finds Active viewer and - place the node under it, also adds - name of group into Input Process of the viewer - - Arguments: - group_node (nuke node): nuke group node object - - """ - group_node_name = group_node["name"].value() - - viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()] - if len(viewer) > 0: - viewer = viewer[0] - else: - if not (len(nodes) < 2): - msg = "Please create Viewer node before you " - "run this action again" - self.log.error(msg) - nuke.message(msg) - return None - - 
# get coordinates of Viewer1 - xpos = viewer["xpos"].value() - ypos = viewer["ypos"].value() - - ypos += 150 - - viewer["ypos"].setValue(ypos) - - # set coordinates to group node - group_node["xpos"].setValue(xpos) - group_node["ypos"].setValue(ypos + 50) - - # add group node name to Viewer Input Process - viewer["input_process_node"].setValue(group_node_name) - - # put backdrop under - pnlib.create_backdrop(label="Input Process", layer=2, - nodes=[viewer, group_node], color="0x7c7faaff") - - return True - - def get_item(self, data, trackIndex, subTrackIndex): - return {key: val for key, val in data.items() - if subTrackIndex == val["subTrackIndex"] - if trackIndex == val["trackIndex"]} - - def byteify(self, input): - """ - Converts unicode strings to strings - It goes trought all dictionary - - Arguments: - input (dict/str): input - - Returns: - dict: with fixed values and keys - - """ - - if isinstance(input, dict): - return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} - elif isinstance(input, list): - return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') - else: - return input def switch(self, container, representation): self.update(container, representation) From 4ace0b2d7ccccb71c0bd2a500f944e4849435028 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 15:36:14 +0100 Subject: [PATCH 208/434] fix(nk): version check --- pype/plugins/nuke/publish/collect_backdrop.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/collect_backdrop.py b/pype/plugins/nuke/publish/collect_backdrop.py index d98a20aee0..10729b217b 100644 --- a/pype/plugins/nuke/publish/collect_backdrop.py +++ b/pype/plugins/nuke/publish/collect_backdrop.py @@ -58,7 +58,11 @@ class CollectBackdrops(pyblish.api.InstancePlugin): last_frame = int(nuke.root()["last_frame"].getValue()) # get version - version = pype.get_version_from_path(nuke.root().name()) + version = instance.context.data.get('version') + + if not version: + raise RuntimeError("Script name has no version in the name.") + instance.data['version'] = version # Add version data to instance From 26f53789f3f7cbdfdac3f1f09ddf0a2d6f7566dc Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 15:42:51 +0100 Subject: [PATCH 209/434] fix(nks): filter out audio trackitems on effect collect --- pype/plugins/nukestudio/publish/collect_clips.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 4525b4947f..48e0cb66db 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -1,7 +1,7 @@ import os from pyblish import api - +import hiero import nuke class CollectClips(api.ContextPlugin): @@ -48,7 +48,9 @@ class CollectClips(api.ContextPlugin): track = item.parent() source = item.source().mediaSource() source_path = source.firstpath() - effects = [f for f in item.linkedItems() if f.isEnabled()] + effects = [f for f in item.linkedItems() + if f.isEnabled() + if isinstance(f, hiero.core.EffectTrackItem)] # If source is *.nk its a comp effect and we need to fetch the # write node output. 
This should be improved by parsing the script From 0cd57430f946badfdb1e06cc9580be7d81f0f6b6 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 15:43:31 +0100 Subject: [PATCH 210/434] fix(nks): removing optionals --- pype/plugins/nukestudio/publish/extract_audio.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pype/plugins/nukestudio/publish/extract_audio.py b/pype/plugins/nukestudio/publish/extract_audio.py index 315ba6784d..2c4afc8412 100644 --- a/pype/plugins/nukestudio/publish/extract_audio.py +++ b/pype/plugins/nukestudio/publish/extract_audio.py @@ -10,8 +10,6 @@ class ExtractAudioFile(pype.api.Extractor): hosts = ["nukestudio"] families = ["clip", "audio"] match = api.Intersection - optional = True - active = False def process(self, instance): import os From f46ca740f53b4472f46c1f955389d5d2d3aaff32 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 15:43:59 +0100 Subject: [PATCH 211/434] feat(nks): adding debug log --- pype/plugins/nukestudio/publish/collect_plates.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index b98eccce7f..75eb5bb043 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -146,6 +146,7 @@ class CollectPlatesData(api.InstancePlugin): head, padding = os.path.splitext(basename) ext = ext[1:] padding = padding[1:] + self.log.debug("_ padding: `{}`".format(padding)) # head, padding, ext = source_file.split('.') source_first_frame = int(padding) padding = len(padding) From a3af0be8cf319448efbaac2df98ec2ce26f86cd5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 12 Feb 2020 16:39:45 +0100 Subject: [PATCH 212/434] ftrack lib has lib file with get_project_from_entity and get_avalon_entities_for_assetversion --- pype/ftrack/lib/__init__.py | 5 ++ pype/ftrack/lib/lib.py | 135 ++++++++++++++++++++++++++++++++++++ 2 files changed, 140 insertions(+) create mode 100644 pype/ftrack/lib/lib.py diff --git a/pype/ftrack/lib/__init__.py b/pype/ftrack/lib/__init__.py index 9af9ded943..eabfdf0d7d 100644 --- a/pype/ftrack/lib/__init__.py +++ b/pype/ftrack/lib/__init__.py @@ -4,3 +4,8 @@ from .ftrack_app_handler import * from .ftrack_event_handler import * from .ftrack_action_handler import * from .ftrack_base_handler import * + +from .lib import ( + get_project_from_entity, + get_avalon_entities_for_assetversion +) diff --git a/pype/ftrack/lib/lib.py b/pype/ftrack/lib/lib.py new file mode 100644 index 0000000000..aee297fc7e --- /dev/null +++ b/pype/ftrack/lib/lib.py @@ -0,0 +1,135 @@ +from bson.objectid import ObjectId + +from .avalon_sync import CustAttrIdKey +import avalon.io + + +def get_project_from_entity(entity): + # TODO add more entities + ent_type_lowered = entity.entity_type.lower() + if ent_type_lowered == "project": + return entity + + elif ent_type_lowered == "assetversion": + return entity["asset"]["parent"]["project"] + + elif "project" in entity: + return entity["project"] + + return None + + +def get_avalon_entities_for_assetversion(asset_version, db_con=None): + output = { + "success": True, + "message": None, + "project": None, + "project_name": None, + "asset": None, + "asset_name": None, + "asset_path": None, + "subset": None, + "subset_name": None, + "version": None, + "version_name": None, + "representations": None + } + + if db_con is None: + db_con = avalon.io + db_con.install() + + ft_asset = asset_version["asset"] + subset_name = ft_asset["name"] + 
version = asset_version["version"] + parent = ft_asset["parent"] + ent_path = "/".join( + [ent["name"] for ent in parent["link"]] + ) + project = get_project_from_entity(asset_version) + project_name = project["full_name"] + + output["project_name"] = project_name + output["asset_name"] = parent["name"] + output["asset_path"] = ent_path + output["subset_name"] = subset_name + output["version_name"] = version + + db_con.Session["AVALON_PROJECT"] = project_name + + avalon_project = db_con.find_one({"type": "project"}) + output["project"] = avalon_project + + if not avalon_project: + output["success"] = False + output["message"] = "Project not synchronized to avalon `{}`".format( + project_name + ) + return output + + asset_ent = None + asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey) + if asset_mongo_id: + try: + asset_mongo_id = ObjectId(asset_mongo_id) + asset_ent = db_con.find_one({ + "type": "asset", + "_id": asset_mongo_id + }) + except Exception: + pass + + if not asset_ent: + asset_ent = db_con.find_one({ + "type": "asset", + "data.ftrackId": parent["id"] + }) + + output["asset"] = asset_ent + + if not asset_ent: + output["success"] = False + output["message"] = "Not synchronized entity to avalon `{}`".format( + ent_path + ) + return output + + asset_mongo_id = asset_ent["_id"] + + subset_ent = db_con.find_one({ + "type": "subset", + "parent": asset_mongo_id, + "name": subset_name + }) + + output["subset"] = subset_ent + + if not subset_ent: + output["success"] = False + output["message"] = ( + "Subset `{}` does not exist under Asset `{}`" + ).format(subset_name, ent_path) + return output + + version_ent = db_con.find_one({ + "type": "version", + "name": version, + "parent": subset_ent["_id"] + }) + + output["version"] = version_ent + + if not version_ent: + output["success"] = False + output["message"] = ( + "Version `{}` does not exist under Subset `{}` | Asset `{}`" + ).format(version, subset_name, ent_path) + return output + + repre_ents = list(db_con.find({ + "type": "representation", + "parent": version_ent["_id"] + })) + + output["representations"] = repre_ents + return output From b69fd842b118107c87bf6f08f3b7eb17510dafa1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 12 Feb 2020 16:40:07 +0100 Subject: [PATCH 213/434] added action for storing thumbnails to avalon entities --- .../action_store_thumbnails_to_avalon.py | 308 ++++++++++++++++++ 1 file changed, 308 insertions(+) create mode 100644 pype/ftrack/actions/action_store_thumbnails_to_avalon.py diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py new file mode 100644 index 0000000000..ff97534656 --- /dev/null +++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -0,0 +1,308 @@ +import os +import requests +import errno + +from bson.objectid import ObjectId +from pype.ftrack import BaseAction +from pype.ftrack.lib import ( + get_project_from_entity, + get_avalon_entities_for_assetversion +) +from pypeapp import Anatomy +from pype.ftrack.lib.io_nonsingleton import DbConnector + + +class StoreThumbnailsToAvalon(BaseAction): + # Action identifier + identifier = "store.thubmnail.to.avalon" + # Action label + label = "Pype Admin" + # Action variant + variant = "- Store Thumbnails to avalon" + # Action description + description = 'Test action' + # roles that are allowed to register this action + role_list = ["Pypeclub", "Administrator", "Project Manager"] + + icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format( + 
os.environ.get('PYPE_STATICS_SERVER', '') + ) + + thumbnail_key = "AVALON_THUMBNAIL_ROOT" + db_con = DbConnector() + + def discover(self, session, entities, event): + for entity in entities: + if entity.entity_type.lower() == "assetversion": + return True + return False + + def launch(self, session, entities, event): + # DEBUG LINE + # root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails" + + thumbnail_roots = os.environ.get(self.thumbnail_key) + if not thumbnail_roots: + return { + "success": False, + "message": "`{}` environment is not set".format( + self.thumbnail_key + ) + } + + existing_thumbnail_root = None + for path in thumbnail_roots.split(os.pathsep): + if os.path.exists(path): + existing_thumbnail_root = path + break + + if existing_thumbnail_root is None: + return { + "success": False, + "message": ( + "Can't access paths, set in `{}` ({})" + ).format(self.thumbnail_key, thumbnail_roots) + } + + project = get_project_from_entity(entities[0]) + project_name = project["full_name"] + anatomy = Anatomy(project_name) + + if "publish" not in anatomy.templates: + msg = "Anatomy does not have set publish key!" + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + if "thumbnail" not in anatomy.templates["publish"]: + msg = ( + "There is not set \"thumbnail\"" + " template in Antomy for project \"{}\"" + ).format(project_name) + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + example_template_data = { + "_id": "ID", + "thumbnail_root": "THUBMNAIL_ROOT", + "thumbnail_type": "THUMBNAIL_TYPE", + "ext": ".EXT", + "project": { + "name": "PROJECT_NAME", + "code": "PROJECT_CODE" + }, + "asset": "ASSET_NAME", + "subset": "SUBSET_NAME", + "version": "VERSION_NAME", + "hierarchy": "HIERARCHY" + } + tmp_filled = anatomy.format_all(example_template_data) + thumbnail_result = tmp_filled["publish"]["thumbnail"] + if not thumbnail_result.solved: + missing_keys = thumbnail_result.missing_keys + invalid_types = thumbnail_result.invalid_types + submsg = "" + if missing_keys: + submsg += "Missing keys: {}".format(", ".join( + ["\"{}\"".format(key) for key in missing_keys] + )) + + if invalid_types: + items = [] + for key, value in invalid_types.items(): + items.append("{}{}".format(str(key), str(value))) + submsg += "Invalid types: {}".format(", ".join(items)) + + msg = ( + "Thumbnail Anatomy template expects more keys than action" + " can offer. {}" + ).format(submsg) + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + + self.db_con.install() + + for entity in entities: + # Skip if entity is not AssetVersion (never should happend, but..) + if entity.entity_type.lower() != "assetversion": + continue + + # Skip if AssetVersion don't have thumbnail + thumbnail_ent = entity["thumbnail"] + if thumbnail_ent is None: + self.log.debug(( + "Skipping. AssetVersion don't " + "have set thumbnail. {}" + ).format(entity["id"])) + continue + + avalon_ents_result = get_avalon_entities_for_assetversion( + entity, self.db_con + ) + version_full_path = ( + "Asset: \"{project_name}/{asset_path}\"" + " | Subset: \"{subset_name}\"" + " | Version: \"{version_name}\"" + ).format(**avalon_ents_result) + + version = avalon_ents_result["version"] + if not version: + self.log.warning(( + "AssetVersion does not have version in avalon. 
{}" + ).format(version_full_path)) + continue + + thumbnail_id = version["data"].get("thumbnail_id") + if thumbnail_id: + self.log.info(( + "AssetVersion skipped, already has thubmanil set. {}" + ).format(version_full_path)) + continue + + # Get thumbnail extension + file_ext = thumbnail_ent["file_type"] + if not file_ext.startswith("."): + file_ext = ".{}".format(file_ext) + + avalon_project = avalon_ents_result["project"] + avalon_asset = avalon_ents_result["asset"] + hierarchy = "" + parents = avalon_asset["data"].get("parents") or [] + if parents: + hierarchy = "/".join(parents) + + # Prepare anatomy template fill data + # 1. Create new id for thumbnail entity + thumbnail_id = ObjectId() + + template_data = { + "_id": str(thumbnail_id), + "thumbnail_root": existing_thumbnail_root, + "thumbnail_type": "thumbnail", + "ext": file_ext, + "project": { + "name": avalon_project["name"], + "code": avalon_project["data"].get("code") + }, + "asset": avalon_ents_result["asset_name"], + "subset": avalon_ents_result["subset_name"], + "version": avalon_ents_result["version_name"], + "hierarchy": hierarchy + } + + anatomy_filled = anatomy.format(template_data) + thumbnail_path = anatomy_filled["publish"]["thumbnail"] + thumbnail_path = thumbnail_path.replace("..", ".") + thumbnail_path = os.path.normpath(thumbnail_path) + + downloaded = False + for loc in (thumbnail_ent.get("component_locations") or []): + res_id = loc.get("resource_identifier") + if not res_id: + continue + + thubmnail_url = self.get_thumbnail_url(res_id) + if self.download_file(thubmnail_url, thumbnail_path): + downloaded = True + break + + if not downloaded: + self.log.warning( + "Could not download thumbnail for {}".format( + version_full_path + ) + ) + continue + + # Clean template data from keys that are dynamic + template_data.pop("_id") + template_data.pop("thumbnail_root") + + thumbnail_entity = { + "_id": thumbnail_id, + "type": "thumbnail", + "schema": "pype:thumbnail-1.0", + "data": { + "template": thumbnail_template, + "template_data": template_data + } + } + + # Create thumbnail entity + self.db_con.insert_one(thumbnail_entity) + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_entity)) + ) + + # Set thumbnail id for version + self.db_con.update_one( + {"_id": version["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + + return True + + def get_thumbnail_url(self, resource_identifier, size=None): + # TODO use ftrack_api method rather (find way how to use it) + url_string = ( + u'{url}/component/thumbnail?id={id}&username={username}' + u'&apiKey={apiKey}' + ) + url = url_string.format( + url=self.session.server_url, + id=resource_identifier, + username=self.session.api_user, + apiKey=self.session.api_key + ) + if size: + url += u'&size={0}'.format(size) + + return url + + def download_file(self, source_url, dst_file_path): + dir_path = os.path.dirname(dst_file_path) + try: + os.makedirs(dir_path) + except OSError as exc: + if exc.errno != errno.EEXIST: + self.log.warning( + "Could not create folder: \"{}\"".format(dir_path) + ) + return False + + self.log.debug( + "Downloading file \"{}\" -> \"{}\"".format( + source_url, dst_file_path + ) + ) + file_open = open(dst_file_path, "wb") + try: + file_open.write(requests.get(source_url).content) + except Exception: + self.log.warning( + "Download of image `{}` failed.".format(source_url) + ) + return False + finally: + file_open.close() + return True + + +def register(session, plugins_presets={}): + StoreThumbnailsToAvalon(session, 
plugins_presets).register() From 256cc85d86f819ebb05c2d7e949ae11ff2d44944 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 16:41:31 +0100 Subject: [PATCH 214/434] fix(nk): removing deprecating code --- pype/lib.py | 67 ++--------------------------------------------------- 1 file changed, 2 insertions(+), 65 deletions(-) diff --git a/pype/lib.py b/pype/lib.py index f26395d930..2235efa2f4 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -361,23 +361,7 @@ def _get_host_name(): def get_asset(asset_name=None): - entity_data_keys_from_project_when_miss = [ - "frameStart", "frameEnd", "handleStart", "handleEnd", "fps", - "resolutionWidth", "resolutionHeight" - ] - - entity_keys_from_project_when_miss = [] - - alternatives = { - "handleStart": "handles", - "handleEnd": "handles" - } - - defaults = { - "handleStart": 0, - "handleEnd": 0 - } - + """ Returning asset document from database """ if not asset_name: asset_name = avalon.api.Session["AVALON_ASSET"] @@ -385,57 +369,10 @@ def get_asset(asset_name=None): "name": asset_name, "type": "asset" }) + if not asset_document: raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) - project_document = io.find_one({"type": "project"}) - - for key in entity_data_keys_from_project_when_miss: - if asset_document["data"].get(key): - continue - - value = project_document["data"].get(key) - if value is not None or key not in alternatives: - asset_document["data"][key] = value - continue - - alt_key = alternatives[key] - value = asset_document["data"].get(alt_key) - if value is not None: - asset_document["data"][key] = value - continue - - value = project_document["data"].get(alt_key) - if value: - asset_document["data"][key] = value - continue - - if key in defaults: - asset_document["data"][key] = defaults[key] - - for key in entity_keys_from_project_when_miss: - if asset_document.get(key): - continue - - value = project_document.get(key) - if value is not None or key not in alternatives: - asset_document[key] = value - continue - - alt_key = alternatives[key] - value = asset_document.get(alt_key) - if value: - asset_document[key] = value - continue - - value = project_document.get(alt_key) - if value: - asset_document[key] = value - continue - - if key in defaults: - asset_document[key] = defaults[key] - return asset_document From 9e13ac98ec34ac376a8f2e81f10722255b4597ab Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 16:42:13 +0100 Subject: [PATCH 215/434] fix(nuke): cleanup and adding debug log --- pype/nuke/lib.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index c64dc0b828..6eb4da951c 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -519,11 +519,6 @@ class WorkfileSettings(object): self.data = kwargs def get_nodes(self, nodes=None, nodes_filter=None): - # filter out only dictionaries for node creation - # - # print("\n\n") - # pprint(self._nodes) - # if not isinstance(nodes, list) and not isinstance(nodes_filter, list): return [n for n in nuke.allNodes()] @@ -791,6 +786,8 @@ class WorkfileSettings(object): return data = self._asset_entity["data"] + log.debug("__ asset data: `{}`".format(data)) + missing_cols = [] check_cols = ["fps", "frameStart", "frameEnd", "handleStart", "handleEnd"] From e19f04ec8590f9aca24700b7a455a681e680ff8e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 17:14:34 +0100 Subject: [PATCH 216/434] fix(nk): multi line string needed to be added to str() --- pype/plugins/nuke/load/load_gizmo_ip.py | 
4 ++-- pype/plugins/nuke/load/load_luts_ip.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/plugins/nuke/load/load_gizmo_ip.py b/pype/plugins/nuke/load/load_gizmo_ip.py index 23d7ef2f4a..5fecbc4c5c 100644 --- a/pype/plugins/nuke/load/load_gizmo_ip.py +++ b/pype/plugins/nuke/load/load_gizmo_ip.py @@ -176,8 +176,8 @@ class LoadGizmoInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - msg = "Please create Viewer node before you " - "run this action again" + msg = str("Please create Viewer node before you " + "run this action again") self.log.error(msg) nuke.message(msg) return None diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py index 2b38a9ff08..41cc6c1a43 100644 --- a/pype/plugins/nuke/load/load_luts_ip.py +++ b/pype/plugins/nuke/load/load_luts_ip.py @@ -276,8 +276,8 @@ class LoadLutsInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - msg = "Please create Viewer node before you " - "run this action again" + msg = str("Please create Viewer node before you " + "run this action again") self.log.error(msg) nuke.message(msg) return None From bf35ee99f6e4c623d131aa7cc729e2f67f76e109 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 12 Feb 2020 17:45:54 +0100 Subject: [PATCH 217/434] add thumbnail to asset as well --- pype/ftrack/actions/action_store_thumbnails_to_avalon.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py index ff97534656..d63d3a6ae3 100644 --- a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py +++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -256,6 +256,11 @@ class StoreThumbnailsToAvalon(BaseAction): {"$set": {"data.thumbnail_id": thumbnail_id}} ) + self.db_con.update_one( + {"_id": avalon_asset["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + return True def get_thumbnail_url(self, resource_identifier, size=None): From 5d8e2dc37fc618304268f49291a38b69740dec82 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 12 Feb 2020 18:00:59 +0100 Subject: [PATCH 218/434] fix(nk): swap `write` family for `render2d` --- pype/plugins/global/load/open_file.py | 2 +- .../global/publish/collect_filesequences.py | 12 +-- pype/plugins/global/publish/extract_jpeg.py | 93 ++++++++++--------- 3 files changed, 53 insertions(+), 54 deletions(-) diff --git a/pype/plugins/global/load/open_file.py b/pype/plugins/global/load/open_file.py index 9425eaab04..b496311e0c 100644 --- a/pype/plugins/global/load/open_file.py +++ b/pype/plugins/global/load/open_file.py @@ -18,7 +18,7 @@ def open(filepath): class Openfile(api.Loader): """Open Image Sequence with system default""" - families = ["write"] + families = ["render2d"] representations = ["*"] label = "Open" diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 6c06229304..8b42606e4a 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -211,12 +211,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): # Get family from the data families = data.get("families", ["render"]) - if "render" not in families: - families.append("render") if "ftrack" not in families: families.append("ftrack") - if "write" in instance_family: - families.append("write") + if families_data and "render2d" in families_data: + families.append("render2d") if 
families_data and "slate" in families_data: families.append("slate") @@ -334,7 +332,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "stagingDir": root, "anatomy_template": "render", "fps": fps, - "tags": ["review"] if not baked_mov_path else [], + "tags": ["review"] if not baked_mov_path else ["thumb-nuke"], } instance.data["representations"].append( representation) @@ -388,8 +386,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): # If no start or end frame provided, get it from collection indices = list(collection.indexes) - start = data.get("frameStart", indices[0]) - end = data.get("frameEnd", indices[-1]) + start = int(data.get("frameStart", indices[0])) + end = int(data.get("frameEnd", indices[-1])) ext = list(collection)[0].split(".")[-1] diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 4978649ba2..7c0820ea28 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -19,7 +19,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): label = "Extract Jpeg EXR" hosts = ["shell"] order = pyblish.api.ExtractorOrder - families = ["imagesequence", "render", "write", "source"] + families = ["imagesequence", "render", "render2d", "source"] enabled = False def process(self, instance): @@ -41,62 +41,63 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): for repre in representations: self.log.debug(repre) - if 'review' not in repre['tags']: - return + if 'review' in repre['tags'] or "thumb-nuke" in repre['tags']: + if not isinstance(repre['files'], list): + return - input_file = repre['files'][0] + input_file = repre['files'][0] - # input_file = ( - # collections[0].format('{head}{padding}{tail}') % start - # ) - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("input {}".format(full_input_path)) + # input_file = ( + # collections[0].format('{head}{padding}{tail}') % start + # ) + full_input_path = os.path.join(stagingdir, input_file) + self.log.info("input {}".format(full_input_path)) - filename = os.path.splitext(input_file)[0] - if not filename.endswith('.'): - filename += "." - jpeg_file = filename + "jpg" - full_output_path = os.path.join(stagingdir, jpeg_file) + filename = os.path.splitext(input_file)[0] + if not filename.endswith('.'): + filename += "." 
+ jpeg_file = filename + "jpg" + full_output_path = os.path.join(stagingdir, jpeg_file) - self.log.info("output {}".format(full_output_path)) + self.log.info("output {}".format(full_output_path)) - config_data = instance.context.data['output_repre_config'] + config_data = instance.context.data['output_repre_config'] - proj_name = os.environ.get('AVALON_PROJECT', '__default__') - profile = config_data.get(proj_name, config_data['__default__']) + proj_name = os.environ.get('AVALON_PROJECT', '__default__') + profile = config_data.get(proj_name, config_data['__default__']) - jpeg_items = [] - jpeg_items.append( - os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) - # override file if already exists - jpeg_items.append("-y") - # use same input args like with mov - jpeg_items.extend(profile.get('input', [])) - # input file - jpeg_items.append("-i {}".format(full_input_path)) - # output file - jpeg_items.append(full_output_path) + jpeg_items = [] + jpeg_items.append( + os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + # override file if already exists + jpeg_items.append("-y") + # use same input args like with mov + jpeg_items.extend(profile.get('input', [])) + # input file + jpeg_items.append("-i {}".format(full_input_path)) + # output file + jpeg_items.append(full_output_path) - subprocess_jpeg = " ".join(jpeg_items) + subprocess_jpeg = " ".join(jpeg_items) - # run subprocess - self.log.debug("{}".format(subprocess_jpeg)) - pype.api.subprocess(subprocess_jpeg) + # run subprocess + self.log.debug("{}".format(subprocess_jpeg)) + pype.api.subprocess(subprocess_jpeg) - if "representations" not in instance.data: - instance.data["representations"] = [] + if "representations" not in instance.data: + instance.data["representations"] = [] - representation = { - 'name': 'thumbnail', - 'ext': 'jpg', - 'files': jpeg_file, - "stagingDir": stagingdir, - "thumbnail": True, - "tags": ['thumbnail'] - } + representation = { + 'name': 'thumbnail', + 'ext': 'jpg', + 'files': jpeg_file, + "stagingDir": stagingdir, + "thumbnail": True, + "tags": ['thumbnail'] + } - # adding representation - self.log.debug("Adding: {}".format(representation)) - representations_new.append(representation) + # adding representation + self.log.debug("Adding: {}".format(representation)) + representations_new.append(representation) instance.data["representations"] = representations_new From 00e77d690d6e8c42999f0ec154c71f84b9dbe52b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 13 Feb 2020 11:36:55 +0100 Subject: [PATCH 219/434] added notelabellink to ignored entity types --- pype/ftrack/events/event_sync_to_avalon.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index 49ac50c1db..708ae707e9 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -28,7 +28,7 @@ class SyncToAvalonEvent(BaseEvent): ignore_entTypes = [ "socialfeed", "socialnotification", "note", "assetversion", "job", "user", "reviewsessionobject", "timer", - "timelog", "auth_userrole", "appointment" + "timelog", "auth_userrole", "appointment", "notelabellink" ] ignore_ent_types = ["Milestone"] ignore_keys = ["statusid", "thumbid"] From 2ff72b5aeea0ce4c83e27b84f7da017733f7b489 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 13 Feb 2020 11:37:13 +0100 Subject: [PATCH 220/434] small cleanup in code --- pype/ftrack/events/event_sync_to_avalon.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) 
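The `ignore_entTypes` list extended in [PATCH 219/434] above acts as the first gate for incoming ftrack events: any entity info whose `entityType` matches an entry is dropped before the synchronization logic runs. A minimal sketch of how such a filter is typically applied, with an assumed event payload shape for illustration (this is not the plugin's actual code):

    IGNORE_ENT_TYPES = {
        "socialfeed", "socialnotification", "note", "assetversion",
        "job", "user", "reviewsessionobject", "timer", "timelog",
        "auth_userrole", "appointment", "notelabellink"
    }

    def filter_event_entities(event):
        # Keep only entity infos whose entityType is not ignored
        entities = event["data"].get("entities") or []
        return [
            ent_info for ent_info in entities
            if ent_info.get("entityType") not in IGNORE_ENT_TYPES
        ]
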
diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index 708ae707e9..643a3d793e 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -573,8 +573,7 @@ class SyncToAvalonEvent(BaseEvent):
         if auto_sync is not True:
             return True
 
-        debug_msg = ""
-        debug_msg += "Updated: {}".format(len(updated))
+        debug_msg = "Updated: {}".format(len(updated))
         debug_action_map = {
             "add": "Created",
             "remove": "Removed",

From 5b1f33350b2d2b6d6d02d19919435aa73ef35c9a Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 13 Feb 2020 11:37:43 +0100
Subject: [PATCH 221/434] added another bug report message when the
 configuration id is not for a specific entity

---
 pype/ftrack/events/event_sync_to_avalon.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index 643a3d793e..c646756788 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -1544,6 +1544,14 @@ class SyncToAvalonEvent(BaseEvent):
                     entity_type_conf_ids[entity_type] = configuration_id
                     break
 
+            if not configuration_id:
+                self.log.warning(
+                    "BUG REPORT: Missing configuration for `{} < {} >`".format(
+                        entity_type, ent_info["entityType"]
+                    )
+                )
+                continue
+
             _entity_key = collections.OrderedDict({
                 "configuration_id": configuration_id,
                 "entity_id": ftrack_id

From bbe3ce3781cd4ff4fc5181e9237cc984c29c6836 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 13 Feb 2020 11:38:44 +0100
Subject: [PATCH 222/434] added debug logs, for now, to find out which entity
 types we are actually using instead of ignoring more of them blindly

---
 pype/ftrack/events/event_sync_to_avalon.py | 33 ++++++++++++++++++++++
 1 file changed, 33 insertions(+)

diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index c646756788..345bc5b925 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -3,6 +3,7 @@ import collections
 import copy
 import queue
 import time
+import datetime
 import atexit
 import traceback
@@ -51,9 +52,36 @@ class SyncToAvalonEvent(BaseEvent):
 
     def __init__(self, session, plugins_presets={}):
         '''Expects a ftrack_api.Session instance'''
+        # Debug settings
+        # - time expiration in seconds
+        self.debug_print_time_expiration = 5 * 60
+        # - store current time
+        self.debug_print_time = datetime.datetime.now()
+        # - store synchronized entity types to be able to use only
+        #   entityTypes of interest instead of filtering by ignored ones
+        self.debug_sync_types = collections.defaultdict(list)
+
+        # Set processing session to not use the global one
         self.set_process_session(session)
         super().__init__(session, plugins_presets)
 
+    def debug_logs(self):
+        """Debug method for printing small debug messages.
""" + now_datetime = datetime.datetime.now() + delta = now_datetime - self.debug_print_time + if delta.total_seconds() < self.debug_print_time_expiration: + return + + self.debug_print_time = now_datetime + known_types_items = [] + for entityType, entity_type in self.debug_sync_types.items(): + known_types_items.append("{} <{}>".format(entity_type, entityType)) + + known_entityTypes = ", ".join(known_types_items) + self.log.debug( + "DEBUG MESSAGE: Known entityTypes {}".format(known_entityTypes) + ) + @property def cur_project(self): if self._cur_project is None: @@ -484,6 +512,9 @@ class SyncToAvalonEvent(BaseEvent): if not entity_type or entity_type in self.ignore_ent_types: continue + if entity_type not in self.debug_sync_types[entityType]: + self.debug_sync_types[entityType].append(entity_type) + action = ent_info["action"] ftrack_id = ent_info["entityId"] if isinstance(ftrack_id, list): @@ -633,6 +664,8 @@ class SyncToAvalonEvent(BaseEvent): self.ftrack_added = entities_by_action["add"] self.ftrack_updated = updated + self.debug_logs() + self.log.debug("Synchronization begins") try: time_1 = time.time() From 9fec5fa0e3f997e85d16ae5b83f3771c828a2de8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 13 Feb 2020 12:08:06 +0100 Subject: [PATCH 223/434] fixed messages --- pype/ftrack/events/event_sync_to_avalon.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index 345bc5b925..53de588bcc 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -75,11 +75,14 @@ class SyncToAvalonEvent(BaseEvent): self.debug_print_time = now_datetime known_types_items = [] for entityType, entity_type in self.debug_sync_types.items(): - known_types_items.append("{} <{}>".format(entity_type, entityType)) + ent_types_msg = ", ".join(entity_type) + known_types_items.append( + "<{}> ({})".format(entityType, ent_types_msg) + ) known_entityTypes = ", ".join(known_types_items) self.log.debug( - "DEBUG MESSAGE: Known entityTypes {}".format(known_entityTypes) + "DEBUG MESSAGE: Known types {}".format(known_entityTypes) ) @property @@ -1603,7 +1606,7 @@ class SyncToAvalonEvent(BaseEvent): try: # Commit changes of mongo_id to empty string self.process_session.commit() - self.log.debug("Commititng unsetting") + self.log.debug("Committing unsetting") except Exception: self.process_session.rollback() # TODO logging From f12bb0f8597bcbad0862b5eac50963d225e1284b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 13 Feb 2020 12:08:26 +0100 Subject: [PATCH 224/434] fixed prints when credentials to event server are not valid --- pype/ftrack/ftrack_server/event_server_cli.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py index b09b0bc84e..cae037f2d9 100644 --- a/pype/ftrack/ftrack_server/event_server_cli.py +++ b/pype/ftrack/ftrack_server/event_server_cli.py @@ -63,10 +63,19 @@ def validate_credentials(url, user, api): ) session.close() except Exception as e: - print( - 'ERROR: Can\'t log into Ftrack with used credentials:' - ' Ftrack server: "{}" // Username: {} // API key: {}' - ).format(url, user, api) + print("Can't log into Ftrack with used credentials:") + ftrack_cred = { + "Ftrack server": str(url), + "Username": str(user), + "API key": str(api) + } + item_lens = [len(key) + 1 for key in ftrack_cred.keys()] + justify_len = 
max(*item_lens) + for key, value in ftrack_cred.items(): + print("{} {}".format( + (key + ":").ljust(justify_len, " "), + value + )) return False print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format( From aea05e2fe912c1a46d60625dc57d0f73ec009165 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 13 Feb 2020 12:09:12 +0100 Subject: [PATCH 225/434] fixed error message formatting --- pype/ftrack/lib/ftrack_base_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/lib/ftrack_base_handler.py b/pype/ftrack/lib/ftrack_base_handler.py index 8329505ffb..f11cb020e9 100644 --- a/pype/ftrack/lib/ftrack_base_handler.py +++ b/pype/ftrack/lib/ftrack_base_handler.py @@ -49,7 +49,7 @@ class BaseHandler(object): ).format( str(type(session)), str(ftrack_api.session.Session), - str(session_processor.ProcessSession) + str(SocketSession) )) self._session = session From 75bff66ce21e88cd43f165f2355286882b0f4bf3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Fri, 14 Feb 2020 10:40:22 +0000 Subject: [PATCH 226/434] submit_publish_job.py edited online with Bitbucket --- pype/plugins/global/publish/submit_publish_job.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index a9fa8febd4..792fc05a38 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -166,6 +166,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "PYPE_STUDIO_PROJECTS_PATH", "PYPE_STUDIO_PROJECTS_MOUNT" ] + + deadline_pool = "" def _submit_deadline_post_job(self, instance, job): """ @@ -201,7 +203,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "JobDependency0": job["_id"], "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), - "Priority": job["Props"]["Pri"] + "Priority": job["Props"]["Pri"], + "Pool": self.deadline_pool }, "PluginInfo": { "Version": "3.6", From f6b91ed589f94da3c9e3989d1ce04b2aaa405122 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:06:50 +0100 Subject: [PATCH 227/434] credentials functions are not private and are ready to store credentials by host and user --- pype/ftrack/lib/credentials.py | 163 ++++++++++++++++++++++----------- 1 file changed, 109 insertions(+), 54 deletions(-) diff --git a/pype/ftrack/lib/credentials.py b/pype/ftrack/lib/credentials.py index 7e305942f2..16b1fb25fb 100644 --- a/pype/ftrack/lib/credentials.py +++ b/pype/ftrack/lib/credentials.py @@ -2,85 +2,140 @@ import os import json import ftrack_api import appdirs +import getpass +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse -config_path = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype')) -action_file_name = 'ftrack_cred.json' -event_file_name = 'ftrack_event_cred.json' -action_fpath = os.path.join(config_path, action_file_name) -event_fpath = os.path.join(config_path, event_file_name) -folders = set([os.path.dirname(action_fpath), os.path.dirname(event_fpath)]) +CONFIG_PATH = os.path.normpath(appdirs.user_data_dir("pype-app", "pype")) +CREDENTIALS_FILE_NAME = "ftrack_cred.json" +CREDENTIALS_PATH = os.path.join(CONFIG_PATH, CREDENTIALS_FILE_NAME) +CREDENTIALS_FOLDER = os.path.dirname(CREDENTIALS_PATH) -for folder in folders: - if not os.path.isdir(folder): - os.makedirs(folder) +if not os.path.isdir(CREDENTIALS_FOLDER): + 
os.makedirs(CREDENTIALS_FOLDER) + +USER_GETTER = None -def _get_credentials(event=False): - if event: - fpath = event_fpath - else: - fpath = action_fpath +def get_ftrack_hostname(ftrack_server=None): + if not ftrack_server: + ftrack_server = os.environ["FTRACK_SERVER"] + if "//" not in ftrack_server: + ftrack_server = "//" + ftrack_server + + return urlparse(ftrack_server).hostname + + +def get_user(): + if USER_GETTER: + return USER_GETTER() + return getpass.getuser() + + +def get_credentials(ftrack_server=None, user=None): credentials = {} - try: - file = open(fpath, 'r') - credentials = json.load(file) - except Exception: - file = open(fpath, 'w') + if not os.path.exists(CREDENTIALS_PATH): + with open(CREDENTIALS_PATH, "w") as file: + file.write(json.dumps(credentials)) + file.close() + return credentials - file.close() + with open(CREDENTIALS_PATH, "r") as file: + content = file.read() + + hostname = get_ftrack_hostname(ftrack_server) + if not user: + user = get_user() + + content_json = json.loads(content or "{}") + credentials = content_json.get(hostname, {}).get(user) or {} return credentials -def _save_credentials(username, apiKey, event=False, auto_connect=None): - data = { - 'username': username, - 'apiKey': apiKey +def save_credentials(ft_user, ft_api_key, ftrack_server=None, user=None): + hostname = get_ftrack_hostname(ftrack_server) + if not user: + user = get_user() + + with open(CREDENTIALS_PATH, "r") as file: + content = file.read() + + content_json = json.loads(content or "{}") + if hostname not in content_json: + content_json[hostname] = {} + + content_json[hostname][user] = { + "username": ft_user, + "api_key": ft_api_key } - if event: - fpath = event_fpath - if auto_connect is None: - cred = _get_credentials(True) - auto_connect = cred.get('auto_connect', False) - data['auto_connect'] = auto_connect - else: - fpath = action_fpath + # Deprecated keys + if "username" in content_json: + content_json.pop("username") + if "apiKey" in content_json: + content_json.pop("apiKey") - file = open(fpath, 'w') - file.write(json.dumps(data)) - file.close() + with open(CREDENTIALS_PATH, "w") as file: + file.write(json.dumps(content_json, indent=4)) -def _clear_credentials(event=False): - if event: - fpath = event_fpath - else: - fpath = action_fpath - open(fpath, 'w').close() - _set_env(None, None) +def clear_credentials(ft_user=None, ftrack_server=None, user=None): + if not ft_user: + ft_user = os.environ.get("FTRACK_API_USER") + + if not ft_user: + return + + hostname = get_ftrack_hostname(ftrack_server) + if not user: + user = get_user() + + with open(CREDENTIALS_PATH, "r") as file: + content = file.read() + + content_json = json.loads(content or "{}") + if hostname not in content_json: + content_json[hostname] = {} + + content_json[hostname].pop(user, None) + + with open(CREDENTIALS_PATH, "w") as file: + file.write(json.dumps(content_json)) -def _set_env(username, apiKey): - if not username: - username = '' - if not apiKey: - apiKey = '' - os.environ['FTRACK_API_USER'] = username - os.environ['FTRACK_API_KEY'] = apiKey +def set_env(ft_user=None, ft_api_key=None): + os.environ["FTRACK_API_USER"] = ft_user or "" + os.environ["FTRACK_API_KEY"] = ft_api_key or "" -def _check_credentials(username=None, apiKey=None): +def get_env_credentials(): + return ( + os.environ.get("FTRACK_API_USER"), + os.environ.get("FTRACK_API_KEY") + ) - if username and apiKey: - _set_env(username, apiKey) + +def check_credentials(ft_user, ft_api_key, ftrack_server=None): + if not ftrack_server: + 
ftrack_server = os.environ["FTRACK_SERVER"] + + if not ft_user or not ft_api_key: + return False try: - session = ftrack_api.Session() + session = ftrack_api.Session( + server_url=ftrack_server, + api_key=ft_api_key, + api_user=ft_user + ) session.close() - except Exception as e: + + except Exception: return False return True From 0272d38c7eb98bb68341b1762b93f5da4571b695 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:09:07 +0100 Subject: [PATCH 228/434] lib init do not import all credentials functions but only credentials module --- pype/ftrack/lib/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/lib/__init__.py b/pype/ftrack/lib/__init__.py index eabfdf0d7d..9da3b819b3 100644 --- a/pype/ftrack/lib/__init__.py +++ b/pype/ftrack/lib/__init__.py @@ -1,5 +1,5 @@ from . import avalon_sync -from .credentials import * +from . import credentials from .ftrack_app_handler import * from .ftrack_event_handler import * from .ftrack_action_handler import * From 79245bcd00283fb8e424ce438e836af0b17eba70 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:10:50 +0100 Subject: [PATCH 229/434] user module can execute callbacks on username change --- pype/user/user_module.py | 16 +++++++++++++++- pype/user/widget_user.py | 2 +- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/pype/user/user_module.py b/pype/user/user_module.py index d70885b211..a43866f471 100644 --- a/pype/user/user_module.py +++ b/pype/user/user_module.py @@ -19,8 +19,8 @@ class UserModule: log = pype.Logger().get_logger("UserModule", "user") def __init__(self, main_parent=None, parent=None): + self._callbacks_on_user_change = [] self.cred = {} - self.cred_path = os.path.normpath(os.path.join( self.cred_folder_path, self.cred_filename )) @@ -28,6 +28,9 @@ class UserModule: self.load_credentials() + def register_callback_on_user_change(self, callback): + self._callbacks_on_user_change.append(callback) + def tray_start(self): """Store credentials to env and preset them to widget""" username = "" @@ -95,6 +98,17 @@ class UserModule: )) return self.save_credentials(getpass.getuser()) + def change_credentials(self, username): + self.save_credentials(username) + for callback in self._callbacks_on_user_change: + try: + callback() + except Exception: + self.log.warning( + "Failed to execute callback \"{}\".".format(str(callback)), + exc_info=True + ) + def save_credentials(self, username): """Save credentials to JSON file, env and widget""" if username is None: diff --git a/pype/user/widget_user.py b/pype/user/widget_user.py index 7ca12ec4d4..27faa857f5 100644 --- a/pype/user/widget_user.py +++ b/pype/user/widget_user.py @@ -77,7 +77,7 @@ class UserWidget(QtWidgets.QWidget): def click_save(self): # all what should happen - validations and saving into appsdir username = self.input_username.text() - self.module.save_credentials(username) + self.module.change_credentials(username) self._close_widget() def closeEvent(self, event): From ce5ad584dd405597272c0b592998cce7e9953ef8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:11:06 +0100 Subject: [PATCH 230/434] user module has get_user method to get currently set user --- pype/user/user_module.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pype/user/user_module.py b/pype/user/user_module.py index a43866f471..46ceb0031f 100644 --- a/pype/user/user_module.py +++ b/pype/user/user_module.py @@ -40,6 +40,9 @@ class UserModule: os.environ[self.env_name] = username 
self.widget_login.set_user(username) + def get_user(self): + return self.cred.get("username") or getpass.getuser() + def process_modules(self, modules): """ Gives ability to connect with imported modules from TrayManager. From 908a89f4ca2a0af681021f9fb8c86c7fd4723a93 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:11:44 +0100 Subject: [PATCH 231/434] ftrack module uses new credentials functions and has ability to change user on User module's user change --- pype/ftrack/tray/ftrack_module.py | 57 ++++++++++++++++++------------- pype/ftrack/tray/login_dialog.py | 12 +++---- 2 files changed, 40 insertions(+), 29 deletions(-) diff --git a/pype/ftrack/tray/ftrack_module.py b/pype/ftrack/tray/ftrack_module.py index 250872f239..5811209a02 100644 --- a/pype/ftrack/tray/ftrack_module.py +++ b/pype/ftrack/tray/ftrack_module.py @@ -34,29 +34,28 @@ class FtrackModule: def validate(self): validation = False - cred = credentials._get_credentials() - try: - if 'username' in cred and 'apiKey' in cred: - validation = credentials._check_credentials( - cred['username'], - cred['apiKey'] - ) - if validation is False: - self.show_login_widget() - else: - self.show_login_widget() - - except Exception as e: - log.error("We are unable to connect to Ftrack: {0}".format(e)) - - validation = credentials._check_credentials() - if validation is True: + cred = credentials.get_credentials() + ft_user = cred.get("username") + ft_api_key = cred.get("api_key") + validation = credentials.check_credentials(ft_user, ft_api_key) + if validation: + credentials.set_env(ft_user, ft_api_key) log.info("Connected to Ftrack successfully") self.loginChange() - else: - log.warning("Please sign in to Ftrack") - self.bool_logged = False - self.set_menu_visibility() + + return validation + + if not validation and ft_user and ft_api_key: + log.warning( + "Current Ftrack credentials are not valid. 
{}: {} - {}".format( + str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key + ) + ) + + log.info("Please sign in to Ftrack") + self.bool_logged = False + self.show_login_widget() + self.set_menu_visibility() return validation @@ -67,7 +66,7 @@ class FtrackModule: self.start_action_server() def logout(self): - credentials._clear_credentials() + credentials.clear_credentials() self.stop_action_server() log.info("Logged out of Ftrack") @@ -307,11 +306,23 @@ class FtrackModule: except Exception as e: log.error("During Killing Timer event server: {0}".format(e)) + def changed_user(self): + self.stop_action_server() + credentials.set_env() + self.validate() + def process_modules(self, modules): if 'TimersManager' in modules: self.timer_manager = modules['TimersManager'] self.timer_manager.add_module(self) + if "UserModule" in modules: + credentials.USER_GETTER = modules["UserModule"].get_user + modules["UserModule"].register_callback_on_user_change( + self.changed_user + ) + + def start_timer_manager(self, data): if self.thread_timer is not None: self.thread_timer.ftrack_start_timer(data) @@ -336,7 +347,7 @@ class FtrackEventsThread(QtCore.QThread): def __init__(self, parent): super(FtrackEventsThread, self).__init__() - cred = credentials._get_credentials() + cred = credentials.get_credentials() self.username = cred['username'] self.user = None self.last_task = None diff --git a/pype/ftrack/tray/login_dialog.py b/pype/ftrack/tray/login_dialog.py index 4dcbec5ab3..5f3777f93e 100644 --- a/pype/ftrack/tray/login_dialog.py +++ b/pype/ftrack/tray/login_dialog.py @@ -204,11 +204,11 @@ class Login_Dialog_ui(QtWidgets.QWidget): self.setError("{0} {1}".format(msg, " and ".join(missing))) return - verification = credentials._check_credentials(username, apiKey) + verification = credentials.check_credentials(username, apiKey) if verification: - credentials._save_credentials(username, apiKey, self.is_event) - credentials._set_env(username, apiKey) + credentials.save_credentials(username, apiKey, self.is_event) + credentials.set_env(username, apiKey) if self.parent is not None: self.parent.loginChange() self._close_widget() @@ -304,11 +304,11 @@ class Login_Dialog_ui(QtWidgets.QWidget): self._login_server_thread.start(url) return - verification = credentials._check_credentials(username, apiKey) + verification = credentials.check_credentials(username, apiKey) if verification is True: - credentials._save_credentials(username, apiKey, self.is_event) - credentials._set_env(username, apiKey) + credentials.save_credentials(username, apiKey, self.is_event) + credentials.set_env(username, apiKey) if self.parent is not None: self.parent.loginChange() self._close_widget() From a7c4dffb42c78a096655efa50e6164e579584636 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:12:05 +0100 Subject: [PATCH 232/434] event server cli also uses new credentials functions --- pype/ftrack/ftrack_server/event_server_cli.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py index b09b0bc84e..d889b6be23 100644 --- a/pype/ftrack/ftrack_server/event_server_cli.py +++ b/pype/ftrack/ftrack_server/event_server_cli.py @@ -446,9 +446,9 @@ def main(argv): event_paths = kwargs.ftrackeventpaths if not kwargs.noloadcred: - cred = credentials._get_credentials(True) + cred = credentials.get_credentials(ftrack_url) username = cred.get('username') - api_key = cred.get('apiKey') + api_key = cred.get('api_key') if 
kwargs.ftrackuser: username = kwargs.ftrackuser @@ -482,7 +482,7 @@ def main(argv): return 1 if kwargs.storecred: - credentials._save_credentials(username, api_key, True) + credentials.save_credentials(username, api_key, ftrack_url) # Set Ftrack environments os.environ["FTRACK_SERVER"] = ftrack_url From 4de7478d9dcf176eb349a52a20eddac76e0424e8 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:19:13 +0100 Subject: [PATCH 233/434] again remove collect templates --- .../global/publish/collect_templates.py | 119 ------------------ 1 file changed, 119 deletions(-) delete mode 100644 pype/plugins/global/publish/collect_templates.py diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py deleted file mode 100644 index 3104b5b705..0000000000 --- a/pype/plugins/global/publish/collect_templates.py +++ /dev/null @@ -1,119 +0,0 @@ -""" -Requires: - session -> AVALON_PROJECT - context -> anatomy (pypeapp.Anatomy) - instance -> subset - instance -> asset - instance -> family - -Provides: - instance -> template - instance -> assumedTemplateData - instance -> assumedDestination -""" - -import os - -from avalon import io, api -import pyblish.api - - -class CollectTemplates(pyblish.api.InstancePlugin): - """Fill templates with data needed for publish""" - - order = pyblish.api.CollectorOrder + 0.1 - label = "Collect and fill Templates" - hosts = ["maya", "nuke", "standalonepublisher"] - - def process(self, instance): - # get all the stuff from the database - subset_name = instance.data["subset"] - asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - - project = io.find_one( - { - "type": "project", - "name": project_name - }, - projection={"config": True, "data": True} - ) - - template = project["config"]["template"]["publish"] - anatomy = instance.context.data['anatomy'] - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - silo = asset.get('silo') - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += int(version["name"]) - - hierarchy = asset['data']['parents'] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*hierarchy) - else: - hierarchy = "" - - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "silo": silo, - "family": instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "version": version_number, - "hierarchy": hierarchy.replace("\\", "/"), - "representation": "TEMP"} - - # Add datetime data to template data - datetime_data = instance.context.data.get("datetimeData") or {} - template_data.update(datetime_data) - - resolution_width = instance.data.get("resolutionWidth") - resolution_height = instance.data.get("resolutionHeight") - fps = instance.data.get("fps") - - if resolution_width: - template_data["resolution_width"] = resolution_width - if resolution_width: - template_data["resolution_height"] = resolution_height - if resolution_width: - 
template_data["fps"] = fps - - instance.data["template"] = template - instance.data["assumedTemplateData"] = template_data - - # We take the parent folder of representation 'filepath' - instance.data["assumedDestination"] = os.path.dirname( - (anatomy.format(template_data))["publish"]["path"] - ) - self.log.info("Assumed Destination has been created...") - self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"])) - self.log.debug("__ template: `{}`".format(instance.data["template"])) From 0618b7a85ff9767ac6f5d4eaf3f58bd72f2b433c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:20:04 +0100 Subject: [PATCH 234/434] fix order --- pype/plugins/global/publish/collect_resources_path.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_resources_path.py b/pype/plugins/global/publish/collect_resources_path.py index 9fc8c576f5..734d1f84e4 100644 --- a/pype/plugins/global/publish/collect_resources_path.py +++ b/pype/plugins/global/publish/collect_resources_path.py @@ -19,7 +19,7 @@ class CollectResourcesPath(pyblish.api.InstancePlugin): """Generate directory path where the files and resources will be stored""" label = "Collect Resources Path" - order = pyblish.api.CollectorOrder + 0.995 + order = pyblish.api.CollectorOrder + 0.495 def process(self, instance): anatomy = instance.context.data["anatomy"] From a7ca458e4ee1550859fee03f84592aea9615947e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:24:56 +0100 Subject: [PATCH 235/434] collect scene has publish set to True by default --- pype/plugins/maya/publish/collect_scene.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/maya/publish/collect_scene.py b/pype/plugins/maya/publish/collect_scene.py index f2fbb4d623..089019f2d3 100644 --- a/pype/plugins/maya/publish/collect_scene.py +++ b/pype/plugins/maya/publish/collect_scene.py @@ -35,7 +35,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin): "subset": subset, "asset": os.getenv("AVALON_ASSET", None), "label": subset, - "publish": False, + "publish": True, "family": 'workfile', "families": ['workfile'], "setMembers": [current_file] From 377513f01f77c49d656f152157a1245e63e3bab6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:25:25 +0100 Subject: [PATCH 236/434] removed locations from version --- pype/plugins/global/publish/integrate_new.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index d27582bb71..8735f8fed7 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -168,14 +168,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if version_data_instance: version_data.update(version_data_instance) - # TODO remove avalon_location (shall we?) - avalon_location = api.Session["AVALON_LOCATION"] # TODO rename method from `create_version` to # `prepare_version` or similar... 
version = self.create_version( subset=subset, version_number=version_number, - locations=[avalon_location], data=version_data ) @@ -528,26 +525,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): return subset - def create_version(self, subset, version_number, locations, data=None): + def create_version(self, subset, version_number, data=None): """ Copy given source to destination Args: subset (dict): the registered subset of the asset version_number (int): the version number - locations (list): the currently registered locations Returns: dict: collection of data to create a version """ - # Imprint currently registered location - version_locations = [location for location in locations if - location is not None] return {"schema": "pype:version-3.0", "type": "version", "parent": subset["_id"], "name": version_number, - "locations": version_locations, "data": data} def create_version_data(self, context, instance): From 3d1e231a0db9f075eb7b6157cb99665f285e34e1 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 14 Feb 2020 17:41:24 +0100 Subject: [PATCH 237/434] added job to store thumbnails to avalon action --- .../action_store_thumbnails_to_avalon.py | 52 +++++++++++++++---- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py index d63d3a6ae3..ce0dfeb244 100644 --- a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py +++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -1,6 +1,7 @@ import os import requests import errno +import json from bson.objectid import ObjectId from pype.ftrack import BaseAction @@ -41,13 +42,30 @@ class StoreThumbnailsToAvalon(BaseAction): # DEBUG LINE # root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails" + user = session.query( + "User where username is '{0}'".format(session.api_user) + ).one() + action_job = session.create("Job", { + "user": user, + "status": "running", + "data": json.dumps({ + "description": "Storing thumbnails to avalon." + }) + }) + session.commit() + thumbnail_roots = os.environ.get(self.thumbnail_key) if not thumbnail_roots: + msg = "`{}` environment is not set".format(self.thumbnail_key) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + return { "success": False, - "message": "`{}` environment is not set".format( - self.thumbnail_key - ) + "message": msg } existing_thumbnail_root = None @@ -57,11 +75,18 @@ class StoreThumbnailsToAvalon(BaseAction): break if existing_thumbnail_root is None: + msg = ( + "Can't access paths, set in `{}` ({})" + ).format(self.thumbnail_key, thumbnail_roots) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + return { "success": False, - "message": ( - "Can't access paths, set in `{}` ({})" - ).format(self.thumbnail_key, thumbnail_roots) + "message": msg } project = get_project_from_entity(entities[0]) @@ -71,6 +96,9 @@ class StoreThumbnailsToAvalon(BaseAction): if "publish" not in anatomy.templates: msg = "Anatomy does not have set publish key!" + action_job["status"] = "failed" + session.commit() + self.log.warning(msg) return { @@ -84,6 +112,9 @@ class StoreThumbnailsToAvalon(BaseAction): " template in Antomy for project \"{}\"" ).format(project_name) + action_job["status"] = "failed" + session.commit() + self.log.warning(msg) return { @@ -127,6 +158,9 @@ class StoreThumbnailsToAvalon(BaseAction): " can offer. 
{}"
             ).format(submsg)
 
+            action_job["status"] = "failed"
+            session.commit()
+
             self.log.warning(msg)
 
             return {

From 5290f6dd58de1abf78be75ab54c949c84972ae83 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 14 Feb 2020 18:09:03 +0100
Subject: [PATCH 238/434] fix arguments appending

---
 pype/scripts/otio_burnin.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 590939df56..fd3c51816a 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -432,7 +432,7 @@ def burnins_from_data(
         if not value.startswith(TIME_CODE_KEY):
             value_items = value.split(TIME_CODE_KEY)
             text = value_items[0].format(**data)
-            args.append(value_items[0])
+            args.append(text)
 
             burnin.add_timecode(*args)
             continue

From feb2037c0259dba1fa5b130dd66da8655571ec6d Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 14 Feb 2020 18:09:42 +0100
Subject: [PATCH 239/434] exchange timecode and text keys in arguments

---
 pype/scripts/otio_burnin.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index fd3c51816a..1d00a08521 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -34,7 +34,7 @@ DRAWTEXT = (
     "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
 )
 TIMECODE = (
-    "drawtext=text=\\'%(text)s\\':timecode=\\'%(timecode)s\\'"
+    "drawtext=timecode=\\'%(timecode)s\\':text=\\'%(text)s\\'"
     ":timecode_rate=%(fps).2f:x=%(x)s:y=%(y)s:fontcolor="
     "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'"
 )

From 773fbf106a89f6d901addf543dda849d86f8ae1d Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 14 Feb 2020 18:23:37 +0100
Subject: [PATCH 240/434] ftrack server won't raise an exception if there are
 no event handlers to register

---
 pype/ftrack/ftrack_server/ftrack_server.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pype/ftrack/ftrack_server/ftrack_server.py b/pype/ftrack/ftrack_server/ftrack_server.py
index eebc3f6ec4..8464203c1d 100644
--- a/pype/ftrack/ftrack_server/ftrack_server.py
+++ b/pype/ftrack/ftrack_server/ftrack_server.py
@@ -100,9 +100,9 @@ class FtrackServer:
                 log.warning(msg, exc_info=e)
 
         if len(register_functions_dict) < 1:
-            raise Exception((
-                "There are no events with register function."
- " Registered paths: \"{}\"" + log.warning(( + "There are no events with `register` function" + " in registered paths: \"{}\"" ).format("| ".join(paths))) # Load presets for setting plugins @@ -122,7 +122,7 @@ class FtrackServer: else: register(self.session, plugins_presets=plugins_presets) - if function_counter%7 == 0: + if function_counter % 7 == 0: time.sleep(0.1) function_counter += 1 except Exception as exc: From 6a5d43a4790d9156c814a88457f92aa00e7cfb65 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Sat, 15 Feb 2020 11:06:49 +0100 Subject: [PATCH 241/434] `get_path_to_ffmpeg` replaced with `get_paths_from_environ` and `get_ffmpeg_tool_path` --- pype/lib.py | 61 +++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 54 insertions(+), 7 deletions(-) diff --git a/pype/lib.py b/pype/lib.py index 9cde3bd3af..87b9facdb3 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -13,14 +13,61 @@ import avalon log = logging.getLogger(__name__) -def get_path_to_ffmpeg(): - paths = os.environ.get("FFMPEG_PATH") or "" +def get_paths_from_environ(env_key, return_first=False): + """Return existing paths from specific envirnment variable. + + :param env_key: Environment key where should look for paths. + :type env_key: str + :param return_first: Return first path on `True`, list of all on `False`. + :type return_first: boolean + + Difference when none of paths exists: + - when `return_first` is set to `False` then function returns empty list. + - when `return_first` is set to `True` then function returns `None`. + """ + + existing_paths = [] + paths = os.environ.get(env_key) or "" path_items = paths.split(os.pathsep) - for item in path_items: - item = os.path.normpath(item) - if os.path.exists(item): - return item - return "" + for path in path_items: + # Skip empty string + if not path: + continue + # Normalize path + path = os.path.normpath(path) + # Check if path exists + if os.path.exists(path): + # Return path if `return_first` is set to True + if return_first: + return path + # Store path + existing_paths.append(path) + + # Return None if none of paths exists + if return_first: + return None + # Return all existing paths from environment variable + return existing_paths + + +def get_ffmpeg_tool_path(tool="ffmpeg"): + """Find path to ffmpeg tool in FFMPEG_PATH paths. + + Function looks for tool in paths set in FFMPEG_PATH environment. If tool + exists then returns it's full path. + + Returns tool name itself when tool path was not found. (FFmpeg path may be + set in PATH environment variable) + """ + + dir_paths = get_paths_from_environ("FFMPEG_PATH") + for dir_path in dir_paths: + for file_name in os.listdir(dir_path): + base, ext = os.path.splitext(file_name) + if base.lower() == tool.lower(): + return os.path.join(dir_path, tool) + return tool + # Special naming case for subprocess since its a built-in method. 
def _subprocess(*args, **kwargs): From 6efb1b0a74572df70179ff0993fe68c86a372520 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Sat, 15 Feb 2020 11:08:39 +0100 Subject: [PATCH 242/434] use new functions in code --- pype/plugins/global/publish/extract_jpeg.py | 5 +++-- pype/plugins/global/publish/extract_review.py | 7 ++++--- pype/plugins/global/publish/extract_review_slate.py | 2 +- .../global/publish/validate_ffmpeg_installed.py | 9 +++------ .../standalonepublisher/publish/extract_review.py | 7 +------ .../publish/extract_thumbnail.py | 6 +----- pype/scripts/otio_burnin.py | 13 +++++-------- pype/standalonepublish/widgets/widget_drop_frame.py | 7 +------ 8 files changed, 19 insertions(+), 37 deletions(-) diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 6a0d7905b0..803ce51000 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -66,9 +66,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): proj_name = os.environ.get('AVALON_PROJECT', '__default__') profile = config_data.get(proj_name, config_data['__default__']) + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + jpeg_items = [] - jpeg_items.append( - os.path.join(pype.lib.get_path_to_ffmpeg(), "ffmpeg")) + jpeg_items.append(ffmpeg_path) # override file if already exists jpeg_items.append("-y") # use same input args like with mov diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 5895a8c423..cdec90cb3d 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -311,10 +311,11 @@ class ExtractReview(pyblish.api.InstancePlugin): "creating dir: {}".format(stg_dir)) os.mkdir(stg_dir) + ffmpeg_path = ( + pype.lib.get_ffmpeg_tool_path("ffmpeg") + ) mov_args = [ - os.path.join( - pype.lib.get_path_to_ffmpeg(), "ffmpeg" - ), + ffmpeg_path, " ".join(input_args), " ".join(output_args) ] diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 5d4990a027..699ed4a5eb 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -22,7 +22,7 @@ class ExtractReviewSlate(pype.api.Extractor): suffix = "_slate" slate_path = inst_data.get("slateFrame") - ffmpeg_path = os.path.join(pype.lib.get_path_to_ffmpeg(), "ffmpeg") + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") to_width = 1920 to_height = 1080 diff --git a/pype/plugins/global/publish/validate_ffmpeg_installed.py b/pype/plugins/global/publish/validate_ffmpeg_installed.py index 643e0f1821..40006789f7 100644 --- a/pype/plugins/global/publish/validate_ffmpeg_installed.py +++ b/pype/plugins/global/publish/validate_ffmpeg_installed.py @@ -28,11 +28,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator): return True def process(self, instance): - self.log.info("ffmpeg path: `{}`".format( - pype.lib.get_path_to_ffmpeg() - )) - if self.is_tool( - os.path.join( - pype.lib.get_path_to_ffmpeg(), "ffmpeg")) is False: + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + self.log.info("ffmpeg path: `{}`".format(ffmpeg_path)) + if self.is_tool(ffmpeg_path) is False: self.log.error("ffmpeg not found in PATH") raise RuntimeError('ffmpeg not installed.') diff --git a/pype/plugins/standalonepublisher/publish/extract_review.py b/pype/plugins/standalonepublisher/publish/extract_review.py index 29e1fcaac0..66cdcdf4df 100644 --- 
a/pype/plugins/standalonepublisher/publish/extract_review.py +++ b/pype/plugins/standalonepublisher/publish/extract_review.py @@ -149,12 +149,7 @@ class ExtractReviewSP(pyblish.api.InstancePlugin): # output filename output_args.append(full_output_path) - ffmpeg_path = pype.lib.get_path_to_ffmpeg() - if ffmpeg_path: - ffmpeg_path += "/ffmpeg" - else: - ffmpeg_path = "ffmpeg" - + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") mov_args = [ ffmpeg_path, " ".join(input_args), diff --git a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py index b752419a35..daa3936359 100644 --- a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py +++ b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py @@ -74,11 +74,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): config_data.get("__default__", {}) ) - ffmpeg_path = pype.lib.get_path_to_ffmpeg() - if ffmpeg_path: - ffmpeg_path += "/ffmpeg" - else: - ffmpeg_path = "ffmpeg" + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") jpeg_items = [] jpeg_items.append(ffmpeg_path) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 1d589916e9..1da254adb1 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -12,20 +12,17 @@ import pype.lib log = pype.Logger().get_logger("BurninWrapper", "burninwrap") -ffmpeg_path = pype.lib.get_path_to_ffmpeg() -if ffmpeg_path and os.path.exists(ffmpeg_path): - # add separator "/" or "\" to be prepared for next part - ffmpeg_path += os.path.sep -else: - ffmpeg_path = "" +ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") +ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") + FFMPEG = ( '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s' -).format(os.path.normpath(ffmpeg_path + "ffmpeg")) +).format(ffmpeg_path) FFPROBE = ( '{} -v quiet -print_format json -show_format -show_streams %(source)s' -).format(os.path.normpath(ffmpeg_path + "ffprobe")) +).format(ffprobe_path) def _streams(source): diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py index aa3335fb78..c85105a333 100644 --- a/pype/standalonepublish/widgets/widget_drop_frame.py +++ b/pype/standalonepublish/widgets/widget_drop_frame.py @@ -225,12 +225,7 @@ class DropDataFrame(QtWidgets.QFrame): self._process_data(data) def load_data_with_probe(self, filepath): - ffprobe_path = pype.lib.get_path_to_ffmpeg() - if ffprobe_path: - ffprobe_path += '/ffprobe' - else: - ffprobe_path = 'ffprobe' - + ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") args = [ ffprobe_path, '-v', 'quiet', From c0af43dafecb0d1c0956b78cd3b0d8164c533dda Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Sat, 15 Feb 2020 11:09:24 +0100 Subject: [PATCH 243/434] fix typo in class name `ValidateFfmpegInstallef` --- pype/plugins/global/publish/validate_ffmpeg_installed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/validate_ffmpeg_installed.py b/pype/plugins/global/publish/validate_ffmpeg_installed.py index 40006789f7..f6738e6de1 100644 --- a/pype/plugins/global/publish/validate_ffmpeg_installed.py +++ b/pype/plugins/global/publish/validate_ffmpeg_installed.py @@ -8,7 +8,7 @@ except ImportError: import errno -class ValidateFfmpegInstallef(pyblish.api.Validator): +class ValidateFFmpegInstalled(pyblish.api.Validator): """Validate availability of ffmpeg tool in PATH""" order = pyblish.api.ValidatorOrder From 
d1372fa25fb2c3fd5c2ccdbc101db73aaf8c74bf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Sat, 15 Feb 2020 11:36:27 +0100 Subject: [PATCH 244/434] reversed logic of extract review conditions which don't have else statement and maked bigger indentation --- pype/plugins/global/publish/extract_review.py | 632 +++++++++--------- 1 file changed, 320 insertions(+), 312 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 2e79d86c38..4d63e2c641 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -43,320 +43,328 @@ class ExtractReview(pyblish.api.InstancePlugin): # filter out mov and img sequences representations_new = representations[:] for repre in representations: - if repre['ext'] in self.ext_filter: - tags = repre.get("tags", []) - - if "thumbnail" in tags: - continue - - self.log.info("Try repre: {}".format(repre)) - - if "review" in tags: - staging_dir = repre["stagingDir"] - - # iterating preset output profiles - for name, profile in output_profiles.items(): - repre_new = repre.copy() - ext = profile.get("ext", None) - p_tags = profile.get('tags', []) - self.log.info("p_tags: `{}`".format(p_tags)) - - # adding control for presets to be sequence - # or single file - is_sequence = ("sequence" in p_tags) and (ext in ( - "png", "jpg", "jpeg")) - - self.log.debug("Profile name: {}".format(name)) - - if not ext: - ext = "mov" - self.log.warning( - str("`ext` attribute not in output " - "profile. Setting to default ext: `mov`")) - - self.log.debug( - "instance.families: {}".format( - instance.data['families'])) - self.log.debug( - "profile.families: {}".format(profile['families'])) - - if any(item in instance.data['families'] for item in profile['families']): - if isinstance(repre["files"], list): - collections, remainder = clique.assemble( - repre["files"]) - - full_input_path = os.path.join( - staging_dir, collections[0].format( - '{head}{padding}{tail}') - ) - - filename = collections[0].format('{head}') - if filename.endswith('.'): - filename = filename[:-1] - else: - full_input_path = os.path.join( - staging_dir, repre["files"]) - filename = repre["files"].split(".")[0] - - repr_file = filename + "_{0}.{1}".format(name, ext) - full_output_path = os.path.join( - staging_dir, repr_file) - - if is_sequence: - filename_base = filename + "_{0}".format(name) - repr_file = filename_base + ".%08d.{0}".format( - ext) - repre_new["sequence_file"] = repr_file - full_output_path = os.path.join( - staging_dir, filename_base, repr_file) - - self.log.info("input {}".format(full_input_path)) - self.log.info("output {}".format(full_output_path)) - - new_tags = [x for x in tags if x != "delete"] - - # add families - [instance.data["families"].append(t) - for t in p_tags - if t not in instance.data["families"]] - - # add to - [new_tags.append(t) for t in p_tags - if t not in new_tags] - - self.log.info("new_tags: `{}`".format(new_tags)) - - input_args = [] - - # overrides output file - input_args.append("-y") - - # preset's input data - input_args.extend(profile.get('input', [])) - - # necessary input data - # adds start arg only if image sequence - if isinstance(repre["files"], list): - input_args.append( - "-start_number {0} -framerate {1}".format( - start_frame, fps)) - - input_args.append("-i {}".format(full_input_path)) - - for audio in instance.data.get("audio", []): - offset_frames = ( - instance.data.get("startFrameReview") - - audio["offset"] - ) - offset_seconds = 
offset_frames / fps - - if offset_seconds > 0: - input_args.append("-ss") - else: - input_args.append("-itsoffset") - - input_args.append(str(abs(offset_seconds))) - - input_args.extend( - ["-i", audio["filename"]] - ) - - # Need to merge audio if there are more - # than 1 input. - if len(instance.data["audio"]) > 1: - input_args.extend( - [ - "-filter_complex", - "amerge", - "-ac", - "2" - ] - ) - - output_args = [] - codec_args = profile.get('codec', []) - output_args.extend(codec_args) - # preset's output data - output_args.extend(profile.get('output', [])) - - # defining image ratios - resolution_ratio = float(resolution_width / ( - resolution_height * pixel_aspect)) - delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) - - # get scale factor - scale_factor = to_height / ( - resolution_height * pixel_aspect) - self.log.debug(scale_factor) - - # letter_box - lb = profile.get('letter_box', 0) - if lb != 0: - ffmpet_width = to_width - ffmpet_height = to_height - if "reformat" not in p_tags: - lb /= pixel_aspect - if resolution_ratio != delivery_ratio: - ffmpet_width = resolution_width - ffmpet_height = int( - resolution_height * pixel_aspect) - else: - if resolution_ratio != delivery_ratio: - lb /= scale_factor - else: - lb /= pixel_aspect - - output_args.append(str( - "-filter:v scale={0}x{1}:flags=lanczos," - "setsar=1,drawbox=0:0:iw:" - "round((ih-(iw*(1/{2})))/2):t=fill:" - "c=black,drawbox=0:ih-round((ih-(iw*(" - "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" - "/2):t=fill:c=black").format( - ffmpet_width, ffmpet_height, lb)) - - # In case audio is longer than video. - output_args.append("-shortest") - - # output filename - output_args.append(full_output_path) - - self.log.debug( - "__ pixel_aspect: `{}`".format(pixel_aspect)) - self.log.debug( - "__ resolution_width: `{}`".format( - resolution_width)) - self.log.debug( - "__ resolution_height: `{}`".format( - resolution_height)) - - # scaling none square pixels and 1920 width - if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: - self.log.debug("lower then delivery") - width_scale = int(to_width * scale_factor) - width_half_pad = int(( - to_width - width_scale)/2) - height_scale = to_height - height_half_pad = 0 - else: - self.log.debug("heigher then delivery") - width_scale = to_width - width_half_pad = 0 - scale_factor = float(to_width) / float( - resolution_width) - self.log.debug(scale_factor) - height_scale = int( - resolution_height * scale_factor) - height_half_pad = int( - (to_height - height_scale)/2) - - self.log.debug( - "__ width_scale: `{}`".format(width_scale)) - self.log.debug( - "__ width_half_pad: `{}`".format( - width_half_pad)) - self.log.debug( - "__ height_scale: `{}`".format( - height_scale)) - self.log.debug( - "__ height_half_pad: `{}`".format( - height_half_pad)) - - scaling_arg = str( - "scale={0}x{1}:flags=lanczos," - "pad={2}:{3}:{4}:{5}:black,setsar=1" - ).format(width_scale, height_scale, - to_width, to_height, - width_half_pad, - height_half_pad - ) - - vf_back = self.add_video_filter_args( - output_args, scaling_arg) - # add it to output_args - output_args.insert(0, vf_back) - - # baking lut file application - lut_path = instance.data.get("lutPath") - if lut_path and ("bake-lut" in p_tags): - # removing Gama info as it is all baked in lut - gamma = next((g for g in input_args - if "-gamma" in g), None) - if gamma: - input_args.remove(gamma) - - # create lut argument - lut_arg = "lut3d=file='{}'".format( - lut_path.replace( - 
"\\", "/").replace(":/", "\\:/") - ) - lut_arg += ",colormatrix=bt601:bt709" - - vf_back = self.add_video_filter_args( - output_args, lut_arg) - # add it to output_args - output_args.insert(0, vf_back) - self.log.info("Added Lut to ffmpeg command") - self.log.debug( - "_ output_args: `{}`".format(output_args)) - - if is_sequence: - stg_dir = os.path.dirname(full_output_path) - - if not os.path.exists(stg_dir): - self.log.debug( - "creating dir: {}".format(stg_dir)) - os.mkdir(stg_dir) - - mov_args = [ - os.path.join( - os.environ.get( - "FFMPEG_PATH", - ""), "ffmpeg"), - " ".join(input_args), - " ".join(output_args) - ] - subprcs_cmd = " ".join(mov_args) - - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) - output = pype.api.subprocess(subprcs_cmd) - self.log.debug("Output: {}".format(output)) - - # create representation data - repre_new.update({ - 'name': name, - 'ext': ext, - 'files': repr_file, - "tags": new_tags, - "outputName": name, - "codec": codec_args, - "_profile": profile, - "resolutionHeight": resolution_height, - "resolutionWidth": resolution_width, - }) - if is_sequence: - repre_new.update({ - "stagingDir": stg_dir, - "files": os.listdir(stg_dir) - }) - - if repre_new.get('preview'): - repre_new.pop("preview") - if repre_new.get('thumbnail'): - repre_new.pop("thumbnail") - - # adding representation - self.log.debug("Adding: {}".format(repre_new)) - representations_new.append(repre_new) - else: - continue - else: + if repre['ext'] not in self.ext_filter: continue + tags = repre.get("tags", []) + + if "thumbnail" in tags: + continue + + self.log.info("Try repre: {}".format(repre)) + + if "review" not in tags: + continue + + staging_dir = repre["stagingDir"] + + # iterating preset output profiles + for name, profile in output_profiles.items(): + repre_new = repre.copy() + ext = profile.get("ext", None) + p_tags = profile.get('tags', []) + self.log.info("p_tags: `{}`".format(p_tags)) + + # adding control for presets to be sequence + # or single file + is_sequence = ("sequence" in p_tags) and (ext in ( + "png", "jpg", "jpeg")) + + self.log.debug("Profile name: {}".format(name)) + + if not ext: + ext = "mov" + self.log.warning( + str("`ext` attribute not in output " + "profile. 
Setting to default ext: `mov`")) + + self.log.debug( + "instance.families: {}".format( + instance.data['families'])) + self.log.debug( + "profile.families: {}".format(profile['families'])) + + profile_family_check = False + for _family in profile['families']: + if _family in instance.data['families']: + profile_family_check = True + break + + if not profile_family_check: + continue + + if isinstance(repre["files"], list): + collections, remainder = clique.assemble( + repre["files"]) + + full_input_path = os.path.join( + staging_dir, collections[0].format( + '{head}{padding}{tail}') + ) + + filename = collections[0].format('{head}') + if filename.endswith('.'): + filename = filename[:-1] + else: + full_input_path = os.path.join( + staging_dir, repre["files"]) + filename = repre["files"].split(".")[0] + + repr_file = filename + "_{0}.{1}".format(name, ext) + full_output_path = os.path.join( + staging_dir, repr_file) + + if is_sequence: + filename_base = filename + "_{0}".format(name) + repr_file = filename_base + ".%08d.{0}".format( + ext) + repre_new["sequence_file"] = repr_file + full_output_path = os.path.join( + staging_dir, filename_base, repr_file) + + self.log.info("input {}".format(full_input_path)) + self.log.info("output {}".format(full_output_path)) + + new_tags = [x for x in tags if x != "delete"] + + # add families + [instance.data["families"].append(t) + for t in p_tags + if t not in instance.data["families"]] + + # add to + [new_tags.append(t) for t in p_tags + if t not in new_tags] + + self.log.info("new_tags: `{}`".format(new_tags)) + + input_args = [] + + # overrides output file + input_args.append("-y") + + # preset's input data + input_args.extend(profile.get('input', [])) + + # necessary input data + # adds start arg only if image sequence + if isinstance(repre["files"], list): + input_args.append( + "-start_number {0} -framerate {1}".format( + start_frame, fps)) + + input_args.append("-i {}".format(full_input_path)) + + for audio in instance.data.get("audio", []): + offset_frames = ( + instance.data.get("startFrameReview") - + audio["offset"] + ) + offset_seconds = offset_frames / fps + + if offset_seconds > 0: + input_args.append("-ss") + else: + input_args.append("-itsoffset") + + input_args.append(str(abs(offset_seconds))) + + input_args.extend( + ["-i", audio["filename"]] + ) + + # Need to merge audio if there are more + # than 1 input. 
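
# A hedged sketch of the audio handling above, with hypothetical values:
# the sign of the offset decides between "-ss" (seek) and "-itsoffset"
# (delay), and more than one audio input is merged to stereo via "amerge".
fps = 25.0
start_frame_review = 1001
audio_items = [
    {"offset": 988, "filename": "dialog.wav"},   # hypothetical audio items
    {"offset": 1013, "filename": "music.wav"},
]
input_args = []
for audio in audio_items:
    offset_seconds = (start_frame_review - audio["offset"]) / fps
    input_args.append("-ss" if offset_seconds > 0 else "-itsoffset")
    input_args.append(str(abs(offset_seconds)))
    input_args.extend(["-i", audio["filename"]])
if len(audio_items) > 1:
    input_args.extend(["-filter_complex", "amerge", "-ac", "2"])
print(" ".join(input_args))
# -ss 0.52 -i dialog.wav -itsoffset 0.48 -i music.wav -filter_complex amerge -ac 2
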
+ if len(instance.data["audio"]) > 1: + input_args.extend( + [ + "-filter_complex", + "amerge", + "-ac", + "2" + ] + ) + + output_args = [] + codec_args = profile.get('codec', []) + output_args.extend(codec_args) + # preset's output data + output_args.extend(profile.get('output', [])) + + # defining image ratios + resolution_ratio = float(resolution_width / ( + resolution_height * pixel_aspect)) + delivery_ratio = float(to_width) / float(to_height) + self.log.debug(resolution_ratio) + self.log.debug(delivery_ratio) + + # get scale factor + scale_factor = to_height / ( + resolution_height * pixel_aspect) + self.log.debug(scale_factor) + + # letter_box + lb = profile.get('letter_box', 0) + if lb != 0: + ffmpet_width = to_width + ffmpet_height = to_height + if "reformat" not in p_tags: + lb /= pixel_aspect + if resolution_ratio != delivery_ratio: + ffmpet_width = resolution_width + ffmpet_height = int( + resolution_height * pixel_aspect) + else: + if resolution_ratio != delivery_ratio: + lb /= scale_factor + else: + lb /= pixel_aspect + + output_args.append(str( + "-filter:v scale={0}x{1}:flags=lanczos," + "setsar=1,drawbox=0:0:iw:" + "round((ih-(iw*(1/{2})))/2):t=fill:" + "c=black,drawbox=0:ih-round((ih-(iw*(" + "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" + "/2):t=fill:c=black").format( + ffmpet_width, ffmpet_height, lb)) + + # In case audio is longer than video. + output_args.append("-shortest") + + # output filename + output_args.append(full_output_path) + + self.log.debug( + "__ pixel_aspect: `{}`".format(pixel_aspect)) + self.log.debug( + "__ resolution_width: `{}`".format( + resolution_width)) + self.log.debug( + "__ resolution_height: `{}`".format( + resolution_height)) + + # scaling none square pixels and 1920 width + if "reformat" in p_tags: + if resolution_ratio < delivery_ratio: + self.log.debug("lower then delivery") + width_scale = int(to_width * scale_factor) + width_half_pad = int(( + to_width - width_scale)/2) + height_scale = to_height + height_half_pad = 0 + else: + self.log.debug("heigher then delivery") + width_scale = to_width + width_half_pad = 0 + scale_factor = float(to_width) / float( + resolution_width) + self.log.debug(scale_factor) + height_scale = int( + resolution_height * scale_factor) + height_half_pad = int( + (to_height - height_scale)/2) + + self.log.debug( + "__ width_scale: `{}`".format(width_scale)) + self.log.debug( + "__ width_half_pad: `{}`".format( + width_half_pad)) + self.log.debug( + "__ height_scale: `{}`".format( + height_scale)) + self.log.debug( + "__ height_half_pad: `{}`".format( + height_half_pad)) + + scaling_arg = str( + "scale={0}x{1}:flags=lanczos," + "pad={2}:{3}:{4}:{5}:black,setsar=1" + ).format(width_scale, height_scale, + to_width, to_height, + width_half_pad, + height_half_pad + ) + + vf_back = self.add_video_filter_args( + output_args, scaling_arg) + # add it to output_args + output_args.insert(0, vf_back) + + # baking lut file application + lut_path = instance.data.get("lutPath") + if lut_path and ("bake-lut" in p_tags): + # removing Gama info as it is all baked in lut + gamma = next((g for g in input_args + if "-gamma" in g), None) + if gamma: + input_args.remove(gamma) + + # create lut argument + lut_arg = "lut3d=file='{}'".format( + lut_path.replace( + "\\", "/").replace(":/", "\\:/") + ) + lut_arg += ",colormatrix=bt601:bt709" + + vf_back = self.add_video_filter_args( + output_args, lut_arg) + # add it to output_args + output_args.insert(0, vf_back) + self.log.info("Added Lut to ffmpeg command") + self.log.debug( + "_ 
output_args: `{}`".format(output_args)) + + if is_sequence: + stg_dir = os.path.dirname(full_output_path) + + if not os.path.exists(stg_dir): + self.log.debug( + "creating dir: {}".format(stg_dir)) + os.mkdir(stg_dir) + + mov_args = [ + os.path.join( + os.environ.get( + "FFMPEG_PATH", + ""), "ffmpeg"), + " ".join(input_args), + " ".join(output_args) + ] + subprcs_cmd = " ".join(mov_args) + + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) + output = pype.api.subprocess(subprcs_cmd) + self.log.debug("Output: {}".format(output)) + + # create representation data + repre_new.update({ + 'name': name, + 'ext': ext, + 'files': repr_file, + "tags": new_tags, + "outputName": name, + "codec": codec_args, + "_profile": profile, + "resolutionHeight": resolution_height, + "resolutionWidth": resolution_width, + }) + if is_sequence: + repre_new.update({ + "stagingDir": stg_dir, + "files": os.listdir(stg_dir) + }) + + if repre_new.get('preview'): + repre_new.pop("preview") + if repre_new.get('thumbnail'): + repre_new.pop("thumbnail") + + # adding representation + self.log.debug("Adding: {}".format(repre_new)) + representations_new.append(repre_new) + for repre in representations_new: if "delete" in repre.get("tags", []): representations_new.remove(repre) From 5f5a80818c20e26deeded4f616d477a479999ee8 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sun, 16 Feb 2020 20:07:42 +0100 Subject: [PATCH 245/434] fix(global): fixing version collection --- pype/plugins/nuke/publish/collect_writes.py | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index bf1c6a4b66..c29f676ef7 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -52,9 +52,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_dir = os.path.dirname(path) self.log.debug('output dir: {}'.format(output_dir)) - # get version to instance for integration - instance.data['version'] = instance.context.data.get( - "version", pype.get_version_from_path(nuke.root().name())) + # # get version to instance for integration + # instance.data['version'] = instance.context.data.get( + # "version", pype.get_version_from_path(nuke.root().name())) self.log.debug('Write Version: %s' % instance.data('version')) @@ -92,16 +92,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): # Add version data to instance version_data = { - "handles": handle_start, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": first_frame + handle_start, - "frameEnd": last_frame - handle_end, - "version": int(instance.data['version']), "colorspace": node["colorspace"].value(), - "families": ["render"], - "subset": instance.data["subset"], - "fps": instance.context.data["fps"] } instance.data["family"] = "write" From e8499b43ff4cf6a0b9a15b502fbf164474ca0e49 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Sun, 16 Feb 2020 20:08:18 +0100 Subject: [PATCH 246/434] fix(global): wrong version format print --- pype/plugins/global/publish/integrate_new.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index d27582bb71..bb65a02bce 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -160,7 +160,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset = self.get_subset(asset_entity, instance) 
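
# Hedged sketch of why the padded format swapped out below is fragile (an
# assumed failure mode, not stated in the commit): "{0:03d}" requires an
# int, so a version number arriving as a string raises ValueError, while a
# plain "{}" accepts both types.
version_number = "7"                           # hypothetical str value
print("v{}".format(version_number))            # 'v7' -- works for int and str
print("v{0:03d}".format(int(version_number)))  # 'v007' -- needs an int
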
version_number = instance.data["version"] - self.log.debug("Next version: v{0:03d}".format(version_number)) + self.log.debug("Next version: v{}".format(version_number)) version_data = self.create_version_data(context, instance) From ff2cbb78b5356338eb3f19a0a0f15e1dc3426ac3 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Mon, 17 Feb 2020 11:06:12 +0100 Subject: [PATCH 247/434] wip refactoring collect_filesequences --- .../global/publish/collect_filesequences.py | 69 +-- .../global/publish/submit_publish_job.py | 410 ++++++++++++++---- pype/plugins/maya/publish/collect_render.py | 4 +- 3 files changed, 369 insertions(+), 114 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 0bab57d6eb..33a1e82ba6 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -161,7 +161,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): api.Session.update(session) os.environ.update(session) instance = metadata.get("instance") - if instance: + if instance and isinstance(instance, list): instance_family = instance.get("family") pixel_aspect = instance.get("pixelAspect", 1) resolution_width = instance.get( @@ -225,11 +225,15 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if families_data and "slate" in families_data: families.append("slate") - if data["metadata"]["instance"].get("attachTo"): + if not isinstance(instance, list): + instances = [instance] + + # attachTo must be only on single instance + if instances[0].get("attachTo"): # we need to attach found collections to existing # subset version as review represenation. - for attach in data["metadata"]["instance"]["attachTo"]: + for attach in instances[0].get("attachTo"): self.log.info( "Attaching render {}:v{}".format( attach["subset"], attach["version"])) @@ -476,37 +480,40 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): 'ftrack') representation["tags"].remove('review') else: - subset = data["metadata"]["instance"]["subset"] data = copy.deepcopy(data) - task = data["metadata"]["session"]["AVALON_TASK"] - new_subset_name = 'render{}{}{}{}'.format( - task[0].upper(), task[1:], - subset[0].upper(), subset[1:]) + if not isinstance(data["metadata"]["instance"], list): + instances = [data["metadata"]["instance"]] + for instance in instances: + subset = instance["subset"] + task = data["metadata"]["session"]["AVALON_TASK"] + new_subset_name = 'render{}{}{}{}'.format( + task[0].upper(), task[1:], + subset[0].upper(), subset[1:]) - self.log.info( - "Creating new subset: {}".format(new_subset_name)) - new_instance = context.create_instance(new_subset_name) + self.log.info( + "Creating new subset: {}".format(new_subset_name)) + new_instance = context.create_instance(new_subset_name) - new_instance.data.update( - { - "name": new_subset_name, - "family": 'render', - "families": data["metadata"]["families"], - "subset": new_subset_name, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "slateFrame": slate_frame - } - ) - new_instance.data["representations"] = data["metadata"]["instance"]["representations"] # noqa: E501 + new_instance.data.update( + { + "name": new_subset_name, + "family": 'render', + "families": 
data["metadata"]["families"], + "subset": new_subset_name, + "asset": data.get( + "asset", api.Session["AVALON_ASSET"]), + "stagingDir": root, + "frameStart": frame_start, + "frameEnd": frame_end, + "fps": fps, + "source": data.get("source", ""), + "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "slateFrame": slate_frame + } + ) + new_instance.data["representations"] = instance["representations"] # noqa: E501 if new_instance is not None: self.log.info("remapping paths ...") diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index ece6b3660b..b79318af76 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -2,7 +2,6 @@ import os import json import re import logging -from collections import namedtuple from avalon import api, io from avalon.vendor import requests, clique @@ -10,7 +9,7 @@ from avalon.vendor import requests, clique import pyblish.api -AOVFilter = namedtuple("AOVFilter", ["app", "aov"]) +R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') def _get_script(): @@ -162,6 +161,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "PYPE_STUDIO_PROJECTS_MOUNT", ] + # pool used to do the publishing job + deadline_pool = "" + def _submit_deadline_post_job(self, instance, job): """ Deadline specific code separated from :meth:`process` for sake of @@ -196,6 +198,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), "Priority": job["Props"]["Pri"], + "Pool": self.deadline_pool }, "PluginInfo": { "Version": "3.6", @@ -240,6 +243,183 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if not response.ok: raise Exception(response.text) + def _create_instances_for_aov(self, context, instance_data, exp_files): + task = os.environ["AVALON_TASK"] + subset = instance_data["subset"] + instances = [] + for aov, files in exp_files.items(): + cols, rem = clique.assemble(files) + # we shouldn't have any reminders + if rem: + self.log.warning( + "skipping unexpected files found " + "in sequence: {}".format(rem)) + + # but we really expect only one collection, nothing else make sense + self.log.error("got {} sequence type".format(len(cols))) + assert len(cols) == 1, "only one image sequence type is expected" + + # create subset name `familyTaskSubset_AOV` + subset_name = 'render{}{}{}{}_{}'.format( + task[0].upper(), task[1:], + subset[0].upper(), subset[1:], + aov) + + staging = os.path.dirname(list(cols[0])[0]) + start = int(instance_data.get("frameStart")) + end = int(instance_data.get("frameEnd")) + + new_instance = self.context.create_instance(subset_name) + app = os.environ.get("AVALON_APP", "") + + preview = False + if app in self.aov_filter.keys(): + if aov in self.aov_filter[app]: + preview = True + + new_instance.data.update(instance_data) + new_instance.data["subset"] = subset_name + ext = cols[0].tail.lstrip(".") + rep = { + "name": ext, + "ext": ext, + "files": [os.path.basename(f) for f in list(cols[0])], + "frameStart": start, + "frameEnd": end, + # If expectedFile are absolute, we need only filenames + "stagingDir": staging, + "anatomy_template": "render", + "fps": new_instance.data.get("fps"), + "tags": ["review", "preview"] if preview else [] + } + + # if extending frames from existing version, copy files from there + # into our destination directory + if 
instance_data.get("extendFrames", False): + self.log.info("Preparing to copy ...") + import speedcopy + + # get latest version of subset + # this will stop if subset wasn't published yet + version = get_latest_version( + instance_data.get("asset"), + subset_name, "render") + # get its files based on extension + subset_resources = get_resources(version, ext) + r_col, _ = clique.assemble(subset_resources) + + # if override remove all frames we are expecting to be rendered + # so we'll copy only those missing from current render + if instance_data.get("overrideExistingFrame"): + for frame in range(start, end+1): + if frame not in r_col.indexes: + continue + r_col.indexes.remove(frame) + + # now we need to translate published names from represenation + # back. This is tricky, right now we'll just use same naming + # and only switch frame numbers + resource_files = [] + r_filename = os.path.basename(list(cols[0])[0]) # first file + op = re.search(R_FRAME_NUMBER, r_filename) + pre = r_filename[:op.start("frame")] + post = r_filename[op.end("frame"):] + assert op is not None, "padding string wasn't found" + for frame in list(r_col): + fn = re.search(R_FRAME_NUMBER, frame) + # silencing linter as we need to compare to True, not to + # type + assert fn is not None, "padding string wasn't found" + # list of tuples (source, destination) + resource_files.append( + (frame, + os.path.join(staging, + "{}{}{}".format(pre, + fn.group("frame"), + post))) + ) + + for source in resource_files: + speedcopy.copy(source[0], source[1]) + + self.log.info( + "Finished copying %i files" % len(resource_files)) + + if preview: + if "ftrack" not in new_instance.data["families"]: + if os.environ.get("FTRACK_SERVER"): + new_instance.data["families"].append("ftrack") + if "review" not in new_instance.data["families"]: + new_instance.data["families"].append("review") + + new_instance.data["representations"] = [rep] + instances.append(new_instance) + + return instances + + def _get_representations(self, instance, exp_files): + representations = [] + start = int(instance.data.get("frameStart")) + end = int(instance.data.get("frameEnd")) + cols, rem = clique.assemble(exp_files) + # create representation for every collected sequence + for c in cols: + ext = c.tail.lstrip(".") + preview = False + # if filtered aov name is found in filename, toggle it for + # preview video rendering + for app in self.aov_filter: + if os.environ.get("AVALON_APP", "") == app: + for aov in self.aov_filter[app]: + if re.match( + r".+(?:\.|_)({})(?:\.|_).*".format(aov), + list(c)[0] + ): + preview = True + break + break + rep = { + "name": str(c), + "ext": ext, + "files": [os.path.basename(f) for f in list(c)], + "frameStart": start, + "frameEnd": end, + # If expectedFile are absolute, we need only filenames + "stagingDir": os.path.dirname(list(c)[0]), + "anatomy_template": "render", + "fps": instance.data.get("fps"), + "tags": ["review", "preview"] if preview else [], + } + + representations.append(rep) + + # TODO: implement extendFrame + + families = instance.data.get("families") + # if we have one representation with preview tag + # flag whole instance for review and for ftrack + if preview: + if "ftrack" not in families: + if os.environ.get("FTRACK_SERVER"): + families.append("ftrack") + if "review" not in families: + families.append("review") + instance.data["families"] = families + + for r in rem: + ext = r.split(".")[-1] + rep = { + "name": r, + "ext": ext, + "files": os.path.basename(r), + "stagingDir": os.path.dirname(r), + 
"anatomy_template": "publish", + } + + representations.append(rep) + + return representations + def process(self, instance): """ Detect type of renderfarm submission and create and post dependend job @@ -252,6 +432,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): data = instance.data.copy() context = instance.context + self.context = context if hasattr(instance, "_log"): data['_log'] = instance._log @@ -285,6 +466,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if end is None: end = context.data["frameEnd"] + if data.get("extendFrames", False): + start, end = self._extend_frames( + asset, + subset, + start, + end, + data["overrideExistingFrame"]) + try: source = data["source"] except KeyError: @@ -298,78 +487,91 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): regex = None families = ["render"] + + instance_skeleton_data = { + "family": "render", + "subset": subset, + "families": families, + "asset": asset, + "frameStart": start, + "frameEnd": end, + "fps": data.get("fps", 25), + "source": source, + "extendFrames": data.get("extendFrames"), + "overrideExistingFrame": data.get("overrideExistingFrame") + } + + instances = None if data.get("expectedFiles"): - representations = [] - cols, rem = clique.assemble(data.get("expectedFiles")) - # create representation for every collected sequence - for c in cols: - ext = c.tail.lstrip(".") - preview = False - # if filtered aov name is found in filename, toggle it for - # preview video renderin - for app in self.aov_filter: - if os.environ.get("AVALON_APP", "") == app: - for aov in self.aov_filter[app]: - if re.match( - r".+(?:\.|_)({})(?:\.|_).*".format(aov), - list(c)[0] - ): - preview = True - break - break - rep = { - "name": ext, - "ext": ext, - "files": [os.path.basename(f) for f in list(c)], - "frameStart": int(start), - "frameEnd": int(end), - # If expectedFile are absolute, we need only filenames - "stagingDir": os.path.dirname(list(c)[0]), - "anatomy_template": "render", - "fps": context.data.get("fps", None), - "tags": ["review", "preview"] if preview else [], + """ + if content of `expectedFiles` are dictionaries, we will handle + it as list of AOVs, creating instance from every one of them. + + Example: + -------- + + expectedFiles = [ + { + "beauty": [ + "foo_v01.0001.exr", + "foo_v01.0002.exr" + ], + "Z": [ + "boo_v01.0001.exr", + "boo_v01.0002.exr" + ] } + ] - representations.append(rep) + This will create instances for `beauty` and `Z` subset + adding those files to their respective representations. - # if we have one representation with preview tag - # flag whole instance for review and for ftrack - if preview: - if "ftrack" not in families: - if os.environ.get("FTRACK_SERVER"): - families.append("ftrack") - if "review" not in families: - families.append("review") + If we've got only list of files, we collect all filesequences. + More then one doesn't probably make sense, but we'll handle it + like creating one instance with multiple representations. 
- for r in rem: - ext = r.split(".")[-1] - rep = { - "name": ext, - "ext": ext, - "files": os.path.basename(r), - "stagingDir": os.path.dirname(r), - "anatomy_template": "publish", - } + Example: + -------- - representations.append(rep) + expectedFiles = [ + "foo_v01.0001.exr", + "foo_v01.0002.exr", + "xxx_v01.0001.exr", + "xxx_v01.0002.exr" + ] - if "representations" not in instance.data: - data["representations"] = [] + This will result in one instance with two representations: + `foo` and `xxx` + """ + if isinstance(data.get("expectedFiles")[0], dict): + instances = self._create_instances_for_aov( + instance_skeleton_data, + data.get("expectedFiles")) + else: + representations = self._get_representations( + instance_skeleton_data, + data.get("expectedFiles") + ) - # add representation - data["representations"] += representations + if "representations" not in instance.data: + data["representations"] = [] + + # add representation + data["representations"] += representations else: + # deprecated: passing regex is depecated. Please use + # `expectedFiles` and collect them. if "ext" in instance.data: ext = r"\." + re.escape(instance.data["ext"]) else: ext = r"\.\D+" regex = r"^{subset}.*\d+{ext}$".format( - subset=re.escape(subset), ext=ext - ) + subset=re.escape(subset), ext=ext) # Write metadata for publish job - metadata = { + # publish job file + publish_job = { "asset": asset, "frameStart": start, "frameEnd": end, @@ -380,34 +582,33 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "version": context.data["version"], "intent": context.data.get("intent"), "comment": context.data.get("comment"), - # Optional metadata (for debugging) - "metadata": { - "job": render_job, - "session": api.Session.copy(), - "instance": data, - }, + "job": render_job, + "session": api.Session.copy(), + "instances": instances or [data] } - if api.Session["AVALON_APP"] == "nuke": - metadata["subset"] = subset - + # pass Ftrack credentials in case of Muster if submission_type == "muster": ftrack = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"), "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"), } - metadata.update({"ftrack": ftrack}) + publish_job.update({"ftrack": ftrack}) if regex: - metadata["regex"] = regex + publish_job["regex"] = regex # Ensure output dir exists output_dir = instance.data["outputDir"] if not os.path.isdir(output_dir): os.makedirs(output_dir) - if data.get("extendFrames", False): + # TODO: remove this code + # deprecated: this is left here for backwards compatibility and is + # not probably working at all. :hammer: + if data.get("extendFrames", False) \ + and not data.get("expectedFiles", False): family = "render" override = data["overrideExistingFrame"] @@ -423,18 +624,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # Frame comparison prev_start = None prev_end = None - resource_range = range(int(start), int(end) + 1) + resource_range = range(int(start), int(end)+1) # Gather all the subset files (one subset per render pass!) 
subset_names = [data["subset"]] subset_names.extend(data.get("renderPasses", [])) resources = [] for subset_name in subset_names: - version = get_latest_version( - asset_name=data["asset"], - subset_name=subset_name, - family=family, - ) + version = get_latest_version(asset_name=data["asset"], + subset_name=subset_name, + family=family) # Set prev start / end frames for comparison if not prev_start and not prev_end: @@ -442,9 +641,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): prev_end = version["data"]["frameEnd"] subset_resources = get_resources(version, _ext) - resource_files = get_resource_files( - subset_resources, resource_range, override - ) + resource_files = get_resource_files(subset_resources, + resource_range, + override) resources.extend(resource_files) @@ -452,10 +651,27 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): updated_end = max(end, prev_end) # Update metadata and instance start / end frame - self.log.info( - "Updating start / end frame : " - "{} - {}".format(updated_start, updated_end) - ) + self.log.info("Updating start / end frame : " + "{} - {}".format(updated_start, updated_end)) + + # TODO : Improve logic to get new frame range for the + # publish job (publish_filesequence.py) + # The current approach is not following Pyblish logic + # which is based + # on Collect / Validate / Extract. + + # ---- Collect Plugins --- + # Collect Extend Frames - Only run if extendFrames is toggled + # # # Store in instance: + # # # Previous rendered files per subset based on frames + # # # --> Add to instance.data[resources] + # # # Update publish frame range + + # ---- Validate Plugins --- + # Validate Extend Frames + # # # Check if instance has the requirements to extend frames + # There might have been some things which can be added to the list + # Please do so when fixing this. 
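
# Hedged sketch of the frame range union computed above (and again in
# `_extend_frames` later): an existing publish of 1001-1050 extended by a
# render of 1040-1100 must cover 1001-1100.
prev_start, prev_end = 1001, 1050   # from the latest published version
start, end = 1040, 1100             # from the current render job
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
assert (updated_start, updated_end) == (1001, 1100)
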
# Start frame metadata["frameStart"] = updated_start @@ -494,3 +710,33 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): shutil.copy(source, dest) self.log.info("Finished copying %i files" % len(resources)) + + def _extend_frames(self, asset, subset, start, end, override): + family = "render" + # override = data.get("overrideExistingFrame", False) + + # Frame comparison + prev_start = None + prev_end = None + + version = get_latest_version( + asset_name=asset, + subset_name=subset, + family=family, + ) + + # Set prev start / end frames for comparison + if not prev_start and not prev_end: + prev_start = version["data"]["frameStart"] + prev_end = version["data"]["frameEnd"] + + updated_start = min(start, prev_start) + updated_end = max(end, prev_end) + + # Update metadata and instance start / end frame + self.log.info( + "Updating start / end frame : " + "{} - {}".format(updated_start, updated_end) + ) + + return updated_start, updated_end diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 771078a5f5..37e1d0d7b1 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -374,14 +374,16 @@ class AExpectedFiles: for regex, value in mappings: file_prefix = re.sub(regex, value, file_prefix) + aov_files = [] for frame in range( int(start_frame), int(end_frame) + 1, int(frame_step)): - expected_files.append( + aov_files.append( '{}.{}.{}'.format(file_prefix, str(frame).rjust(padding, "0"), aov[1])) + expected_files.append({aov[0]: aov_files}) file_prefix = resolved_path else: mappings = ( From 957ca8ecd2f03097e4c1d48dff955d49b4150825 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 14:02:59 +0100 Subject: [PATCH 248/434] fix current frame key --- pype/scripts/otio_burnin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 1d00a08521..e34f7235e4 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -378,7 +378,7 @@ def burnins_from_data( # Check frame start and add expression if is available if frame_start is not None: - data[CURRENT_FRAME_KEY] = r'%%{eif\:n+%d\:d}' % frame_start + data[CURRENT_FRAME_KEY[1:-1]] = r'%%{eif\:n+%d\:d}' % frame_start if frame_start_tc is not None: data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY From b657af153f7d9af72ea73327ebbef4a5e8a333eb Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 17 Feb 2020 15:25:33 +0100 Subject: [PATCH 249/434] fix(global): removing unnecessary host argument --- pype/plugins/global/publish/collect_anatomy.py | 2 +- pype/plugins/global/publish/collect_instance_anatomy_data.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py index 0831c16d32..ae83e39513 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -18,7 +18,7 @@ Provides: import os import json -from avalon import io, api, lib +from avalon import api, lib from pypeapp import Anatomy import pyblish.api diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py index 9c6a8b08f2..825c48dcf4 100644 --- a/pype/plugins/global/publish/collect_instance_anatomy_data.py +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -33,7 +33,6 @@ class 
CollectInstanceAnatomyData(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.49 label = "Collect instance anatomy data" - hosts = ["maya", "nuke", "standalonepublisher"] def process(self, instance): # get all the stuff from the database From 3922529058d43a631a7269ba4006707edd68c150 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 15:54:44 +0100 Subject: [PATCH 250/434] escape colon and comma in texts --- pype/scripts/otio_burnin.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index e34f7235e4..c61ea66d2d 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -199,7 +199,11 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): """ resolution = self.resolution data = { - 'text': text, + 'text': ( + text + .replace(",", r"\,") + .replace(':', r'\:') + ), 'color': options['font_color'], 'size': options['font_size'] } From d9ffc411a4d65559e436e7d220b8023c8eba5dc6 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 16:48:36 +0100 Subject: [PATCH 251/434] integrate new's version override is ready to handle "append" method per instance --- pype/plugins/global/publish/integrate_new.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index b5b6b10aa2..2e2094dfc8 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -204,6 +204,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): data=version_data) self.log.debug("Creating version ...") + + new_repre_names_low = [_repre["name"].lower() for _repre in repres] + existing_version = io.find_one({ 'type': 'version', 'parent': subset["_id"], @@ -213,6 +216,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if existing_version is None: version_id = io.insert_one(version).inserted_id else: + # Check if instance have set `append` mode which cause that + # only replicated representations are set to archive + append_repres = instance.data.get("append", False) + # Update version data io.update_many({ 'type': 'version', @@ -230,6 +237,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): })) bulk_writes = [] for repre in current_repres: + if append_repres: + # archive only duplicated representations + if repre["name"].lower() not in new_repre_names_low: + continue # Representation must change type, # `_id` must be stored to other key and replaced with new # - that is because new representations should have same ID @@ -284,7 +295,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if 'transfers' not in instance.data: instance.data['transfers'] = [] - new_repre_names = [] for idx, repre in enumerate(instance.data["representations"]): # Collection @@ -454,9 +464,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): continue repre_context[key] = template_data[key] - repre_name = repre['name'] - new_repre_names.append(repre_name) - # Use previous representation's id if there are any repre_id = None for _repre in existing_repres: From 06f9187119ff8b15074717c83c224b96e287e7e7 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 16:52:20 +0100 Subject: [PATCH 252/434] thumbnail is also stored to asset in store thumbnails action --- pype/ftrack/actions/action_store_thumbnails_to_avalon.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py 
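
# A hedged sketch of the escaping added in "escape colon and comma in texts"
# above: ffmpeg's drawtext filter treats ":" and "," as option separators, so
# both must be escaped before the text is embedded in a filter graph. The
# burnin text here is hypothetical.
text = "shot010, comp: v003"
escaped = text.replace(",", r"\,").replace(":", r"\:")
print(escaped)  # shot010\, comp\: v003
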
b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py index ce0dfeb244..21ac6666d5 100644 --- a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py +++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -290,6 +290,11 @@ class StoreThumbnailsToAvalon(BaseAction): {"$set": {"data.thumbnail_id": thumbnail_id}} ) + self.db_con.update_one( +- {"_id": avalon_asset["_id"]}, +- {"$set": {"data.thumbnail_id": thumbnail_id}} +- ) + action_job["status"] = "done" session.commit() From 4256eccc2b797d1e8af4d800e11a14c78222c669 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 17:05:08 +0100 Subject: [PATCH 253/434] fixed few merge issues --- pype/plugins/global/publish/integrate_new.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 9de29cd387..8d41aa7907 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -196,6 +196,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): append_repres = instance.data.get("append", False) # Update version data + # TODO query by _id and io.update_many({ 'type': 'version', 'parent': subset["_id"], @@ -322,7 +323,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = None if repre.get("frameStart"): - frame_start_padding = anatomy.templates["render"]["padding"] + frame_start_padding = ( + anatomy.templates["render"]["padding"] + ) index_frame_start = int(repre.get("frameStart")) # exception for slate workflow @@ -407,9 +410,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Use previous representation's id if there are any repre_id = None + repre_name_low = repre["name"].lower() for _repre in existing_repres: # NOTE should we check lowered names? 
- if repre_name == _repre["name"]: + if repre_name_low == _repre["name"]: repre_id = _repre["orig_id"] break @@ -435,7 +439,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): representation["context"]["output"] = repre['outputName'] if sequence_repre and repre.get("frameStart"): - representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart")) + representation['context']['frame'] = ( + src_padding_exp % int(repre.get("frameStart")) + ) self.log.debug("__ representation: {}".format(representation)) destination_list.append(dst) From d3823aecd1c36fa876142d7775fe5f47cbf913eb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 17:34:59 +0100 Subject: [PATCH 254/434] fixed skipping jpex extraction --- pype/plugins/global/publish/extract_jpeg.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 7c0820ea28..28d16198cd 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -43,7 +43,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): self.log.debug(repre) if 'review' in repre['tags'] or "thumb-nuke" in repre['tags']: if not isinstance(repre['files'], list): - return + continue input_file = repre['files'][0] From 1a1e73649866a77e932075b901fe8edccf2e29ca Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 17:36:31 +0100 Subject: [PATCH 255/434] moved identation by changing validation condition logic --- pype/plugins/global/publish/extract_jpeg.py | 95 +++++++++++---------- 1 file changed, 49 insertions(+), 46 deletions(-) diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 28d16198cd..abd20bb9ea 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -41,63 +41,66 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): for repre in representations: self.log.debug(repre) - if 'review' in repre['tags'] or "thumb-nuke" in repre['tags']: - if not isinstance(repre['files'], list): - continue + valid = 'review' in repre['tags'] or "thumb-nuke" in repre['tags'] + if not valid: + continue - input_file = repre['files'][0] + if not isinstance(repre['files'], list): + continue - # input_file = ( - # collections[0].format('{head}{padding}{tail}') % start - # ) - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("input {}".format(full_input_path)) + input_file = repre['files'][0] - filename = os.path.splitext(input_file)[0] - if not filename.endswith('.'): - filename += "." - jpeg_file = filename + "jpg" - full_output_path = os.path.join(stagingdir, jpeg_file) + # input_file = ( + # collections[0].format('{head}{padding}{tail}') % start + # ) + full_input_path = os.path.join(stagingdir, input_file) + self.log.info("input {}".format(full_input_path)) - self.log.info("output {}".format(full_output_path)) + filename = os.path.splitext(input_file)[0] + if not filename.endswith('.'): + filename += "." 
+ jpeg_file = filename + "jpg" + full_output_path = os.path.join(stagingdir, jpeg_file) - config_data = instance.context.data['output_repre_config'] + self.log.info("output {}".format(full_output_path)) - proj_name = os.environ.get('AVALON_PROJECT', '__default__') - profile = config_data.get(proj_name, config_data['__default__']) + config_data = instance.context.data['output_repre_config'] - jpeg_items = [] - jpeg_items.append( - os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) - # override file if already exists - jpeg_items.append("-y") - # use same input args like with mov - jpeg_items.extend(profile.get('input', [])) - # input file - jpeg_items.append("-i {}".format(full_input_path)) - # output file - jpeg_items.append(full_output_path) + proj_name = os.environ.get('AVALON_PROJECT', '__default__') + profile = config_data.get(proj_name, config_data['__default__']) - subprocess_jpeg = " ".join(jpeg_items) + jpeg_items = [] + jpeg_items.append( + os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + # override file if already exists + jpeg_items.append("-y") + # use same input args like with mov + jpeg_items.extend(profile.get('input', [])) + # input file + jpeg_items.append("-i {}".format(full_input_path)) + # output file + jpeg_items.append(full_output_path) - # run subprocess - self.log.debug("{}".format(subprocess_jpeg)) - pype.api.subprocess(subprocess_jpeg) + subprocess_jpeg = " ".join(jpeg_items) - if "representations" not in instance.data: - instance.data["representations"] = [] + # run subprocess + self.log.debug("{}".format(subprocess_jpeg)) + pype.api.subprocess(subprocess_jpeg) - representation = { - 'name': 'thumbnail', - 'ext': 'jpg', - 'files': jpeg_file, - "stagingDir": stagingdir, - "thumbnail": True, - "tags": ['thumbnail'] - } + if "representations" not in instance.data: + instance.data["representations"] = [] - # adding representation - self.log.debug("Adding: {}".format(representation)) - representations_new.append(representation) + representation = { + 'name': 'thumbnail', + 'ext': 'jpg', + 'files': jpeg_file, + "stagingDir": stagingdir, + "thumbnail": True, + "tags": ['thumbnail'] + } + + # adding representation + self.log.debug("Adding: {}".format(representation)) + representations_new.append(representation) instance.data["representations"] = representations_new From 3fe6a13f0c2c97adf34bf6aea042970fdf90e572 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 17 Feb 2020 17:40:49 +0100 Subject: [PATCH 256/434] sync actions ignore milestones --- pype/ftrack/lib/avalon_sync.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index f08dc73c19..f5b4c4b8c3 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -236,6 +236,7 @@ class SyncEntitiesFactory: " from TypedContext where project_id is \"{}\"" ) ignore_custom_attr_key = "avalon_ignore_sync" + ignore_entity_types = ["milestone"] report_splitter = {"type": "label", "value": "---"} @@ -366,7 +367,10 @@ class SyncEntitiesFactory: parent_id = entity["parent_id"] entity_type = entity.entity_type entity_type_low = entity_type.lower() - if entity_type_low == "task": + if entity_type_low in self.ignore_entity_types: + continue + + elif entity_type_low == "task": entities_dict[parent_id]["tasks"].append(entity["name"]) continue From 871870c603119c4920dd15f4995e27425898ceb8 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 17 Feb 2020 18:34:01 +0100 Subject: [PATCH 257/434] fix remaining dashes --- 
pype/ftrack/actions/action_store_thumbnails_to_avalon.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py index 21ac6666d5..7adc36f4b5 100644 --- a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py +++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -291,9 +291,9 @@ class StoreThumbnailsToAvalon(BaseAction): ) self.db_con.update_one( -- {"_id": avalon_asset["_id"]}, -- {"$set": {"data.thumbnail_id": thumbnail_id}} -- ) + {"_id": avalon_asset["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) action_job["status"] = "done" session.commit() From c32e0460eb7257f3f31d24cbdd424fddee28331c Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Mon, 17 Feb 2020 19:15:33 +0100 Subject: [PATCH 258/434] removed unused code --- .../global/publish/submit_publish_job.py | 273 +++++++----------- 1 file changed, 110 insertions(+), 163 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index b79318af76..35d6bf5c4a 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -243,10 +243,93 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if not response.ok: raise Exception(response.text) + def _copy_extend_frames(self, instance, representation): + """ + This will copy all existing frames from subset's latest version back + to render directory and rename them to what renderer is expecting. + + :param instance: instance to get required data from + :type instance: pyblish.plugin.Instance + """ + import speedcopy + + self.log.info("Preparing to copy ...") + start = instance.data.get("startFrame") + end = instance.data.get("endFrame") + + # get latest version of subset + # this will stop if subset wasn't published yet + version = get_latest_version( + instance.data.get("asset"), + instance.data.get("subset"), "render") + # get its files based on extension + subset_resources = get_resources(version, representation.get("ext")) + r_col, _ = clique.assemble(subset_resources) + + # if override remove all frames we are expecting to be rendered + # so we'll copy only those missing from current render + if instance.data.get("overrideExistingFrame"): + for frame in range(start, end+1): + if frame not in r_col.indexes: + continue + r_col.indexes.remove(frame) + + # now we need to translate published names from represenation + # back. 
This is tricky, right now we'll just use same naming + # and only switch frame numbers + resource_files = [] + r_filename = os.path.basename( + representation.get("files")[0]) # first file + op = re.search(R_FRAME_NUMBER, r_filename) + pre = r_filename[:op.start("frame")] + post = r_filename[op.end("frame"):] + assert op is not None, "padding string wasn't found" + for frame in list(r_col): + fn = re.search(R_FRAME_NUMBER, frame) + # silencing linter as we need to compare to True, not to + # type + assert fn is not None, "padding string wasn't found" + # list of tuples (source, destination) + resource_files.append( + (frame, + os.path.join(representation.get("stagingDir"), + "{}{}{}".format(pre, + fn.group("frame"), + post))) + ) + + # test if destination dir exists and create it if not + output_dir = os.path.dirname(representation.get("files")[0]) + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + + # copy files + for source in resource_files: + speedcopy.copy(source[0], source[1]) + self.log.info(" > {}".format(source[1])) + + self.log.info( + "Finished copying %i files" % len(resource_files)) + def _create_instances_for_aov(self, context, instance_data, exp_files): + """ + This will create new instance for every aov it can detect in expected + files list. + + :param context: context of orignal instance to get important data + :type context: pyblish.plugin.Context + :param instance_data: skeleton data for instance (those needed) later + by collector + :type instance_data: pyblish.plugin.Instance + :param exp_files: list of expected files divided by aovs + :type exp_files: list + :returns: list of instances + :rtype: list(publish.plugin.Instance) + """ task = os.environ["AVALON_TASK"] subset = instance_data["subset"] instances = [] + # go through aovs in expected files for aov, files in exp_files.items(): cols, rem = clique.assemble(files) # we shouldn't have any reminders @@ -280,6 +363,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): new_instance.data.update(instance_data) new_instance.data["subset"] = subset_name ext = cols[0].tail.lstrip(".") + + # create represenation rep = { "name": ext, "ext": ext, @@ -293,58 +378,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "tags": ["review", "preview"] if preview else [] } - # if extending frames from existing version, copy files from there - # into our destination directory - if instance_data.get("extendFrames", False): - self.log.info("Preparing to copy ...") - import speedcopy - - # get latest version of subset - # this will stop if subset wasn't published yet - version = get_latest_version( - instance_data.get("asset"), - subset_name, "render") - # get its files based on extension - subset_resources = get_resources(version, ext) - r_col, _ = clique.assemble(subset_resources) - - # if override remove all frames we are expecting to be rendered - # so we'll copy only those missing from current render - if instance_data.get("overrideExistingFrame"): - for frame in range(start, end+1): - if frame not in r_col.indexes: - continue - r_col.indexes.remove(frame) - - # now we need to translate published names from represenation - # back. 
This is tricky, right now we'll just use same naming - # and only switch frame numbers - resource_files = [] - r_filename = os.path.basename(list(cols[0])[0]) # first file - op = re.search(R_FRAME_NUMBER, r_filename) - pre = r_filename[:op.start("frame")] - post = r_filename[op.end("frame"):] - assert op is not None, "padding string wasn't found" - for frame in list(r_col): - fn = re.search(R_FRAME_NUMBER, frame) - # silencing linter as we need to compare to True, not to - # type - assert fn is not None, "padding string wasn't found" - # list of tuples (source, destination) - resource_files.append( - (frame, - os.path.join(staging, - "{}{}{}".format(pre, - fn.group("frame"), - post))) - ) - - for source in resource_files: - speedcopy.copy(source[0], source[1]) - - self.log.info( - "Finished copying %i files" % len(resource_files)) - + # add tags if preview: if "ftrack" not in new_instance.data["families"]: if os.environ.get("FTRACK_SERVER"): @@ -355,9 +389,26 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): new_instance.data["representations"] = [rep] instances.append(new_instance) + # if extending frames from existing version, copy files from there + # into our destination directory + if instance_data.get("extendFrames", False): + self._copy_extend_frames(new_instance, rep) + return instances def _get_representations(self, instance, exp_files): + """ + This will return representations of expected files if they are not + in hierarchy of aovs. There should be only one sequence of files for + most cases, but if not - we create representation from each of them. + + :param instance: instance for which we are setting representations + :type instance: pyblish.plugin.Instance + :param exp_files: list of expected files + :type exp_files: list + :returns: list of representations + :rtype: list(dict) + """ representations = [] start = int(instance.data.get("frameStart")) end = int(instance.data.get("frameEnd")) @@ -406,6 +457,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families.append("review") instance.data["families"] = families + # add reminders as representations for r in rem: ext = r.split(".")[-1] rep = { @@ -569,6 +621,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): regex = r"^{subset}.*\d+{ext}$".format( subset=re.escape(subset), ext=ext) + # Write metadata for publish job # publish job file publish_job = { @@ -604,116 +657,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if not os.path.isdir(output_dir): os.makedirs(output_dir) - # TODO: remove this code - # deprecated: this is left here for backwards compatibility and is - # not probably working at all. :hammer: - if data.get("extendFrames", False) \ - and not data.get("expectedFiles", False): - - family = "render" - override = data["overrideExistingFrame"] - - # override = data.get("overrideExistingFrame", False) - out_file = render_job.get("OutFile") - if not out_file: - raise RuntimeError("OutFile not found in render job!") - - extension = os.path.splitext(out_file[0])[1] - _ext = extension[1:] - - # Frame comparison - prev_start = None - prev_end = None - resource_range = range(int(start), int(end)+1) - - # Gather all the subset files (one subset per render pass!) 
- subset_names = [data["subset"]] - subset_names.extend(data.get("renderPasses", [])) - resources = [] - for subset_name in subset_names: - version = get_latest_version(asset_name=data["asset"], - subset_name=subset_name, - family=family) - - # Set prev start / end frames for comparison - if not prev_start and not prev_end: - prev_start = version["data"]["frameStart"] - prev_end = version["data"]["frameEnd"] - - subset_resources = get_resources(version, _ext) - resource_files = get_resource_files(subset_resources, - resource_range, - override) - - resources.extend(resource_files) - - updated_start = min(start, prev_start) - updated_end = max(end, prev_end) - - # Update metadata and instance start / end frame - self.log.info("Updating start / end frame : " - "{} - {}".format(updated_start, updated_end)) - - # TODO : Improve logic to get new frame range for the - # publish job (publish_filesequence.py) - # The current approach is not following Pyblish logic - # which is based - # on Collect / Validate / Extract. - - # ---- Collect Plugins --- - # Collect Extend Frames - Only run if extendFrames is toggled - # # # Store in instance: - # # # Previous rendered files per subset based on frames - # # # --> Add to instance.data[resources] - # # # Update publish frame range - - # ---- Validate Plugins --- - # Validate Extend Frames - # # # Check if instance has the requirements to extend frames - # There might have been some things which can be added to the list - # Please do so when fixing this. - - # Start frame - metadata["frameStart"] = updated_start - metadata["metadata"]["instance"]["frameStart"] = updated_start - - # End frame - metadata["frameEnd"] = updated_end - metadata["metadata"]["instance"]["frameEnd"] = updated_end - - metadata_filename = "{}_metadata.json".format(subset) - - metadata_path = os.path.join(output_dir, metadata_filename) - # convert log messages if they are `LogRecord` to their - # string format to allow serializing as JSON later on. 
- rendered_logs = [] - for log in metadata["metadata"]["instance"].get("_log", []): - if isinstance(log, logging.LogRecord): - rendered_logs.append(log.getMessage()) - else: - rendered_logs.append(log) - - metadata["metadata"]["instance"]["_log"] = rendered_logs - with open(metadata_path, "w") as f: - json.dump(metadata, f, indent=4, sort_keys=True) - - # Copy files from previous render if extendFrame is True - if data.get("extendFrames", False): - - self.log.info("Preparing to copy ..") - import shutil - - dest_path = data["outputDir"] - for source in resources: - src_file = os.path.basename(source) - dest = os.path.join(dest_path, src_file) - shutil.copy(source, dest) - - self.log.info("Finished copying %i files" % len(resources)) - def _extend_frames(self, asset, subset, start, end, override): - family = "render" - # override = data.get("overrideExistingFrame", False) + """ + This will get latest version of asset and update frame range based + on minimum and maximuma values + """ # Frame comparison prev_start = None @@ -722,7 +670,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): version = get_latest_version( asset_name=asset, subset_name=subset, - family=family, + family='render' ) # Set prev start / end frames for comparison @@ -733,7 +681,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): updated_start = min(start, prev_start) updated_end = max(end, prev_end) - # Update metadata and instance start / end frame self.log.info( "Updating start / end frame : " "{} - {}".format(updated_start, updated_end) From 07dbb2533d3a091c27a4a40a237b46377509f9b4 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 17 Feb 2020 21:07:26 +0100 Subject: [PATCH 259/434] fix(nks): missing family if `review` applied --- pype/plugins/nukestudio/publish/collect_audio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/publish/collect_audio.py b/pype/plugins/nukestudio/publish/collect_audio.py index 61419b1ad9..e141f50488 100644 --- a/pype/plugins/nukestudio/publish/collect_audio.py +++ b/pype/plugins/nukestudio/publish/collect_audio.py @@ -15,7 +15,7 @@ class CollectAudio(api.InstancePlugin): order = api.CollectorOrder + 0.1025 label = "Collect Audio" hosts = ["nukestudio"] - families = ["clip"] + families = ["clip", "plate"] def process(self, instance): # Exclude non-tagged instances. From bc4447a971e48ee375801146ff391c66c1e1c681 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 17 Feb 2020 21:45:15 +0100 Subject: [PATCH 260/434] fix(nks): tags collection for plates and audio wrong subset name --- .../nukestudio/publish/collect_audio.py | 20 ++++++++++--------- .../nukestudio/publish/collect_plates.py | 11 ++++------ 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_audio.py b/pype/plugins/nukestudio/publish/collect_audio.py index e141f50488..727d7da795 100644 --- a/pype/plugins/nukestudio/publish/collect_audio.py +++ b/pype/plugins/nukestudio/publish/collect_audio.py @@ -1,5 +1,5 @@ from pyblish import api - +import os class CollectAudio(api.InstancePlugin): """Collect audio from tags. @@ -12,17 +12,19 @@ class CollectAudio(api.InstancePlugin): """ # Run just before CollectSubsets - order = api.CollectorOrder + 0.1025 + order = api.CollectorOrder + 0.1021 label = "Collect Audio" hosts = ["nukestudio"] - families = ["clip", "plate"] + families = ["clip"] def process(self, instance): # Exclude non-tagged instances. 
tagged = False for tag in instance.data["tags"]: - family = dict(tag["metadata"]).get("tag.family", "") + tag_data = dict(tag["metadata"]) + family = tag_data.get("tag.family", "") if family.lower() == "audio": + subset = tag_data.get("tag.subset", "Main") tagged = True if not tagged: @@ -40,14 +42,14 @@ class CollectAudio(api.InstancePlugin): data["family"] = "audio" data["families"] = ["ftrack"] - subset = "" - for tag in instance.data["tags"]: - tag_data = dict(tag["metadata"]) - if "tag.subset" in tag_data: - subset = tag_data["tag.subset"] data["subset"] = "audio" + subset.title() data["source"] = data["sourcePath"] + data["label"] = "{} - {} - ({})".format( + data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[ + 1] + ) + self.log.debug("Creating instance with data: {}".format(data)) instance.context.create_instance(**data) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index 75eb5bb043..e0ecbaf302 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -23,8 +23,10 @@ class CollectPlates(api.InstancePlugin): # Exclude non-tagged instances. tagged = False for tag in instance.data["tags"]: - family = dict(tag["metadata"]).get("tag.family", "") + tag_data = dict(tag["metadata"]) + family = tag_data.get("tag.family", "") if family.lower() == "plate": + subset = tag_data.get("tag.subset", "Main") tagged = True break @@ -43,12 +45,7 @@ class CollectPlates(api.InstancePlugin): data["family"] = family.lower() data["families"] = ["ftrack"] + instance.data["families"][1:] data["source"] = data["sourcePath"] - - subset = "" - for tag in instance.data["tags"]: - tag_data = dict(tag["metadata"]) - if "tag.subset" in tag_data: - subset = tag_data["tag.subset"] + data["subset"] = data["family"] + subset.title() data["name"] = data["subset"] + "_" + data["asset"] From f3fc92881ad360017a03d905a55d1485256ce9e5 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 17 Feb 2020 21:47:43 +0100 Subject: [PATCH 261/434] fix(nks): not correct way of collecting frame start - should not be offset-ed by handle start --- pype/plugins/nukestudio/publish/collect_tag_framestart.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_tag_framestart.py b/pype/plugins/nukestudio/publish/collect_tag_framestart.py index 1342d996ab..993aa99a3e 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_framestart.py +++ b/pype/plugins/nukestudio/publish/collect_tag_framestart.py @@ -30,9 +30,12 @@ class CollectClipTagFrameStart(api.InstancePlugin): except ValueError: if "source" in t_value: source_first = instance.data["sourceFirst"] + if source_first == 0: + source_first = 1 + self.log.info("Start frame on `{0}`".format(source_first)) source_in = instance.data["sourceIn"] - handle_start = instance.data["handleStart"] - start_frame = (source_first + source_in) - handle_start + self.log.info("Start frame on `{0}`".format(source_in)) + start_frame = source_first + source_in instance.data["startingFrame"] = start_frame self.log.info("Start frame on `{0}` set to `{1}`".format( From 188881a0a5873ee7984b610f64599eebb57d9ac0 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 18 Feb 2020 00:02:50 +0100 Subject: [PATCH 262/434] fix(nks): review family to `plate` and plates cleanup --- .../nukestudio/publish/collect_plates.py | 17 ++++++++++------- .../nukestudio/publish/collect_reviews.py | 2 +- 2 files changed, 11 
insertions(+), 8 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index e0ecbaf302..acdc5193ae 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -14,7 +14,7 @@ class CollectPlates(api.InstancePlugin): """ # Run just before CollectSubsets - order = api.CollectorOrder + 0.1025 + order = api.CollectorOrder + 0.1021 label = "Collect Plates" hosts = ["nukestudio"] families = ["clip"] @@ -36,24 +36,27 @@ class CollectPlates(api.InstancePlugin): "\"plate\"".format(instance) ) return + self.log.debug("__ subset: `{}`".format(instance.data["subset"])) + # if "audio" in instance.data["subset"]: + # return # Collect data. data = {} for key, value in instance.data.iteritems(): data[key] = value + self.log.debug("__ family: `{}`".format(family)) + self.log.debug("__ subset: `{}`".format(subset)) + data["family"] = family.lower() data["families"] = ["ftrack"] + instance.data["families"][1:] data["source"] = data["sourcePath"] - - data["subset"] = data["family"] + subset.title() - + data["subset"] = family + subset.title() data["name"] = data["subset"] + "_" + data["asset"] data["label"] = "{} - {} - ({})".format( - data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[ - 1] - ) + data['asset'], data["subset"], os.path.splitext( + data["sourcePath"])[1]) if "review" in instance.data["families"]: data["label"] += " - review" diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index f223e5ca65..af8fd4a0e7 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -16,7 +16,7 @@ class CollectReviews(api.InstancePlugin): order = api.CollectorOrder + 0.1022 label = "Collect Reviews" hosts = ["nukestudio"] - families = ["clip"] + families = ["plate"] def process(self, instance): # Exclude non-tagged instances. 
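
A minimal sketch of the tag-driven naming these NukeStudio collectors rely on, using hypothetical tag metadata; the `tag.family` and `tag.subset` keys are the ones the plugins actually read:

    # hypothetical tag metadata attached to a clip in NukeStudio
    tag_metadata = {"tag.family": "plate", "tag.subset": "Main"}
    family = tag_metadata.get("tag.family", "")
    subset = tag_metadata.get("tag.subset", "Main")
    # collect_plates composes the subset name the same way
    print(family + subset.title())  # -> "plateMain"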
From a635e0108733e7d220961a67858fd5e2fb2fe929 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 08:35:45 +0100 Subject: [PATCH 263/434] reimplemented after merge --- pype/plugins/global/publish/extract_jpeg.py | 6 ++++-- pype/plugins/global/publish/extract_review.py | 8 ++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index abd20bb9ea..bedfe4f7f8 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -3,6 +3,7 @@ import os import pyblish.api import clique import pype.api +import pype.lib class ExtractJpegEXR(pyblish.api.InstancePlugin): @@ -69,9 +70,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): proj_name = os.environ.get('AVALON_PROJECT', '__default__') profile = config_data.get(proj_name, config_data['__default__']) + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + jpeg_items = [] - jpeg_items.append( - os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + jpeg_items.append(ffmpeg_path) # override file if already exists jpeg_items.append("-y") # use same input args like with mov diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 4d63e2c641..44c3b5dbc4 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -2,6 +2,7 @@ import os import pyblish.api import clique import pype.api +import pype.lib class ExtractReview(pyblish.api.InstancePlugin): @@ -40,6 +41,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # get representation and loop them representations = inst_data["representations"] + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + # filter out mov and img sequences representations_new = representations[:] for repre in representations: @@ -324,10 +327,7 @@ class ExtractReview(pyblish.api.InstancePlugin): os.mkdir(stg_dir) mov_args = [ - os.path.join( - os.environ.get( - "FFMPEG_PATH", - ""), "ffmpeg"), + ffmpeg_path, " ".join(input_args), " ".join(output_args) ] From c4f049f64e411071bd41f3ff3ddc7d3c4e5e6225 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 18 Feb 2020 11:39:32 +0100 Subject: [PATCH 264/434] fix(nk): validator of rendered frames failed if slate were missing --- pype/plugins/nuke/publish/validate_rendered_frames.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index 169ea1ecb5..8a8bf3cc5e 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -76,7 +76,8 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): 'len(collection.indexes): {}'.format(collected_frames_len) ) - if "slate" in instance.data["families"]: + if ("slate" in instance.data["families"]) \ + and (frame_length != collected_frames_len): collected_frames_len -= 1 assert (collected_frames_len == frame_length), ( From 9526e0b5a3c8f4a386333ab62cd60384570f4a83 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 13:08:16 +0100 Subject: [PATCH 265/434] implemented event that can change task status on first asset version creation --- .../events/event_first_version_status.py | 175 ++++++++++++++++++ 1 file changed, 175 insertions(+) create mode 100644 pype/ftrack/events/event_first_version_status.py diff --git a/pype/ftrack/events/event_first_version_status.py 
b/pype/ftrack/events/event_first_version_status.py new file mode 100644 index 0000000000..ac0e94c3ae --- /dev/null +++ b/pype/ftrack/events/event_first_version_status.py @@ -0,0 +1,175 @@ +from pype.ftrack import BaseEvent + + +class FirstVersionStatus(BaseEvent): + + # WARNING Priority MUST be higher + # than handler in `event_version_to_task_statuses.py` + priority = 200 + + first_run = True + keys_enum = ["task", "task_type"] + # This should be set with presets + task_status_map = [] + + # EXAMPLE of `task_status_map` + __example_status_map__ = [{ + # `key` specify where to look for name (is enumerator of `keys_enum`) + "key": "task", + # speicification of name + "name": "compositing", + # Status to set to the task + "status": "Blocking" + }] + + def launch(self, session, event): + """Set task's status for first created Asset Version.""" + + if not self.task_status_map: + return + + if self.first_run: + self.first_run = False + valid_task_status_map = [] + for item in self.task_status_map: + key = (item.get("key") or "").lower() + name = (item.get("name") or "").lower() + status = (item.get("status") or "").lower() + if not (key and name and status): + self.log.warning(( + "Invalid item in Task -> Status mapping. {}" + ).format(str(item))) + continue + + if key not in self.keys_enum: + expected_msg = "" + last_key_idx = len(self.keys_enum) - 1 + for idx, key in enumerate(self.keys_enum): + if idx == 0: + joining_part = "`{}`" + elif idx == last_key_idx: + joining_part = "or `{}`" + else: + joining_part = ", `{}`" + expected_msg += joining_part.format(key) + + self.log.warning(( + "Invalid key `{}`. Expected: {}." + ).format(key, expected_msg)) + continue + + valid_task_status_map.append({ + "key": key, + "name": name, + "status": status + }) + self.task_status_map = valid_task_status_map + + entities_info = self.filter_event_ents(event) + if not entities_info: + return + + entity_ids = [] + for entity_info in entities_info: + entity_ids.append(entity_info["entityId"]) + + joined_entity_ids = ",".join( + ["\"{}\"".format(entity_id) for entity_id in entity_ids] + ) + asset_verisons = session.query( + "AssetVersion where id in ({})".format(joined_entity_ids) + ).all() + + statuses_per_type_id = {} + + project_schema = None + for asset_verison in asset_verisons: + task_entity = asset_verison["task"] + found_item = None + for item in self.task_status_map: + if ( + item["key"] == "task" and + task_entity["name"].lower() != item["name"] + ): + continue + + elif ( + item["key"] == "task_type" and + task_entity["type"]["name"].lower() != item["name"] + ): + continue + + found_item = item + break + + if not found_item: + continue + + if project_schema is None: + project_schema = task_entity["project"]["project_schema"] + + # Get all available statuses for Task + type_id = task_entity["type_id"] + if type_id not in statuses_per_type_id: + statuses = project_schema.get_statuses( + "Task", task_entity["type_id"] + ) + + # map lowered status name with it's object + statuses_per_type_id[type_id] = { + status["name"].lower(): status for status in statuses + } + + statuses_by_low_name = statuses_per_type_id[type_id] + new_status = statuses_by_low_name.get(found_item["status"]) + if not new_status: + continue + + ent_path = "/".join([ent["name"] for ent in task_entity["link"]]) + + try: + task_entity["status"] = new_status + session.commit() + self.log.debug("[ {} ] Status updated to [ {} ]".format( + ent_path, new_status['name'] + )) + + except Exception: + session.rollback() + self.log.warning( + "[ {} 
] Status couldn't be set.".format(ent_path), + exc_info=True + ) + + def filter_event_ents(self, event): + filtered_ents = [] + for entity in event["data"].get("entities", []): + # Care only about add actions + if entity["action"] != "add": + continue + + # Filter AssetVersions + if entity["entityType"] != "assetversion": + continue + + entity_changes = entity.get("changes") or {} + + # Check if version of Asset Version is `1` + version_num = entity_changes.get("version", {}).get("new") + if version_num != 1: + continue + + # Skip in Asset Version don't have task + task_id = entity_changes.get("taskid", {}).get("new") + if not task_id: + continue + + filtered_ents.append(entity) + + return filtered_ents + + +def register(session, plugins_presets): + '''Register plugin. Called when used as an plugin.''' + + FirstVersionStatus(session, plugins_presets).register() From e57fecdeb34ac6dfb41d7cc75e019a32b40c5981 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 18 Feb 2020 13:08:25 +0100 Subject: [PATCH 266/434] making sure that options from capture.json are applied correctly this will need some refactoring though --- pype/maya/lib.py | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index ec39b3556e..dafc281903 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2176,18 +2176,29 @@ def load_capture_preset(path=None, data=None): 4: 'nolights'} for key in preset[id]: if key == 'high_quality': - temp_options2['multiSampleEnable'] = True - temp_options2['multiSampleCount'] = 8 - temp_options2['textureMaxResolution'] = 1024 - temp_options2['enableTextureMaxRes'] = True + if preset[id][key] == True: + temp_options2['multiSampleEnable'] = True + temp_options2['multiSampleCount'] = 4 + temp_options2['textureMaxResolution'] = 1024 + temp_options2['enableTextureMaxRes'] = True + temp_options2['textureMaxResMode'] = 1 + else: + temp_options2['multiSampleEnable'] = False + temp_options2['multiSampleCount'] = 4 + temp_options2['textureMaxResolution'] = 512 + temp_options2['enableTextureMaxRes'] = True + temp_options2['textureMaxResMode'] = 0 + + if key == 'ssaoEnable': + if preset[id][key] == True: + temp_options2['ssaoEnable'] = True + else: + temp_options2['ssaoEnable'] = False if key == 'alphaCut': temp_options2['transparencyAlgorithm'] = 5 temp_options2['transparencyQuality'] = 1 - if key == 'ssaoEnable': - temp_options2['ssaoEnable'] = True - if key == 'headsUpDisplay': temp_options['headsUpDisplay'] = True From 5a38ba950c728fc6718ae4e9ad39712b7a329119 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 13:14:22 +0100 Subject: [PATCH 267/434] validation of presets happens after registration --- .../events/event_first_version_status.py | 78 ++++++++++--------- 1 file changed, 40 insertions(+), 38 deletions(-) diff --git a/pype/ftrack/events/event_first_version_status.py b/pype/ftrack/events/event_first_version_status.py index ac0e94c3ae..59956697b6 100644 --- a/pype/ftrack/events/event_first_version_status.py +++ b/pype/ftrack/events/event_first_version_status.py @@ -7,7 +7,6 @@ class FirstVersionStatus(BaseEvent): # than handler in `event_version_to_task_statuses.py` priority = 200 - first_run = True keys_enum = ["task", "task_type"] # This should be set with presets task_status_map = [] @@ -22,49 +21,52 @@ class FirstVersionStatus(BaseEvent): "status": "Blocking" }] + def register(self, *args, **kwargs): + result = super(FirstVersionStatus, self).register(*args, **kwargs) + + valid_task_status_map = [] 
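+        # each preset item is expected to have the same shape as
+        # `__example_status_map__` above, e.g.:
+        #     {"key": "task", "name": "compositing", "status": "Blocking"}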
+ for item in self.task_status_map: + key = (item.get("key") or "").lower() + name = (item.get("name") or "").lower() + status = (item.get("status") or "").lower() + if not (key and name and status): + self.log.warning(( + "Invalid item in Task -> Status mapping. {}" + ).format(str(item))) + continue + + if key not in self.keys_enum: + expected_msg = "" + last_key_idx = len(self.keys_enum) - 1 + for idx, key in enumerate(self.keys_enum): + if idx == 0: + joining_part = "`{}`" + elif idx == last_key_idx: + joining_part = "or `{}`" + else: + joining_part = ", `{}`" + expected_msg += joining_part.format(key) + + self.log.warning(( + "Invalid key `{}`. Expected: {}." + ).format(key, expected_msg)) + continue + + valid_task_status_map.append({ + "key": key, + "name": name, + "status": status + }) + self.task_status_map = valid_task_status_map + + return result + def launch(self, session, event): """Set task's status for first created Asset Version.""" if not self.task_status_map: return - if self.first_run: - self.first_run = False - valid_task_status_map = [] - for item in self.task_status_map: - key = (item.get("key") or "").lower() - name = (item.get("name") or "").lower() - status = (item.get("status") or "").lower() - if not (key and name and status): - self.log.warning(( - "Invalid item in Task -> Status mapping. {}" - ).format(str(item))) - continue - - if key not in self.keys_enum: - expected_msg = "" - last_key_idx = len(self.keys_enum) - 1 - for idx, key in enumerate(self.keys_enum): - if idx == 0: - joining_part = "`{}`" - elif idx == last_key_idx: - joining_part = "or `{}`" - else: - joining_part = ", `{}`" - expected_msg += joining_part.format(key) - - self.log.warning(( - "Invalid key `{}`. Expected: {}." - ).format(key, expected_msg)) - continue - - valid_task_status_map.append({ - "key": key, - "name": name, - "status": status - }) - self.task_status_map = valid_task_status_map - entities_info = self.filter_event_ents(event) if not entities_info: return From 96dce267e8c21bbbde068f50f657a881fbcd88bc Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 13:17:37 +0100 Subject: [PATCH 268/434] default value of `key` in status mapping is `task` to look after task's name --- pype/ftrack/events/event_first_version_status.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/ftrack/events/event_first_version_status.py b/pype/ftrack/events/event_first_version_status.py index 59956697b6..c147692dc4 100644 --- a/pype/ftrack/events/event_first_version_status.py +++ b/pype/ftrack/events/event_first_version_status.py @@ -14,6 +14,7 @@ class FirstVersionStatus(BaseEvent): # EXAMPLE of `task_status_map` __example_status_map__ = [{ # `key` specify where to look for name (is enumerator of `keys_enum`) + # By default is set to "task" "key": "task", # speicification of name "name": "compositing", @@ -26,7 +27,7 @@ class FirstVersionStatus(BaseEvent): valid_task_status_map = [] for item in self.task_status_map: - key = (item.get("key") or "").lower() + key = (item.get("key") or "task").lower() name = (item.get("name") or "").lower() status = (item.get("status") or "").lower() if not (key and name and status): From cc6d70f8498a364c5ce643efd683e1427ca46179 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 13:56:48 +0100 Subject: [PATCH 269/434] integrate ftrack note adds intent at the beginning of comment (if is set) --- pype/plugins/ftrack/publish/integrate_ftrack_note.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git 
a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py
index f7fb5addbb..87016684ed 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py
@@ -18,7 +18,17 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
             self.log.info("Comment is not set.")
             return
 
-        self.log.debug("Comment is set to {}".format(comment))
+        self.log.debug("Comment is set to `{}`".format(comment))
+
+        intent = instance.context.data.get("intent")
+        if intent:
+            msg = "Intent is set to `{}` and was added to comment.".format(
+                intent
+            )
+            comment = "{}: {}".format(intent, comment)
+        else:
+            msg = "Intent is not set."
+        self.log.debug(msg)
 
         asset_versions_key = "ftrackIntegratedAssetVersions"
         asset_versions = instance.data.get(asset_versions_key)

From a3ad40e34a729f79902cf653beece81d0617b270 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Tue, 18 Feb 2020 14:05:02 +0100
Subject: [PATCH 270/434] added possibility to set note with intent template
 through presets

---
 pype/plugins/ftrack/publish/integrate_ftrack_note.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py
index 87016684ed..bab7d1ecf5 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py
@@ -10,6 +10,8 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
     order = pyblish.api.IntegratorOrder + 0.4999
     label = "Integrate Ftrack note"
     families = ["ftrack"]
+    # Can be set in presets (Allows only `intent` and `comment` keys)
+    note_with_intent_template = "{intent}: {comment}"
     optional = True
 
     def process(self, instance):
@@ -25,7 +27,10 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
             msg = "Intent is set to `{}` and was added to comment.".format(
                 intent
             )
-            comment = "{}: {}".format(intent, comment)
+            comment = note_with_intent_template.format(**{
+                "intent": intent,
+                "comment": comment
+            })
         else:
             msg = "Intent is not set."
self.log.debug(msg) From 7296e86475077d014085be296e7747b7e01fbb06 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 18 Feb 2020 14:49:11 +0100 Subject: [PATCH 271/434] fix forgotten .value() call --- pype/plugins/global/publish/integrate_new.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 8d41aa7907..a2343ce8a9 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -453,7 +453,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Remove old representations if there are any (before insertion of new) if existing_repres: repre_ids_to_remove = [] - for repre in existing_repres.values(): + for repre in existing_repres: repre_ids_to_remove.append(repre["_id"]) io.delete_many({"_id": {"$in": repre_ids_to_remove}}) From cc1c176ef0f398b3fa7463c92a28a1365ff15471 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 18 Feb 2020 14:53:47 +0100 Subject: [PATCH 272/434] feat(nk): dealing with slate if `render` family in write collector --- pype/plugins/nuke/publish/collect_writes.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index bf1c6a4b66..c3e408341e 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -41,7 +41,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): handle_end = instance.context.data["handleEnd"] first_frame = int(nuke.root()["first_frame"].getValue()) last_frame = int(nuke.root()["last_frame"].getValue()) - + frame_length = int( + last_frame - first_frame + 1 + ) + if node["use_limit"].getValue(): handles = 0 first_frame = int(node["first"].getValue()) @@ -82,8 +85,17 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): collected_frames = [f for f in os.listdir(output_dir) if ext in f] if collected_frames: - representation['frameStart'] = "%0{}d".format( + collected_frames_len = len(collected_frames) + frame_start_str = "%0{}d".format( len(str(last_frame))) % first_frame + representation['frameStart'] = frame_start_str + if "slate" in instance.data["families"] \ + and (frame_length != collected_frames_len): + frame_slate_str = "%0{}d".format( + len(str(last_frame))) % (first_frame - 1) + slate_frame = collected_frames[0].replace( + frame_start_str, frame_slate_str) + collected_frames.insert(0, slate_frame) representation['files'] = collected_frames instance.data["representations"].append(representation) except Exception: From 20a3e054452406db1fab8e190f02a0944c0170c9 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 18 Feb 2020 14:54:49 +0100 Subject: [PATCH 273/434] fix(nk): rendering slate frame to imagesequence --- .../plugins/nuke/publish/extract_slate_frame.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 4d43f38859..488f9bd31d 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -33,6 +33,7 @@ class ExtractSlateFrame(pype.api.Extractor): self.render_slate(instance) def render_slate(self, instance): + node_subset_name = instance.data.get("name", None) node = instance[0] # group node self.log.info("Creating staging dir...") @@ -47,6 +48,10 @@ class ExtractSlateFrame(pype.api.Extractor): 
self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) + frame_length = int( + instance.data["frameEnd"] - instance.data["frameStart"] + 1 + ) + temporary_nodes = [] collection = instance.data.get("collection", None) @@ -56,10 +61,16 @@ class ExtractSlateFrame(pype.api.Extractor): "{head}{padding}{tail}")) fhead = collection.format("{head}") + collected_frames_len = int(len(collection.indexes)) + # get first and last frame first_frame = min(collection.indexes) - 1 - - if "slate" in instance.data["families"]: + self.log.info('frame_length: {}'.format(frame_length)) + self.log.info( + 'len(collection.indexes): {}'.format(collected_frames_len) + ) + if ("slate" in instance.data["families"]) \ + and (frame_length != collected_frames_len): first_frame += 1 last_frame = first_frame @@ -103,6 +114,8 @@ class ExtractSlateFrame(pype.api.Extractor): # Render frames nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + # also render slate as sequence frame + nuke.execute(node_subset_name, int(first_frame), int(last_frame)) self.log.debug( "slate frame path: {}".format(instance.data["slateFrame"])) From 79ad22b5fbbd49afaab132a16b270ab35bc0748e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 15:15:23 +0100 Subject: [PATCH 274/434] added few logs to event --- pype/ftrack/events/event_first_version_status.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/events/event_first_version_status.py b/pype/ftrack/events/event_first_version_status.py index c147692dc4..2447a20c3e 100644 --- a/pype/ftrack/events/event_first_version_status.py +++ b/pype/ftrack/events/event_first_version_status.py @@ -58,7 +58,12 @@ class FirstVersionStatus(BaseEvent): "name": name, "status": status }) + self.task_status_map = valid_task_status_map + if not self.task_status_map: + self.log.warning(( + "Event handler `{}` don't have set presets." 
+ ).format(self.__class__.__name__)) return result @@ -123,13 +128,16 @@ class FirstVersionStatus(BaseEvent): status["name"].lower(): status for status in statuses } + ent_path = "/".join([ent["name"] for ent in task_entity["link"]]) + statuses_by_low_name = statuses_per_type_id[type_id] new_status = statuses_by_low_name.get(found_item["status"]) if not new_status: + self.log.warning("Status `{}` was not found for `{}`.".format( + found_item["status"], ent_path + )) continue - ent_path = "/".join([ent["name"] for ent in task_entity["link"]]) - try: task_entity["status"] = new_status session.commit() From 79db4af6254ddf606fbb429106568bbe6f01d097 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 15:52:45 +0100 Subject: [PATCH 275/434] task is not changed on task but asset version --- .../events/event_first_version_status.py | 38 ++++++++++--------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/pype/ftrack/events/event_first_version_status.py b/pype/ftrack/events/event_first_version_status.py index 2447a20c3e..2e2b98ad5f 100644 --- a/pype/ftrack/events/event_first_version_status.py +++ b/pype/ftrack/events/event_first_version_status.py @@ -18,7 +18,7 @@ class FirstVersionStatus(BaseEvent): "key": "task", # speicification of name "name": "compositing", - # Status to set to the task + # Status to set to the asset version "status": "Blocking" }] @@ -84,15 +84,15 @@ class FirstVersionStatus(BaseEvent): joined_entity_ids = ",".join( ["\"{}\"".format(entity_id) for entity_id in entity_ids] ) - asset_verisons = session.query( + asset_versions = session.query( "AssetVersion where id in ({})".format(joined_entity_ids) ).all() - statuses_per_type_id = {} + asset_version_statuses = None project_schema = None - for asset_verison in asset_verisons: - task_entity = asset_verison["task"] + for asset_version in asset_versions: + task_entity = asset_version["task"] found_item = None for item in self.task_status_map: if ( @@ -117,29 +117,31 @@ class FirstVersionStatus(BaseEvent): project_schema = task_entity["project"]["project_schema"] # Get all available statuses for Task - type_id = task_entity["type_id"] - if type_id not in statuses_per_type_id: - statuses = project_schema.get_statuses( - "Task", task_entity["type_id"] - ) + if asset_version_statuses is None: + statuses = project_schema.get_statuses("AssetVersion") # map lowered status name with it's object - statuses_per_type_id[type_id] = { + asset_version_statuses = { status["name"].lower(): status for status in statuses } - ent_path = "/".join([ent["name"] for ent in task_entity["link"]]) + ent_path = "/".join( + [ent["name"] for ent in task_entity["link"]] + + [ + str(asset_version["asset"]["name"]), + str(asset_version["version"]) + ] + ) - statuses_by_low_name = statuses_per_type_id[type_id] - new_status = statuses_by_low_name.get(found_item["status"]) + new_status = asset_version_statuses.get(found_item["status"]) if not new_status: - self.log.warning("Status `{}` was not found for `{}`.".format( - found_item["status"], ent_path - )) + self.log.warning( + "AssetVersion doesn't have status `{}`." 
+ ).format(found_item["status"]) continue try: - task_entity["status"] = new_status + asset_version["status"] = new_status session.commit() self.log.debug("[ {} ] Status updated to [ {} ]".format( ent_path, new_status['name'] From d98cb1c2c49a62dfa82350f26168a6f887617454 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 16:20:11 +0100 Subject: [PATCH 276/434] fix template access --- pype/plugins/ftrack/publish/integrate_ftrack_note.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py index bab7d1ecf5..38f7486322 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -27,7 +27,7 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): msg = "Intent is set to `{}` and was added to comment.".format( intent ) - comment = note_with_intent_template.format(**{ + comment = self.note_with_intent_template.format(**{ "intent": intent, "comment": comment }) From 73c94459b82414bb87f8a1c2b9c9bb52d3ab810f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 18 Feb 2020 16:30:38 +0100 Subject: [PATCH 277/434] fix(nk, global): slate workflow on farm and local improvements --- .../global/publish/collect_filesequences.py | 3 +- pype/plugins/global/publish/extract_burnin.py | 40 +++++++++++-------- pype/plugins/nuke/publish/collect_writes.py | 13 +++++- 3 files changed, 36 insertions(+), 20 deletions(-) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 8b42606e4a..44098b6008 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -158,7 +158,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): os.environ.update(session) instance = metadata.get("instance") if instance: - instance_family = instance.get("family") pixel_aspect = instance.get("pixelAspect", 1) resolution_width = instance.get("resolutionWidth", 1920) resolution_height = instance.get("resolutionHeight", 1080) @@ -168,7 +167,6 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): slate_frame = instance.get("slateFrame") version = instance.get("version") - else: # Search in directory data = dict() @@ -217,6 +215,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): families.append("render2d") if families_data and "slate" in families_data: families.append("slate") + families.append("slate.farm") if data.get("attachTo"): # we need to attach found collections to existing diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index e50ba891d2..55896fc742 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -32,6 +32,10 @@ class ExtractBurnin(pype.api.Extractor): frame_end = int(instance.data.get("frameEnd") or 1) duration = frame_end - frame_start + 1 + if "slate.farm" in instance.data["families"]: + frame_start += 1 + duration -= 1 + prep_data = { "username": instance.context.data['user'], "asset": os.environ['AVALON_ASSET'], @@ -48,22 +52,6 @@ class ExtractBurnin(pype.api.Extractor): datetime_data = instance.context.data.get("datetimeData") or {} prep_data.update(datetime_data) - slate_frame_start = frame_start - slate_frame_end = frame_end - slate_duration = duration - - # exception for slate workflow - if "slate" in instance.data["families"]: - slate_frame_start = 
frame_start - 1 - slate_frame_end = frame_end - slate_duration = slate_frame_end - slate_frame_start + 1 - - prep_data.update({ - "slate_frame_start": slate_frame_start, - "slate_frame_end": slate_frame_end, - "slate_duration": slate_duration - }) - # Update data with template data template_data = instance.data.get("assumedTemplateData") or {} prep_data.update(template_data) @@ -111,6 +99,26 @@ class ExtractBurnin(pype.api.Extractor): filled_anatomy = anatomy.format_all(_prep_data) _prep_data["anatomy"] = filled_anatomy.get_solved() + # dealing with slates + slate_frame_start = frame_start + slate_frame_end = frame_end + slate_duration = duration + + # exception for slate workflow + if ("slate" in instance.data["families"]): + if "slate-frame" in repre.get("tags", []): + slate_frame_start = frame_start - 1 + slate_frame_end = frame_end + slate_duration = duration + 1 + + self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start)) + + _prep_data.update({ + "slate_frame_start": slate_frame_start, + "slate_frame_end": slate_frame_end, + "slate_duration": slate_duration + }) + burnin_data = { "input": full_movie_path.replace("\\", "/"), "codec": repre.get("codec", []), diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index c3e408341e..1afda6bc6c 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -44,7 +44,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): frame_length = int( last_frame - first_frame + 1 ) - + if node["use_limit"].getValue(): handles = 0 first_frame = int(node["first"].getValue()) @@ -89,13 +89,22 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): frame_start_str = "%0{}d".format( len(str(last_frame))) % first_frame representation['frameStart'] = frame_start_str + + # in case slate is expected and not yet rendered + self.log.debug("_ frame_length: {}".format(frame_length)) + self.log.debug( + "_ collected_frames_len: {}".format( + collected_frames_len)) + # this will only run if slate frame is not already + # rendered from previews publishes if "slate" in instance.data["families"] \ - and (frame_length != collected_frames_len): + and (frame_length == collected_frames_len): frame_slate_str = "%0{}d".format( len(str(last_frame))) % (first_frame - 1) slate_frame = collected_frames[0].replace( frame_start_str, frame_slate_str) collected_frames.insert(0, slate_frame) + representation['files'] = collected_frames instance.data["representations"].append(representation) except Exception: From f59f8b142696f32748296953887af683e2d9aaad Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 17:15:14 +0100 Subject: [PATCH 278/434] added possibility to add labels to notes --- .../ftrack/publish/integrate_ftrack_note.py | 24 ++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py index 38f7486322..2621ca96ab 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -10,10 +10,14 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): order = pyblish.api.IntegratorOrder + 0.4999 label = "Integrate Ftrack note" families = ["ftrack"] - # Can be set in presets (Allows only `intent` and `comment` keys) - note_with_intent_template = "{intent}: {comment}" optional = True + # Can be set in presets: + # - Allows only `intent` and `comment` keys + 
note_with_intent_template = "{intent}: {comment}" + # - note label must exist in Ftrack + note_labels = [] + def process(self, instance): comment = (instance.context.data.get("comment") or "").strip() if not comment: @@ -52,8 +56,22 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): ) ) + labels = [] + if self.note_labels: + all_labels = session.query("NoteLabel").all() + labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels} + for _label in self.note_labels: + label = labels_by_low_name.get(_label.lower()) + if not label: + self.log.warning( + "Note Label `{}` was not found.".format(_label) + ) + continue + + labels.append(label) + for asset_version in asset_versions: - asset_version.create_note(comment, author=user) + asset_version.create_note(comment, author=user, labels=labels) try: session.commit() From 60de315ddfb269385fccc48f590828fe3027c2b4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 18:43:28 +0100 Subject: [PATCH 279/434] fixed class name --- pype/ftrack/actions/action_delete_old_versions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index 46f3e60d77..e418a21e53 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -482,4 +482,4 @@ class DeleteOldVersions(BaseAction): def register(session, plugins_presets={}): '''Register plugin. Called when used as an plugin.''' - PrepareForArchivation(session, plugins_presets).register() + DeleteOldVersions(session, plugins_presets).register() From d12fe99d1827591747b5f58a279b073e74fe82b3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 18:43:41 +0100 Subject: [PATCH 280/434] default version number is 2 --- pype/ftrack/actions/action_delete_old_versions.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index e418a21e53..a546f380a4 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -102,15 +102,14 @@ class DeleteOldVersions(BaseAction): items.append({ "type": "label", "value": ( - "NOTE: We do recommend to keep 2" - " versions (even if default is 1)." + "NOTE: We do recommend to keep 2 versions." 
) }) items.append({ "type": "number", "name": "last_versions_count", "label": "Versions", - "value": 1 + "value": 2 }) items.append(self.splitter_item) From 7692de229b5d0b337eeb93ac269531785f0ad4dd Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 18:44:06 +0100 Subject: [PATCH 281/434] remove publish folder is set to False by default and changed `you` to `You` --- pype/ftrack/actions/action_delete_old_versions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index a546f380a4..bbc5dc4b73 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -130,8 +130,8 @@ class DeleteOldVersions(BaseAction): items.append({ "type": "boolean", "name": "force_delete_publish_folder", - "label": "Are you sure?", - "value": True + "label": "Are You sure?", + "value": False }) return { From dd83b585335557f854fea94a0b7a4eea451c7702 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 18:52:37 +0100 Subject: [PATCH 282/434] changed filter variable to update_query --- pype/ftrack/actions/action_delete_old_versions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index bbc5dc4b73..c566198522 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -352,9 +352,9 @@ class DeleteOldVersions(BaseAction): if version_tags == orig_version_tags: continue - filter = {"_id": version["_id"]} + update_query = {"_id": version["_id"]} update_data = {"$set": {"data.tags": version_tags}} - mongo_changes_bulk.append(UpdateOne(filter, update_data)) + mongo_changes_bulk.append(UpdateOne(update_query, update_data)) if mongo_changes_bulk: self.dbcon.bulk_write(mongo_changes_bulk) From da4c9b5a4f7338cc2bc5a322b73377cc44a21a28 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 18 Feb 2020 18:55:43 +0100 Subject: [PATCH 283/434] changed elif to if condition --- pype/ftrack/actions/action_delete_old_versions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index c566198522..bec21dae96 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -57,7 +57,7 @@ class DeleteOldVersions(BaseAction): "message": msg } - elif not os.path.exists(root): + if not os.path.exists(root): msg = "Root path does not exists \"{}\".".format(str(root)) items.append({ "type": "label", From ed1308f5c1db4d64fab8a83780d6bb9ebdb76a7b Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 18 Feb 2020 20:40:03 +0100 Subject: [PATCH 284/434] refactored workflow, removed regex sequence detection --- .../global/publish/collect_filesequences.py | 1 + .../global/publish/collect_rendered_files.py | 94 +++++++ pype/plugins/global/publish/extract_jpeg.py | 17 +- .../global/publish/submit_publish_job.py | 246 +++++++++++------- pype/plugins/maya/publish/collect_render.py | 147 ++++++++--- 5 files changed, 359 insertions(+), 146 deletions(-) create mode 100644 pype/plugins/global/publish/collect_rendered_files.py diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 33a1e82ba6..b0293c94f9 100644 --- 
a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -95,6 +95,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder - 0.0001 targets = ["filesequence"] label = "RenderedFrames" + active = False def process(self, context): pixel_aspect = 1 diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py new file mode 100644 index 0000000000..010cf44c15 --- /dev/null +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -0,0 +1,94 @@ +import os +import json + +import pyblish.api +from avalon import api + +from pypeapp import PypeLauncher + + +class CollectRenderedFiles(pyblish.api.ContextPlugin): + """ + This collector will try to find json files in provided + `PYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + + """ + order = pyblish.api.CollectorOrder - 0.0001 + targets = ["filesequence"] + label = "Collect rendered frames" + + _context = None + + def _load_json(self, path): + assert os.path.isfile(path), ("path to json file doesn't exist") + data = None + with open(path, "r") as json_file: + try: + data = json.load(json_file) + except Exception as exc: + self.log.error( + "Error loading json: " + "{} - Exception: {}".format(path, exc) + ) + return data + + def _process_path(self, data): + # validate basic necessary data + data_err = "invalid json file - missing data" + required = ["asset", "user", "intent", "comment", + "job", "instances", "session", "version"] + assert all(elem in data.keys() for elem in required), data_err + + # set context by first json file + ctx = self._context.data + + ctx["asset"] = ctx.get("asset") or data.get("asset") + ctx["intent"] = ctx.get("intent") or data.get("intent") + ctx["comment"] = ctx.get("comment") or data.get("comment") + ctx["user"] = ctx.get("user") or data.get("user") + ctx["version"] = ctx.get("version") or data.get("version") + + # basic sanity check to see if we are working in same context + # if some other json file has different context, bail out. + ctx_err = "inconsistent contexts in json files - %s" + assert ctx.get("asset") == data.get("asset"), ctx_err % "asset" + assert ctx.get("intent") == data.get("intent"), ctx_err % "intent" + assert ctx.get("comment") == data.get("comment"), ctx_err % "comment" + assert ctx.get("user") == data.get("user"), ctx_err % "user" + assert ctx.get("version") == data.get("version"), ctx_err % "version" + + # ftrack credentials are passed as environment variables by Deadline + # to publish job, but Muster doesn't pass them. 
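+        # for illustration, the block below expects a mapping shaped like
+        #     {"FTRACK_API_USER": "...", "FTRACK_API_KEY": "...",
+        #      "FTRACK_SERVER": "..."}
+        # mirroring the environment variables it sets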
+ if data.get("ftrack") and not os.environ.get("FTRACK_API_USER"): + ftrack = data.get("ftrack") + os.environ["FTRACK_API_USER"] = ftrack["FTRACK_API_USER"] + os.environ["FTRACK_API_KEY"] = ftrack["FTRACK_API_KEY"] + os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"] + + # now we can just add instances from json file and we are done + for instance in data.get("instances"): + self.log.info(" - processing instance for {}".format( + instance.get("subset"))) + i = self._context.create_instance(instance.get("subset")) + self.log.info("remapping paths ...") + i.data["representations"] = [PypeLauncher().path_remapper( + data=r) for r in instance.get("representations")] + i.data.update(instance) + + def process(self, context): + self._context = context + + assert os.environ.get("PYPE_PUBLISH_DATA"), ( + "Missing `PYPE_PUBLISH_DATA`") + paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep) + + session_set = False + for path in paths: + data = self._load_json(path) + if not session_set: + self.log.info("Setting session using data from file") + api.Session.update(data.get("session")) + os.environ.update(data.get("session")) + session_set = True + assert data, "failed to load json file" + self._process_path(data) diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 4978649ba2..ce9c043c45 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -1,20 +1,11 @@ import os import pyblish.api -import clique import pype.api class ExtractJpegEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issues - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. 
- """ + """Create jpg thumbnail from sequence using ffmpeg""" label = "Extract Jpeg EXR" hosts = ["shell"] @@ -23,11 +14,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): enabled = False def process(self, instance): - start = instance.data.get("frameStart") - stagingdir = os.path.normpath(instance.data.get("stagingDir")) - - collected_frames = os.listdir(stagingdir) - collections, remainder = clique.assemble(collected_frames) self.log.info("subset {}".format(instance.data['subset'])) if 'crypto' in instance.data['subset']: @@ -44,6 +30,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): if 'review' not in repre['tags']: return + stagingdir = os.path.normpath(repre.get("stagingDir")) input_file = repre['files'][0] # input_file = ( diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 35d6bf5c4a..7592423a08 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -1,16 +1,27 @@ import os import json import re -import logging +from copy import copy from avalon import api, io from avalon.vendor import requests, clique import pyblish.api - +# regex for finding frame number in string R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') +# mapping of instance properties to be transfered to new instance for every +# specified family +instance_transfer = { + "slate": ["slateFrame"], + "review": ["lutPath"], + "render.farm": ["bakeScriptPath", "bakeRenderPath", "bakeWriteNodeName"] + } + +# list of family names to transfer to new family if present +families_transfer = ["render2d", "ftrack", "slate"] + def _get_script(): """Get path to the image sequence script""" @@ -217,9 +228,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): environment["PYPE_METADATA_FILE"] = metadata_path i = 0 for index, key in enumerate(environment): - self.log.info("KEY: {}".format(key)) - self.log.info("FILTER: {}".format(self.enviro_filter)) - if key.upper() in self.enviro_filter: payload["JobInfo"].update( { @@ -235,8 +243,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): payload["JobInfo"]["Pool"] = "none" payload["JobInfo"].pop("SecondaryPool", None) - self.log.info("Submitting..") - self.log.info(json.dumps(payload, indent=4, sort_keys=True)) + self.log.info("Submitting Deadline job ...") + # self.log.info(json.dumps(payload, indent=4, sort_keys=True)) url = "{}/api/jobs".format(self.DEADLINE_REST_URL) response = requests.post(url, json=payload) @@ -251,6 +259,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): :param instance: instance to get required data from :type instance: pyblish.plugin.Instance """ + import speedcopy self.log.info("Preparing to copy ...") @@ -311,13 +320,11 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): self.log.info( "Finished copying %i files" % len(resource_files)) - def _create_instances_for_aov(self, context, instance_data, exp_files): + def _create_instances_for_aov(self, instance_data, exp_files): """ This will create new instance for every aov it can detect in expected files list. 
- :param context: context of orignal instance to get important data - :type context: pyblish.plugin.Context :param instance_data: skeleton data for instance (those needed) later by collector :type instance_data: pyblish.plugin.Instance @@ -326,11 +333,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): :returns: list of instances :rtype: list(publish.plugin.Instance) """ + task = os.environ["AVALON_TASK"] subset = instance_data["subset"] instances = [] # go through aovs in expected files - for aov, files in exp_files.items(): + for aov, files in exp_files[0].items(): cols, rem = clique.assemble(files) # we shouldn't have any reminders if rem: @@ -339,7 +347,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "in sequence: {}".format(rem)) # but we really expect only one collection, nothing else make sense - self.log.error("got {} sequence type".format(len(cols))) assert len(cols) == 1, "only one image sequence type is expected" # create subset name `familyTaskSubset_AOV` @@ -352,7 +359,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): start = int(instance_data.get("frameStart")) end = int(instance_data.get("frameEnd")) - new_instance = self.context.create_instance(subset_name) + self.log.info("Creating data for: {}".format(subset_name)) + app = os.environ.get("AVALON_APP", "") preview = False @@ -360,13 +368,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if aov in self.aov_filter[app]: preview = True - new_instance.data.update(instance_data) - new_instance.data["subset"] = subset_name + new_instance = copy(instance_data) + new_instance["subset"] = subset_name + ext = cols[0].tail.lstrip(".") # create represenation rep = { - "name": ext, + "name": aov, "ext": ext, "files": [os.path.basename(f) for f in list(cols[0])], "frameStart": start, @@ -374,26 +383,25 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # If expectedFile are absolute, we need only filenames "stagingDir": staging, "anatomy_template": "render", - "fps": new_instance.data.get("fps"), + "fps": new_instance.get("fps"), "tags": ["review", "preview"] if preview else [] } # add tags if preview: - if "ftrack" not in new_instance.data["families"]: + if "ftrack" not in new_instance["families"]: if os.environ.get("FTRACK_SERVER"): - new_instance.data["families"].append("ftrack") - if "review" not in new_instance.data["families"]: - new_instance.data["families"].append("review") + new_instance["families"].append("ftrack") + if "review" not in new_instance["families"]: + new_instance["families"].append("review") - new_instance.data["representations"] = [rep] - instances.append(new_instance) + new_instance["representations"] = [rep] # if extending frames from existing version, copy files from there # into our destination directory - if instance_data.get("extendFrames", False): + if new_instance.get("extendFrames", False): self._copy_extend_frames(new_instance, rep) - + instances.append(new_instance) return instances def _get_representations(self, instance, exp_files): @@ -409,9 +417,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): :returns: list of representations :rtype: list(dict) """ + representations = [] - start = int(instance.data.get("frameStart")) - end = int(instance.data.get("frameEnd")) + start = int(instance.get("frameStart")) + end = int(instance.get("frameEnd")) cols, rem = clique.assemble(exp_files) # create representation for every collected sequence for c in cols: @@ -438,15 +447,13 @@ class 
ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # If expectedFile are absolute, we need only filenames "stagingDir": os.path.dirname(list(c)[0]), "anatomy_template": "render", - "fps": instance.data.get("fps"), + "fps": instance.get("fps"), "tags": ["review", "preview"] if preview else [], } representations.append(rep) - # TODO: implement extendFrame - - families = instance.data.get("families") + families = instance.get("families") # if we have one representation with preview tag # flag whole instance for review and for ftrack if preview: @@ -455,7 +462,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): families.append("ftrack") if "review" not in families: families.append("review") - instance.data["families"] = families + instance["families"] = families # add reminders as representations for r in rem: @@ -536,7 +543,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): ) relative_path = os.path.relpath(source, api.registered_root()) source = os.path.join("{root}", relative_path).replace("\\", "/") - regex = None families = ["render"] @@ -550,94 +556,138 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "fps": data.get("fps", 25), "source": source, "extendFrames": data.get("extendFrames"), - "overrideExistingFrame": data.get("overrideExistingFrame") + "overrideExistingFrame": data.get("overrideExistingFrame"), + "pixelAspect": data.get("pixelAspect", 1), + "resolutionWidth": data.get("resolutionWidth", 1920), + "resolutionHeight": data.get("resolutionHeight", 1080), } + # transfer specific families from original instance to new render + if "render2d" in instance.data.get("families", []): + instance_skeleton_data["families"] += ["render2d"] + + if "ftrack" in instance.data.get("families", []): + instance_skeleton_data["families"] += ["ftrack"] + + if "slate" in instance.data.get("families", []): + instance_skeleton_data["families"] += ["slate"] + + # transfer specific properties from original instance based on + # mapping dictionary `instance_transfer` + for key, values in instance_transfer.items(): + if key in instance.data.get("families", []): + for v in values: + instance_skeleton_data[v] = instance.data.get(v) + instances = None - if data.get("expectedFiles"): - """ - if content of `expectedFiles` are dictionaries, we will handle - it as list of AOVs, creating instance from every one of them. + assert data.get("expectedFiles"), ("Submission from old Pype version" + " - missing expectedFiles") - Example: - -------- + """ + if content of `expectedFiles` are dictionaries, we will handle + it as list of AOVs, creating instance from every one of them. - expectedFiles = [ - { - "beauty": [ - "foo_v01.0001.exr", - "foo_v01.0002.exr" - ], - "Z": [ - "boo_v01.0001.exr", - "boo_v01.0002.exr" - ] - } - ] + Example: + -------- - This will create instances for `beauty` and `Z` subset - adding those files to their respective representations. + expectedFiles = [ + { + "beauty": [ + "foo_v01.0001.exr", + "foo_v01.0002.exr" + ], - If we've got only list of files, we collect all filesequences. - More then one doesn't probably make sense, but we'll handle it - like creating one instance with multiple representations. + "Z": [ + "boo_v01.0001.exr", + "boo_v01.0002.exr" + ] + } + ] - Example: - -------- + This will create instances for `beauty` and `Z` subset + adding those files to their respective representations. 
-        expectedFiles = [
-            "foo_v01.0001.exr",
-            "foo_v01.0002.exr",
-            "xxx_v01.0001.exr",
-            "xxx_v01.0002.exr"
-        ]
+        If we've got only a list of files, we collect all file sequences.
+        More than one probably doesn't make sense, but we'll handle it
+        like creating one instance with multiple representations.
 
-        This will result in one instance with two representations:
-        `foo` and `xxx`
-        """
-        if isinstance(data.get("expectedFiles")[0], dict):
-            instances = self._create_instances_for_aov(
-                instance_skeleton_data,
-                data.get("expectedFiles"))
-        else:
-            representations = self._get_representations(
-                instance_skeleton_data,
-                data.get("expectedFiles")
-            )
+        Example:
+        --------
 
-            if "representations" not in instance.data:
-                data["representations"] = []
+        expectedFiles = [
+            "foo_v01.0001.exr",
+            "foo_v01.0002.exr",
+            "xxx_v01.0001.exr",
+            "xxx_v01.0002.exr"
+        ]
 
-            # add representation
-            data["representations"] += representations
+        This will result in one instance with two representations:
+        `foo` and `xxx`
+        """
+
+        if isinstance(data.get("expectedFiles")[0], dict):
+            # we cannot attach AOVs to other subsets as we consider every
+            # AOV a subset of its own.
+
+            if len(data.get("attachTo")) > 0:
+                assert len(data.get("expectedFiles")[0].keys()) == 1, (
+                    "attaching multiple AOVs or renderable cameras to "
+                    "subset is not supported")
+
+            # create instances for every AOV we found in expected files.
+            # note: this is done for every AOV and every render camera (if
+            # there are multiple renderable cameras in scene)
+            instances = self._create_instances_for_aov(
+                instance_skeleton_data,
+                data.get("expectedFiles"))
+            self.log.info("got {} instance{}".format(
+                len(instances),
+                "s" if len(instances) > 1 else ""))
         else:
-            # deprecated: passing regex is depecated. Please use
-            # `expectedFiles` and collect them.
-            if "ext" in instance.data:
-                ext = r"\." 
+ re.escape(instance.data["ext"]) - else: - ext = r"\.\D+" + representations = self._get_representations( + instance_skeleton_data, + data.get("expectedFiles") + ) - regex = r"^{subset}.*\d+{ext}$".format( - subset=re.escape(subset), ext=ext) + if "representations" not in instance_skeleton_data: + instance_skeleton_data["representations"] = [] + + # add representation + instance_skeleton_data["representations"] += representations + instances = [instance_skeleton_data] + + # if we are attaching to other subsets, create copy of existing + # instances, change data to match thats subset and replace + # existing instances with modified data + if instance.data.get("attachTo"): + self.log.info("Attaching render to subset:") + new_instances = [] + for at in instance.data.get("attachTo"): + for i in instances: + new_i = copy(i) + new_i["version"] = at.get("version") + new_i["subset"] = at.get("subset") + new_i["families"].append(at.get("family")) + new_instances.append(new_i) + self.log.info(" - {} / v{}".format( + at.get("subset"), at.get("version"))) + instances = new_instances - # Write metadata for publish job # publish job file publish_job = { "asset": asset, "frameStart": start, "frameEnd": end, "fps": context.data.get("fps", None), - "families": families, "source": source, "user": context.data["user"], - "version": context.data["version"], + "version": context.data["version"], # this is workfile version "intent": context.data.get("intent"), "comment": context.data.get("comment"), "job": render_job, "session": api.Session.copy(), - "instances": instances or [data] + "instances": instances } # pass Ftrack credentials in case of Muster @@ -649,14 +699,18 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): } publish_job.update({"ftrack": ftrack}) - if regex: - publish_job["regex"] = regex - # Ensure output dir exists output_dir = instance.data["outputDir"] if not os.path.isdir(output_dir): os.makedirs(output_dir) + metadata_filename = "{}_metadata.json".format(subset) + + metadata_path = os.path.join(output_dir, metadata_filename) + self.log.info("Writing json file: {}".format(metadata_path)) + with open(metadata_path, "w") as f: + json.dump(publish_job, f, indent=4, sort_keys=True) + def _extend_frames(self, asset, subset, start, end, override): """ This will get latest version of asset and update frame range based diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 37e1d0d7b1..1188669d00 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -1,7 +1,46 @@ +""" +This collector will go through render layers in maya and prepare all data +needed to create instances and their representations for submition and +publishing on farm. 
+
+Requires:
+    instance     -> families
+    instance     -> setMembers
+
+    context      -> currentFile
+    context      -> workspaceDir
+    context      -> user
+
+    session     -> AVALON_ASSET
+
+Optional:
+
+Provides:
+    instance    -> label
+    instance    -> subset
+    instance    -> attachTo
+    instance    -> setMembers
+    instance    -> publish
+    instance    -> frameStart
+    instance    -> frameEnd
+    instance    -> byFrameStep
+    instance    -> renderer
+    instance    -> family
+    instance    -> families
+    instance    -> asset
+    instance    -> time
+    instance    -> author
+    instance    -> source
+    instance    -> expectedFiles
+    instance    -> resolutionWidth
+    instance    -> resolutionHeight
+    instance    -> pixelAspect
+"""
+
 import re
 import os
 import types
-# TODO: pending python 3 upgrade
+import six
 from abc import ABCMeta, abstractmethod
 
 from maya import cmds
@@ -122,12 +161,27 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                 # frame range
                 exp_files = ExpectedFiles().get(renderer, layer_name)
 
+                # if we want to attach render to subset, check if we have AOVs
+                # in expectedFiles. If so, raise error as we cannot attach AOV
+                # (considered to be a subset of its own) to another subset
+                if attachTo:
+                    assert len(exp_files[0].keys()) == 1, (
+                        "attaching multiple AOVs or renderable cameras to "
+                        "subset is not supported")
+
                 # append full path
                 full_exp_files = []
-                for ef in exp_files:
-                    full_path = os.path.join(workspace, "renders", ef)
-                    full_path = full_path.replace("\\", "/")
-                    full_exp_files.append(full_path)
+                aov_dict = {}
+
+                for aov, files in exp_files[0].items():
+                    full_paths = []
+                    for ef in files:
+                        full_path = os.path.join(workspace, "renders", ef)
+                        full_path = full_path.replace("\\", "/")
+                        full_paths.append(full_path)
+                    aov_dict[aov] = full_paths
+
+                full_exp_files.append(aov_dict)
 
                 self.log.info("collecting layer: {}".format(layer_name))
                 # Get layer specific settings, might be overrides
                 data = {
                     "attachTo": attachTo,
                     "setMembers": layer_name,
                     "publish": True,
-                    "frameStart": self.get_render_attribute("startFrame",
-                                                            layer=layer_name),
-                    "frameEnd": self.get_render_attribute("endFrame",
-                                                          layer=layer_name),
-                    "byFrameStep": self.get_render_attribute("byFrameStep",
-                                                             layer=layer_name),
+                    "frameStart": int(self.get_render_attribute(
+                        "startFrame", layer=layer_name)),
+                    "frameEnd": int(self.get_render_attribute(
+                        "endFrame", layer=layer_name)),
+                    "byFrameStep": int(
+                        self.get_render_attribute("byFrameStep",
+                                                  layer=layer_name)),
                     "renderer": self.get_render_attribute("currentRenderer",
                                                           layer=layer_name),
@@ -155,7 +210,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                     # Add source to allow tracing back to the scene from
                     # which was submitted originally
                     "source": filepath,
-                    "expectedFiles": full_exp_files
+                    "expectedFiles": full_exp_files,
+                    "resolutionWidth": cmds.getAttr("defaultResolution.width"),
+                    "resolutionHeight": cmds.getAttr("defaultResolution.height"),
+                    "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect")
                 }
 
                 # Apply each user defined attribute as data
@@ -285,16 +343,16 @@ class ExpectedFiles:
         elif renderer.lower() == 'redshift':
             return ExpectedFilesRedshift(layer).get_files()
         elif renderer.lower() == 'mentalray':
-            renderer.ExpectedFilesMentalray(layer).get_files()
+            return ExpectedFilesMentalray(layer).get_files()
         elif renderer.lower() == 'renderman':
-            renderer.ExpectedFilesRenderman(layer).get_files()
+            return ExpectedFilesRenderman(layer).get_files()
         else:
             raise UnsupportedRendererException(
                 "unsupported {}".format(renderer))
 
 
+@six.add_metaclass(ABCMeta)
 class AExpectedFiles: 
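+    # Abstract base for per-renderer expected-file resolvers; the
+    # six.add_metaclass decorator above replaces the Python 2 only
+    # __metaclass__ attribute so the ABC works on Python 2 and 3, and
+    # concrete renderer subclasses implement get_aovs().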
- __metaclass__ = ABCMeta renderer = None layer = None @@ -360,9 +418,10 @@ class AExpectedFiles: padding = int(self.get_render_attribute('extensionPadding')) resolved_path = file_prefix - for cam in renderable_cameras: - if enabled_aovs: - for aov in enabled_aovs: + if enabled_aovs: + aov_file_list = {} + for aov in enabled_aovs: + for cam in renderable_cameras: mappings = ( (R_SUBSTITUTE_SCENE_TOKEN, scene_name), @@ -380,12 +439,23 @@ class AExpectedFiles: int(end_frame) + 1, int(frame_step)): aov_files.append( - '{}.{}.{}'.format(file_prefix, - str(frame).rjust(padding, "0"), - aov[1])) - expected_files.append({aov[0]: aov_files}) + '{}.{}.{}'.format( + file_prefix, + str(frame).rjust(padding, "0"), + aov[1])) + + # if we have more then one renderable camera, append + # camera name to AOV to allow per camera AOVs. + aov_name = aov[0] + if len(renderable_cameras) > 1: + aov_name = "{}_{}".format(aov[0], cam) + + aov_file_list[aov_name] = aov_files file_prefix = resolved_path - else: + + expected_files.append(aov_file_list) + else: + for cam in renderable_cameras: mappings = ( (R_SUBSTITUTE_SCENE_TOKEN, scene_name), (R_SUBSTITUTE_LAYER_TOKEN, layer_name), @@ -475,9 +545,17 @@ class ExpectedFilesArnold(AExpectedFiles): def get_aovs(self): enabled_aovs = [] - if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode') - and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')): - # AOVs are merged in mutli-channel file + try: + if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode') + and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')): + # AOVs are merged in mutli-channel file + return enabled_aovs + except ValueError: + # this occurs when Render Setting windows was not opened yet. In + # such case there are no Arnold options created so query for AOVs + # will fail. We terminate here as there are no AOVs specified then. + # This state will most probably fail later on some Validator + # anyway. return enabled_aovs # AOVs are set to be rendered separately. We should expect @@ -515,16 +593,15 @@ class ExpectedFilesArnold(AExpectedFiles): aov_ext ) ) - if not enabled_aovs: - # if there are no AOVs, append 'beauty' as this is arnolds - # default. If token is specified and no AOVs are - # defined, this will be used. - enabled_aovs.append( - ( - 'beauty', - cmds.getAttr('defaultRenderGlobals.imfPluginKey') - ) + # Append 'beauty' as this is arnolds + # default. If token is specified and no AOVs are + # defined, this will be used. 
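+        # for illustration, the resulting list holds (name, extension)
+        # tuples, e.g. [("beauty", "exr"), ("Z", "exr")] when a Z-depth
+        # AOV is enabled next to the default beauty pass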
+ enabled_aovs.append( + ( + u'beauty', + cmds.getAttr('defaultRenderGlobals.imfPluginKey') ) + ) return enabled_aovs From 5a851ad936024dfdc224c529287737e0699c3916 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 18 Feb 2020 20:43:35 +0100 Subject: [PATCH 285/434] adding procedural and texture paths --- .../publish/validate_ass_relative_paths.py | 40 ++++++++++++++----- 1 file changed, 29 insertions(+), 11 deletions(-) diff --git a/pype/plugins/maya/publish/validate_ass_relative_paths.py b/pype/plugins/maya/publish/validate_ass_relative_paths.py index b5e16103ad..b0fd12a550 100644 --- a/pype/plugins/maya/publish/validate_ass_relative_paths.py +++ b/pype/plugins/maya/publish/validate_ass_relative_paths.py @@ -14,7 +14,7 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin): order = pype.api.ValidateContentsOrder hosts = ['maya'] families = ['ass'] - label = "Validate ASS has relative texture paths" + label = "ASS has relative texture paths" actions = [pype.api.RepairAction] def process(self, instance): @@ -22,47 +22,65 @@ class ValidateAssRelativePaths(pyblish.api.InstancePlugin): # `defaultArnoldRenderOptions` doesn't exists try: relative_texture = cmds.getAttr( + "defaultArnoldRenderOptions.absolute_texture_paths") + relative_procedural = cmds.getAttr( "defaultArnoldRenderOptions.absolute_procedural_paths") texture_search_path = cmds.getAttr( "defaultArnoldRenderOptions.tspath" ) + procedural_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.pspath" + ) except ValueError: assert False, ("Can not validate, render setting were not opened " "yet so Arnold setting cannot be validate") scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True)) scene_name, _ = os.path.splitext(scene_basename) - project_root = "{}{}{}{}".format( + project_root = "{}{}{}".format( os.environ.get("AVALON_PROJECTS"), os.path.sep, - os.environ.get("AVALON_PROJECT"), - os.pathsep + os.environ.get("AVALON_PROJECT") ) assert self.maya_is_true(relative_texture) is not True, \ - ("Texture path are set to be absolute") + ("Texture path is set to be absolute") + assert self.maya_is_true(relative_procedural) is not True, \ + ("Procedural path is set to be absolute") + + texture_search_path = texture_search_path.replace("\\", "/") + procedural_search_path = procedural_search_path.replace("\\", "/") + project_root = project_root.replace("\\", "/") - texture_search_path.replace("\\", "/") assert project_root in texture_search_path, \ ("Project root is not in texture_search_path") + assert project_root in procedural_search_path, \ + ("Project root is not in procedural_search_path") @classmethod def repair(cls, instance): texture_search_path = cmds.getAttr( "defaultArnoldRenderOptions.tspath" ) - project_root = "{}{}{}{}".format( + procedural_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.pspath" + ) + + project_root = "{}{}{}".format( os.environ.get("AVALON_PROJECTS"), os.path.sep, os.environ.get("AVALON_PROJECT"), - os.pathsep - ) + ).replace("\\", "/") - project_root = project_root.replace("\\", "/") cmds.setAttr("defaultArnoldRenderOptions.tspath", - project_root + texture_search_path, + project_root + os.pathsep + texture_search_path, + type="string") + cmds.setAttr("defaultArnoldRenderOptions.pspath", + project_root + os.pathsep + procedural_search_path, type="string") cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths", False) + cmds.setAttr("defaultArnoldRenderOptions.absolute_texture_paths", + False) def maya_is_true(self, attr_val): """ From 
85d8af5dec50fc302afbd57cc5f192715dbc0f13 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 18 Feb 2020 21:00:03 +0100 Subject: [PATCH 286/434] removing old collector --- .../global/publish/collect_filesequences.py | 512 ------------------ 1 file changed, 512 deletions(-) delete mode 100644 pype/plugins/global/publish/collect_filesequences.py diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py deleted file mode 100644 index fe2b75394c..0000000000 --- a/pype/plugins/global/publish/collect_filesequences.py +++ /dev/null @@ -1,512 +0,0 @@ -""" -Requires: - environment -> PYPE_PUBLISH_PATHS - context -> workspaceDir - -Provides: - context -> user (str) - instance -> new instance -""" - -import os -import re -import copy -import json - -import pyblish.api -from avalon import api - -from pypeapp import PypeLauncher - - -def collect(root, - regex=None, - exclude_regex=None, - frame_start=None, - frame_end=None): - """Collect sequence collections in root""" - - from avalon.vendor import clique - - files = list() - for filename in os.listdir(root): - - # Must have extension - ext = os.path.splitext(filename)[1] - if not ext: - continue - - # Only files - if not os.path.isfile(os.path.join(root, filename)): - continue - - # Include and exclude regex - if regex and not re.search(regex, filename): - continue - if exclude_regex and re.search(exclude_regex, filename): - continue - - files.append(filename) - - # Match collections - # Support filenames like: projectX_shot01_0010.tiff with this regex - pattern = r"(?P(?P0*)\d+)\.\D+\d?$" - collections, remainder = clique.assemble(files, - patterns=[pattern], - minimum_items=1) - - # Exclude any frames outside start and end frame. - for collection in collections: - for index in list(collection.indexes): - if frame_start is not None and index < frame_start: - collection.indexes.discard(index) - continue - if frame_end is not None and index > frame_end: - collection.indexes.discard(index) - continue - - # Keep only collections that have at least a single frame - collections = [c for c in collections if c.indexes] - - return collections, remainder - - -class CollectRenderedFrames(pyblish.api.ContextPlugin): - """Gather file sequences from working directory - - When "FILESEQUENCE" environment variable is set these paths (folders or - .json files) are parsed for image sequences. Otherwise the current - working directory is searched for file sequences. - - The json configuration may have the optional keys: - asset (str): The asset to publish to. If not provided fall back to - api.Session["AVALON_ASSET"] - subset (str): The subset to publish to. If not provided the sequence's - head (up to frame number) will be used. 
- frame_start (int): The start frame for the sequence - frame_end (int): The end frame for the sequence - root (str): The path to collect from (can be relative to the .json) - regex (str): A regex for the sequence filename - exclude_regex (str): A regex for filename to exclude from collection - metadata (dict): Custom metadata for instance.data["metadata"] - - """ - - order = pyblish.api.CollectorOrder - 0.0001 - targets = ["filesequence"] - label = "RenderedFrames" - active = False - - def process(self, context): - pixel_aspect = 1 - resolution_width = 1920 - resolution_height = 1080 - lut_path = None - slate_frame = None - families_data = None - baked_mov_path = None - subset = None - version = None - frame_start = 0 - frame_end = 0 - new_instance = None - - if os.environ.get("PYPE_PUBLISH_PATHS"): - paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) - self.log.info("Collecting paths: {}".format(paths)) - else: - cwd = context.get("workspaceDir", os.getcwd()) - paths = [cwd] - - for path in paths: - - self.log.info("Loading: {}".format(path)) - - if path.endswith(".json"): - # Search using .json configuration - with open(path, "r") as f: - try: - data = json.load(f) - except Exception as exc: - self.log.error( - "Error loading json: " - "{} - Exception: {}".format(path, exc) - ) - raise - - cwd = os.path.dirname(path) - root_override = data.get("root") - frame_start = int(data.get("frameStart")) - frame_end = int(data.get("frameEnd")) - subset = data.get("subset") - - if root_override: - if os.path.isabs(root_override): - root = root_override - else: - root = os.path.join(cwd, root_override) - else: - root = cwd - - if data.get("ftrack"): - f = data.get("ftrack") - os.environ["FTRACK_API_USER"] = f["FTRACK_API_USER"] - os.environ["FTRACK_API_KEY"] = f["FTRACK_API_KEY"] - os.environ["FTRACK_SERVER"] = f["FTRACK_SERVER"] - - metadata = data.get("metadata") - if metadata: - session = metadata.get("session") - if session: - self.log.info("setting session using metadata") - api.Session.update(session) - os.environ.update(session) - instance = metadata.get("instance") - if instance and isinstance(instance, list): - instance_family = instance.get("family") - pixel_aspect = instance.get("pixelAspect", 1) - resolution_width = instance.get( - "resolutionWidth", 1920) - resolution_height = instance.get( - "resolutionHeight", 1080) - lut_path = instance.get("lutPath", None) - baked_mov_path = instance.get("bakeRenderPath") - families_data = instance.get("families") - slate_frame = instance.get("slateFrame") - version = instance.get("version") - else: - # Search in directory - data = dict() - root = path - - self.log.info("Collecting: {}".format(root)) - - regex = data.get("regex") - if baked_mov_path: - regex = "^{}.*$".format(subset) - - if regex: - self.log.info("Using regex: {}".format(regex)) - - if "slate" in families_data: - frame_start -= 1 - - if regex: - collections, remainder = collect( - root=root, - regex=regex, - exclude_regex=data.get("exclude_regex"), - frame_start=frame_start, - frame_end=frame_end, - ) - - self.log.info("Found collections: {}".format(collections)) - self.log.info("Found remainder: {}".format(remainder)) - - fps = data.get("fps", 25) - - # adding publish comment and intent to context - context.data["comment"] = data.get("comment", "") - context.data["intent"] = data.get("intent", "") - - if data.get("user"): - context.data["user"] = data["user"] - - if data.get("version"): - version = data.get("version") - - # Get family from the data - families = 
data.get("families", ["render"]) - if "ftrack" not in families: - families.append("ftrack") - if families_data and "render2d" in families_data: - families.append("render2d") - if families_data and "slate" in families_data: - families.append("slate") - - if not isinstance(instance, list): - instances = [instance] - - # attachTo must be only on single instance - if instances[0].get("attachTo"): - # we need to attach found collections to existing - # subset version as review represenation. - - for attach in instances[0].get("attachTo"): - self.log.info( - "Attaching render {}:v{}".format( - attach["subset"], attach["version"])) - new_instance = context.create_instance( - attach["subset"]) - new_instance.data.update( - { - "name": attach["subset"], - "subset": attach["subset"], - "version": attach["version"], - "family": 'review', - "families": ['review', 'ftrack'], - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height - }) - - if regex: - if "representations" not in new_instance.data: - new_instance.data["representations"] = [] - - for collection in collections: - self.log.info( - " - adding representation: {}".format( - str(collection)) - ) - ext = collection.tail.lstrip(".") - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - } - new_instance.data["representations"].append( - representation) - else: - try: - representations = data["metadata"]["instance"]["representations"] # noqa: E501 - except KeyError as e: - assert False, e - new_instance.data["representations"] = representations - - elif subset: - # if we have subset - add all collections and known - # reminder as representations - - # take out review family if mov path - # this will make imagesequence none review - - if baked_mov_path: - self.log.info( - "Baked mov is available {}".format( - baked_mov_path)) - families.append("review") - - if session['AVALON_APP'] == "maya": - families.append("review") - - self.log.info( - "Adding representations to subset {}".format( - subset)) - - new_instance = context.create_instance(subset) - data = copy.deepcopy(data) - - new_instance.data.update( - { - "name": subset, - "family": families[0], - "families": list(families), - "subset": subset, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "slateFrame": slate_frame, - "version": version - } - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - for collection in collections: - self.log.info(" - {}".format(str(collection))) - - ext = collection.tail.lstrip(".") - - if "slate" in instance.data["families"]: - frame_start += 1 - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": frame_start, - "frameEnd": frame_end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"] if not baked_mov_path else ["thumb-nuke"], - } - instance.data["representations"].append( - representation) - - if regex: - for 
collection in collections: - self.log.info(" - {}".format(str(collection))) - - ext = collection.tail.lstrip(".") - - if "slate" in new_instance.data["families"]: - frame_start += 1 - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": frame_start, - "frameEnd": frame_end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"] if not baked_mov_path else [], - } - new_instance.data["representations"].append( - representation) - - # filter out only relevant mov in case baked available - self.log.debug("__ remainder {}".format(remainder)) - if baked_mov_path: - remainder = [r for r in remainder - if r in baked_mov_path] - self.log.debug("__ remainder {}".format(remainder)) - - # process reminders - for rem in remainder: - # add only known types to representation - if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']: - self.log.info(" . {}".format(rem)) - - if "slate" in instance.data["families"]: - frame_start += 1 - - tags = ["preview"] - - if baked_mov_path: - tags.append("delete") - - representation = { - "name": rem.split(".")[-1], - "ext": "{}".format(rem.split(".")[-1]), - "files": rem, - "stagingDir": root, - "frameStart": frame_start, - "anatomy_template": "render", - "fps": fps, - "tags": tags - } - new_instance.data["representations"].append( - representation) - else: - try: - representations = data["metadata"]["instance"]["representations"] # noqa: E501 - except KeyError as e: - assert False, e - - new_instance.data["representations"] = representations - - else: - # we have no subset so we take every collection and create one - # from it - - # Ensure each instance gets a unique reference to the data - data = copy.deepcopy(data) - - # If no subset provided, get it from collection's head - subset = data.get("subset", collection.head.rstrip("_. ")) - - # If no start or end frame provided, get it from collection - indices = list(collection.indexes) - start = int(data.get("frameStart", indices[0])) - end = int(data.get("frameEnd", indices[-1])) - - new_instance.append(collection) - new_instance.context.data["fps"] = fps - - if "representations" not in new_instance.data: - new_instance.data["representations"] = [] - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": start, - "frameEnd": end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - } - new_instance.data["representations"].append( - representation) - - # temporary ... 
allow only beauty on ftrack - if session['AVALON_APP'] == "maya": - AOV_filter = ['beauty'] - for aov in AOV_filter: - if aov not in new_instance.data['subset']: - new_instance.data['families'].remove( - 'review') - new_instance.data['families'].remove( - 'ftrack') - representation["tags"].remove('review') - else: - data = copy.deepcopy(data) - if not isinstance(data["metadata"]["instance"], list): - instances = [data["metadata"]["instance"]] - for instance in instances: - subset = instance["subset"] - task = data["metadata"]["session"]["AVALON_TASK"] - new_subset_name = 'render{}{}{}{}'.format( - task[0].upper(), task[1:], - subset[0].upper(), subset[1:]) - - self.log.info( - "Creating new subset: {}".format(new_subset_name)) - new_instance = context.create_instance(new_subset_name) - - new_instance.data.update( - { - "name": new_subset_name, - "family": 'render', - "families": data["metadata"]["families"], - "subset": new_subset_name, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "slateFrame": slate_frame - } - ) - new_instance.data["representations"] = instance["representations"] # noqa: E501 - - if new_instance is not None: - self.log.info("remapping paths ...") - new_instance.data["representations"] = [PypeLauncher().path_remapper(data=r) for r in new_instance.data["representations"]] # noqa: E501 - self.log.debug( - "__ representations {}".format( - new_instance.data["representations"])) - self.log.debug( - "__ instance.data {}".format(new_instance.data)) - else: - self.log.error("nothing collected") From f572d97fe0a31fce1dae89ffec45c99a9d73098e Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Tue, 18 Feb 2020 21:25:01 +0100 Subject: [PATCH 287/434] move few things to class to make it overridable by presets --- .../global/publish/submit_publish_job.py | 48 ++++++++----------- 1 file changed, 20 insertions(+), 28 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 7592423a08..7b5dac28d4 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -8,20 +8,6 @@ from avalon.vendor import requests, clique import pyblish.api -# regex for finding frame number in string -R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') - -# mapping of instance properties to be transfered to new instance for every -# specified family -instance_transfer = { - "slate": ["slateFrame"], - "review": ["lutPath"], - "render.farm": ["bakeScriptPath", "bakeRenderPath", "bakeWriteNodeName"] - } - -# list of family names to transfer to new family if present -families_transfer = ["render2d", "ftrack", "slate"] - def _get_script(): """Get path to the image sequence script""" @@ -145,9 +131,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): - publishJobState (str, Optional): "Active" or "Suspended" This defaults to "Suspended" - This requires a "frameStart" and "frameEnd" to be present in instance.data - or in context.data. 
-
     """
 
     label = "Submit image sequence jobs to Deadline or Muster"
@@ -175,6 +158,20 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
     # pool used to do the publishing job
     deadline_pool = ""
 
+    # regex for finding frame number in string
+    R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
+
+    # mapping of instance properties to be transferred to the new instance
+    # for every specified family
+    instance_transfer = {
+        "slate": ["slateFrame"],
+        "review": ["lutPath"],
+        "render.farm": ["bakeScriptPath", "bakeRenderPath",
+                        "bakeWriteNodeName"]
+    }
+
+    # list of family names to transfer to new family if present
+    families_transfer = ["render2d", "ftrack", "slate"]
+
    def _submit_deadline_post_job(self, instance, job):
        """
        Deadline specific code separated from :meth:`process` for sake of
@@ -289,12 +286,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             resource_files = []
             r_filename = os.path.basename(
                 representation.get("files")[0])  # first file
-            op = re.search(R_FRAME_NUMBER, r_filename)
+            op = re.search(self.R_FRAME_NUMBER, r_filename)
             pre = r_filename[:op.start("frame")]
             post = r_filename[op.end("frame"):]
             assert op is not None, "padding string wasn't found"
             for frame in list(r_col):
-                fn = re.search(R_FRAME_NUMBER, frame)
+                fn = re.search(self.R_FRAME_NUMBER, frame)
                 # silencing linter as we need to compare to True, not to
                 # type
                 assert fn is not None, "padding string wasn't found"
@@ -563,18 +560,13 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         }
 
         # transfer specific families from original instance to new render
-        if "render2d" in instance.data.get("families", []):
-            instance_skeleton_data["families"] += ["render2d"]
-
-        if "ftrack" in instance.data.get("families", []):
-            instance_skeleton_data["families"] += ["ftrack"]
-
-        if "slate" in instance.data.get("families", []):
-            instance_skeleton_data["families"] += ["slate"]
+        for item in self.families_transfer:
+            if item in instance.data.get("families", []):
+                instance_skeleton_data["families"] += [item]
 
         # transfer specific properties from original instance based on
         # mapping dictionary `instance_transfer`
-        for key, values in instance_transfer.items():
+        for key, values in self.instance_transfer.items():
             if key in instance.data.get("families", []):
                 for v in values:
                     instance_skeleton_data[v] = instance.data.get(v)

From 022862d63291899a1ad19c4626c1a32fee727fa0 Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Wed, 19 Feb 2020 13:29:14 +0100
Subject: [PATCH 288/434] fixed redshift support

---
 pype/plugins/maya/publish/collect_render.py   | 224 +++++++++++++-----
 .../maya/publish/validate_rendersettings.py   |  53 ++---
 2 files changed, 190 insertions(+), 87 deletions(-)

diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index 1188669d00..7d0e798ae9 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -59,6 +59,7 @@ R_LAYER_TOKEN = re.compile(
     r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
 R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
 R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
+R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE)
 R_SUBSTITUTE_LAYER_TOKEN = re.compile(
     r'%l|<layer>|<renderlayer>', re.IGNORECASE)
 R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
                 # return all expected files for all cameras and aovs in given
                 # frame range
                 exp_files = ExpectedFiles().get(renderer, layer_name)
+                assert exp_files, ("no 
file names were generated, this is a bug")
 
                 # if we want to attach render to subset, check if we have AOVs
                 # in expectedFiles. If so, raise error as we cannot attach AOV
@@ -173,16 +175,32 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
                 full_exp_files = []
                 aov_dict = {}
 
-                for aov, files in exp_files[0].items():
+                # we either get AOVs or just a list of files. A list of
+                # files can mean two things - there are no AOVs enabled or
+                # a multipass EXR is produced. In either case we treat
+                # those as `beauty`.
+                if isinstance(exp_files[0], dict):
+                    for aov, files in exp_files[0].items():
+                        full_paths = []
+                        for ef in files:
+                            full_path = os.path.join(workspace, "renders", ef)
+                            full_path = full_path.replace("\\", "/")
+                            full_paths.append(full_path)
+                        aov_dict[aov] = full_paths
+                else:
                     full_paths = []
-                    for ef in files:
+                    for ef in exp_files:
                         full_path = os.path.join(workspace, "renders", ef)
                         full_path = full_path.replace("\\", "/")
                         full_paths.append(full_path)
-                    aov_dict[aov] = full_paths
+                    aov_dict["beauty"] = full_paths
 
                 full_exp_files.append(aov_dict)
 
+                self.log.debug("full_exp_files: {}".format(full_exp_files))
+
                 self.log.info("collecting layer: {}".format(layer_name))
                 # Get layer specific settings, might be overrides
@@ -363,7 +381,15 @@ class AExpectedFiles:
     def get_aovs(self):
         pass
 
-    def get_files(self):
+    def get_renderer_prefix(self):
+        try:
+            file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
+        except KeyError:
+            raise UnsupportedRendererException(
+                "Unsupported renderer {}".format(self.renderer))
+        return file_prefix
+
+    def _get_layer_data(self):
         #                      ______________________________________________
         # ____________________/ ____________________________________________/
         # 1 - get scene name  /__________________/
         # __________________/
         #                    ______________________________________________
         # __________________/ ____________________________________________/
         # 3 - image prefix  /__________________/
         # __________________/
-        try:
-            file_prefix = cmds.getAttr(ImagePrefixes[renderer])
-        except KeyError:
-            raise UnsupportedRendererException(
-                "Unsupported renderer {}".format(renderer))
+        file_prefix = self.get_renderer_prefix()
 
         if not file_prefix:
             raise RuntimeError("Image prefix not set")
 
         # 4 - get renderable cameras_____________/
         # __________________/
 
+        # if we have <Camera> token in prefix path we'll expect output for
+        # every renderable camera in layer.
+
         renderable_cameras = self.get_renderable_cameras()
         # ________________________________________________
         # __________________/ ____________________________________________/
         # 5 - get AOVs_____________/
         # __________________/
         enabled_aovs = self.get_aovs()
 
-        # if we have token in prefix path we'll expect output for
-        # every renderable camera in layer. 
-
-        expected_files = []
         layer_name = self.layer
         if self.layer.startswith("rs_"):
             layer_name = self.layer[3:]
 
         start_frame = int(self.get_render_attribute('startFrame'))
         end_frame = int(self.get_render_attribute('endFrame'))
         frame_step = int(self.get_render_attribute('byFrameStep'))
         padding = int(self.get_render_attribute('extensionPadding'))
 
-        resolved_path = file_prefix
-        if enabled_aovs:
-            aov_file_list = {}
-            for aov in enabled_aovs:
-                for cam in renderable_cameras:
+        scene_data = {
+            "frameStart": start_frame,
+            "frameEnd": end_frame,
+            "frameStep": frame_step,
+            "padding": padding,
+            "cameras": renderable_cameras,
+            "sceneName": scene_name,
+            "layerName": layer_name,
+            "renderer": renderer,
+            "defaultExt": default_ext,
+            "filePrefix": file_prefix,
+            "enabledAOVs": enabled_aovs
+        }
+        return scene_data
 
-                    mappings = (
-                        (R_SUBSTITUTE_SCENE_TOKEN, scene_name),
-                        (R_SUBSTITUTE_LAYER_TOKEN, layer_name),
-                        (R_SUBSTITUTE_CAMERA_TOKEN, cam),
-                        (R_SUBSTITUTE_AOV_TOKEN, aov[0])
-                    )
+    def _generate_single_file_sequence(self, layer_data):
+        expected_files = []
+        for cam in layer_data["cameras"]:
+            # reset to the raw prefix for every camera so tokens already
+            # substituted for the previous camera don't leak over
+            file_prefix = layer_data["filePrefix"]
+            mappings = (
+                (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
+                (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
+                (R_SUBSTITUTE_CAMERA_TOKEN, cam),
+                # this is required to remove the unfilled aov token, for
+                # example in Redshift
+                (R_REMOVE_AOV_TOKEN, "")
+            )
 
-                    for regex, value in mappings:
-                        file_prefix = re.sub(regex, value, file_prefix)
+            for regex, value in mappings:
+                file_prefix = re.sub(regex, value, file_prefix)
 
-                    aov_files = []
-                    for frame in range(
-                            int(start_frame),
-                            int(end_frame) + 1,
-                            int(frame_step)):
-                        aov_files.append(
-                            '{}.{}.{}'.format(
-                                file_prefix,
-                                str(frame).rjust(padding, "0"),
-                                aov[1]))
+            for frame in range(
+                    int(layer_data["frameStart"]),
+                    int(layer_data["frameEnd"]) + 1,
+                    int(layer_data["frameStep"])):
+                expected_files.append(
+                    '{}.{}.{}'.format(file_prefix,
+                                      str(frame).rjust(
+                                          layer_data["padding"], "0"),
+                                      layer_data["defaultExt"]))
+        return expected_files
 
-                    # if we have more then one renderable camera, append
-                    # camera name to AOV to allow per camera AOVs.
+    def _generate_aov_file_sequences(self, layer_data):
+        expected_files = []
+        aov_file_list = {}
+        file_prefix = layer_data["filePrefix"]
+        for aov in layer_data["enabledAOVs"]:
+            for cam in layer_data["cameras"]:
+
+                mappings = (
+                    (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
+                    (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
+                    (R_SUBSTITUTE_CAMERA_TOKEN, cam),
+                    (R_SUBSTITUTE_AOV_TOKEN, aov[0])
+                )
+
+                for regex, value in mappings:
+                    file_prefix = re.sub(regex, value, file_prefix)
+
+                aov_files = []
+                for frame in range(
+                        int(layer_data["frameStart"]),
+                        int(layer_data["frameEnd"]) + 1,
+                        int(layer_data["frameStep"])):
+                    aov_files.append(
+                        '{}.{}.{}'.format(
+                            file_prefix,
+                            str(frame).rjust(layer_data["padding"], "0"),
+                            aov[1]))
+
+                # if we have more than one renderable camera, append
+                # camera name to AOV to allow per camera AOVs. 
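+                # for illustration: AOV "Z" rendered from cameras "persp"
+                # and "top" would land under keys "Z_persp" and "Z_top"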
+                aov_name = aov[0]
+                if len(layer_data["cameras"]) > 1:
+                    aov_name = "{}_{}".format(aov[0], cam)
+
+                aov_file_list[aov_name] = aov_files
+                file_prefix = layer_data["filePrefix"]
+
+        expected_files.append(aov_file_list)
+        return expected_files
+
+    def get_files(self):
+        """
+        This method will return a list of expected files.
+
+        It will translate render token strings ('<RenderPass>', etc.) to
+        their values. This task is tricky as every renderer deals with this
+        differently. It depends on the `get_aovs()` abstract method
+        implemented for every supported renderer.
+        """
+        layer_data = self._get_layer_data()
+
+        expected_files = []
+        if layer_data.get("enabledAOVs"):
+            expected_files = self._generate_aov_file_sequences(layer_data)
+        else:
+            expected_files = self._generate_single_file_sequence(layer_data)

         return expected_files

@@ -656,13 +723,53 @@ class ExpectedFilesVray(AExpectedFiles):


 class ExpectedFilesRedshift(AExpectedFiles):
+    # mapping redshift extension dropdown values to strings
+    ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg']
+
     def __init__(self, layer):
         super(ExpectedFilesRedshift, self).__init__(layer)
         self.renderer = 'redshift'

+    def get_renderer_prefix(self):
+        prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
+        prefix = "{}_<aov>".format(prefix)
+        return prefix
+
+    def get_files(self):
+        expected_files = super(ExpectedFilesRedshift, self).get_files()
+
+        # we need to add one sequence for plain beauty if AOVs are enabled,
+        # as Redshift outputs beauty without 'beauty' in the filename.
+
+        layer_data = self._get_layer_data()
+        if layer_data.get("enabledAOVs"):
+            expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data)  # noqa: E501
+
+        return expected_files
+
     def get_aovs(self):
         enabled_aovs = []
-        default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
+
+        try:
+            if self.maya_is_true(
+                    cmds.getAttr("redshiftOptions.exrForceMultilayer")):
+                # AOVs are merged in multi-channel file
+                print("*" * 40)
+                print(cmds.getAttr("redshiftOptions.exrForceMultilayer"))
+                print("*" * 40)
+                return enabled_aovs
+        except ValueError:
+            # this occurs when Render Settings window was not opened yet. In
+            # such case there are no Redshift options created so query for AOVs
+            # will fail. We terminate here as there are no AOVs specified then.
+            # This state will most probably fail later on some Validator
+            # anyway.
+            print("+" * 40)
+            return enabled_aovs
+
+        default_ext = self.ext_mapping[
+            cmds.getAttr('redshiftOptions.imageFormat')
+        ]
         rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]

         # todo: find out how to detect multichannel exr for redshift
@@ -674,7 +781,6 @@ class ExpectedFilesRedshift(AExpectedFiles):
             enabled = self.maya_is_true(override)

         if enabled:
-            # todo: find how redshift set format for AOVs
             enabled_aovs.append(
                 (
                     cmds.getAttr('%s.name' % aov),
diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py
index b74199352a..923ee185c6 100644
--- a/pype/plugins/maya/publish/validate_rendersettings.py
+++ b/pype/plugins/maya/publish/validate_rendersettings.py
@@ -48,6 +48,13 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
         'redshift': 'defaultRenderGlobals.imageFilePrefix'
     }

+    ImagePrefixTokens = {
+
+        'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
+        'vray': 'maya/<Scene>/<Layer>/<Layer>'
+    }
+
     R_AOV_TOKEN = re.compile(
         r'%a|<aov>|<renderpass>', re.IGNORECASE)
     R_LAYER_TOKEN = re.compile(
@@ -99,26 +106,31 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                           "doesn't have: '<renderlayer>' or "
                           "'<layer>' token".format(prefix))

-        if not re.search(cls.R_AOV_TOKEN, prefix):
-            invalid = True
-            cls.log.error("Wrong image prefix [ {} ] - "
-                          "doesn't have: '<aov>' or "
-                          "'<renderpass>' token".format(prefix))
-
         if len(cameras) > 1:
             if not re.search(cls.R_CAMERA_TOKEN, prefix):
                 invalid = True
                 cls.log.error("Wrong image prefix [ {} ] - "
                               "doesn't have: '<camera>' token".format(prefix))

+        # renderer specific checks
         if renderer == "vray":
-            if prefix.lower() != cls.VRAY_PREFIX.lower():
-                cls.log.warning("warning: prefix differs from "
-                                "recommended {}".format(cls.VRAY_PREFIX))
+            # no vray checks implemented yet
+            pass
+        elif renderer == "redshift":
+            # no redshift check implemented yet
+            pass
         else:
-            if prefix.lower() != cls.DEFAULT_PREFIX.lower():
-                cls.log.warning("warning: prefix differs from "
-                                "recommended {}".format(cls.DEFAULT_PREFIX))
+            if not re.search(cls.R_AOV_TOKEN, prefix):
+                invalid = True
+                cls.log.error("Wrong image prefix [ {} ] - "
+                              "doesn't have: '<aov>' or "
+                              "'<renderpass>' token".format(prefix))
+
+        # prefix check
+        if prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
+            cls.log.warning("warning: prefix differs from "
+                            "recommended {}".format(
+                                cls.ImagePrefixTokens[renderer]))

         if padding != cls.DEFAULT_PADDING:
             invalid = True
@@ -127,21 +139,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

         return invalid

-    @classmethod
-    def get_prefix(cls, renderer):
-        prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX)
-        # maya.cmds and pymel.core return only default project directory and
-        # not the current one but only default.
-        output_path = os.path.join(
-            mel.eval("workspace -q -rd;"), pm.workspace.fileRules["images"]
-        )
-        # Workfile paths can be configured to have host name in file path.
-        # In this case we want to avoid duplicate folder names.
-        if "maya" in output_path.lower():
-            prefix = prefix.replace("maya/", "")
-
-        return prefix
-
     @classmethod
     def repair(cls, instance):

@@ -156,7 +153,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
             node = render_attrs["node"]
             prefix_attr = render_attrs["prefix"]

-        fname_prefix = cls.get_prefix(renderer)
+        fname_prefix = cls.ImagePrefixTokens[renderer]
         cmds.setAttr("{}.{}".format(node, prefix_attr),
                      fname_prefix, type="string")

From ffca94db6d2f1857bdc73aee6bd94ce9b14e9a20 Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Wed, 19 Feb 2020 14:09:20 +0100
Subject: [PATCH 289/434] fixed vray support

---
 pype/plugins/maya/publish/collect_render.py | 47 ++++++++++++++++++---
 1 file changed, 41 insertions(+), 6 deletions(-)

diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index 7d0e798ae9..4d87c9b0f6 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -674,14 +674,53 @@ class ExpectedFilesArnold(AExpectedFiles):


 class ExpectedFilesVray(AExpectedFiles):
+    # V-ray file extension mapping
+    # 5 - exr
+    # 6 - multichannel exr
+    # 13 - deep exr
+
     def __init__(self, layer):
         super(ExpectedFilesVray, self).__init__(layer)
         self.renderer = 'vray'

-    def get_aovs(self):
+    def get_renderer_prefix(self):
+        prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
+        prefix = "{}_<aov>".format(prefix)
+        return prefix

-        default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
+    def get_files(self):
+        expected_files = super(ExpectedFilesVray, self).get_files()
+
+        # we need to add one sequence for plain beauty if AOVs are enabled,
+        # as V-Ray outputs beauty without 'beauty' in the filename.
+
+        layer_data = self._get_layer_data()
+        if layer_data.get("enabledAOVs"):
+            expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data)  # noqa: E501
+
+        return expected_files
+
+    def get_aovs(self):
         enabled_aovs = []
+
+        try:
+            # really? do we set it in vray just by selecting multichannel exr?
+            if cmds.getAttr(
+                    "vraySettings.imageFormatStr") == "exr (multichannel)":
+                # AOVs are merged in multi-channel file
+                return enabled_aovs
+        except ValueError:
+            # this occurs when Render Settings window was not opened yet. In
+            # such case there are no V-Ray options created so query for AOVs
+            # will fail. We terminate here as there are no AOVs specified then.
+            # This state will most probably fail later on some Validator
+            # anyway.
+            return enabled_aovs
+
+        default_ext = cmds.getAttr('vraySettings.imageFormatStr')
+        if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
+            default_ext = "exr"
+
         vr_aovs = [n for n in cmds.ls(
             type=["VRayRenderElement", "VRayRenderElementSet"])]

@@ -754,9 +793,6 @@ class ExpectedFilesRedshift(AExpectedFiles):
             if self.maya_is_true(
                     cmds.getAttr("redshiftOptions.exrForceMultilayer")):
                 # AOVs are merged in multi-channel file
-                print("*" * 40)
-                print(cmds.getAttr("redshiftOptions.exrForceMultilayer"))
-                print("*" * 40)
                 return enabled_aovs
         except ValueError:
             # this occurs when Render Settings window was not opened yet. In
             # such case there are no Redshift options created so query for AOVs
             # will fail. We terminate here as there are no AOVs specified then.
             # This state will most probably fail later on some Validator
             # anyway.
-            print("+" * 40)
             return enabled_aovs

         default_ext = self.ext_mapping[
From 43b82cf583ff86af3c5e28aca07dc2f4e746b97d Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Wed, 19 Feb 2020 16:24:21 +0100
Subject: [PATCH 290/434] fixed renderman support

---
 pype/plugins/maya/publish/collect_render.py   | 81 +++++++++++++++----
 .../maya/publish/validate_rendersettings.py   | 27 ++++++-
 2 files changed, 92 insertions(+), 16 deletions(-)

diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index 4d87c9b0f6..f76ee1120f 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -60,6 +60,10 @@ R_LAYER_TOKEN = re.compile(
 R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
 R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
 R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE)
+# to remove unused renderman tokens
+R_CLEAN_FRAME_TOKEN = re.compile(r'\.?<f\d+>\.?', re.IGNORECASE)
+R_CLEAN_EXT_TOKEN = re.compile(r'\.?<ext>\.?', re.IGNORECASE)
+
 R_SUBSTITUTE_LAYER_TOKEN = re.compile(
     r'%l|<layer>|<renderlayer>', re.IGNORECASE)
 R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
@@ -78,7 +82,7 @@ ImagePrefixes = {
     'mentalray': 'defaultRenderGlobals.imageFilePrefix',
     'vray': 'vraySettings.fileNamePrefix',
     'arnold': 'defaultRenderGlobals.imageFilePrefix',
-    'renderman': 'defaultRenderGlobals.imageFilePrefix',
+    'renderman': 'rmanGlobals.imageFileFormat',
     'redshift': 'defaultRenderGlobals.imageFilePrefix'
 }

@@ -195,12 +199,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             aov_dict["beauty"] = full_paths

             full_exp_files.append(aov_dict)
-
-            from pprint import pprint
-            print("=" * 40)
-            pprint(full_exp_files)
-            print("=" * 40)
-
+            self.log.info(full_exp_files)
             self.log.info("collecting layer: {}".format(layer_name))
             # Get layer specific settings, might be overrides
             data = {
@@ -429,10 +428,6 @@ class AExpectedFiles:
         # __________________/
         enabled_aovs = self.get_aovs()
-        from pprint import pprint
-        print("-" * 40)
-        pprint(enabled_aovs)
-        print("-" * 40)

         layer_name = self.layer
         if self.layer.startswith("rs_"):
             layer_name = self.layer[3:]
@@ -467,7 +462,9 @@ class AExpectedFiles:
                 (R_SUBSTITUTE_CAMERA_TOKEN, cam),
                 # this is required to remove unfilled aov token, for example
                 # in Redshift
-                (R_REMOVE_AOV_TOKEN, "")
+                (R_REMOVE_AOV_TOKEN, ""),
+                (R_CLEAN_FRAME_TOKEN, ""),
+                (R_CLEAN_EXT_TOKEN, "")
             )

             for regex, value in mappings:
@@ -495,7 +492,9 @@ class AExpectedFiles:
                     (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
                     (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
                     (R_SUBSTITUTE_CAMERA_TOKEN, cam),
-                    (R_SUBSTITUTE_AOV_TOKEN, aov[0])
+                    (R_SUBSTITUTE_AOV_TOKEN, aov[0]),
+                    (R_CLEAN_FRAME_TOKEN, ""),
+                    (R_CLEAN_EXT_TOKEN, "")
                 )

                 for regex, value in mappings:
@@ -829,7 +828,58 @@ class ExpectedFilesRedshift(AExpectedFiles):

 class ExpectedFilesRenderman(AExpectedFiles):
     def __init__(self, layer):
-        raise UnimplementedRendererException('Renderman not implemented')
+        super(ExpectedFilesRenderman, self).__init__(layer)
+        self.renderer = 'renderman'
+
+    def get_aovs(self):
+        enabled_aovs = []
+
+        default_ext = "exr"
+        displays = cmds.listConnections("rmanGlobals.displays")
+        for aov in displays:
+            aov_name = str(aov)
+            if aov_name == "rmanDefaultDisplay":
+                aov_name = "beauty"
+
+            enabled = self.maya_is_true(
+                cmds.getAttr("{}.enable".format(aov)))
+            for override in self.get_layer_overrides(
+                    '{}.enable'.format(aov), self.layer):
+                enabled = self.maya_is_true(override)
+
+            if enabled:
+                enabled_aovs.append(
+                    (
+                        aov_name,
+                        default_ext
+                    )
+                )
+
+        return enabled_aovs
+
+    def get_files(self):
+        """
+        In Renderman we hack it by prepending the path. This path would
+        normally be translated from `rmanGlobals.imageOutputDir`. We skip
+        this and hardcode the prepend path we expect. There is no place for
+        the user to mess around with these settings anyway and it is
+        enforced in the render settings validator.
+        """
+        layer_data = self._get_layer_data()
+        new_aovs = {}
+
+        expected_files = super(ExpectedFilesRenderman, self).get_files()
+        # we always get beauty
+        for aov, files in expected_files[0].items():
+            new_files = []
+            for file in files:
+                new_file = "{}/{}/{}".format(layer_data["sceneName"],
+                                             layer_data["layerName"],
+                                             file)
+                new_files.append(new_file)
+            new_aovs[aov] = new_files
+
+        return [new_aovs]


 class ExpectedFilesMentalray(AExpectedFiles):

     def __init__(self, layer):
         raise UnimplementedRendererException('Mentalray not implemented')

+    def get_aovs(self):
+        return []
+

 class AOVError(Exception):
     pass
diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py
index 923ee185c6..107916df94 100644
--- a/pype/plugins/maya/publish/validate_rendersettings.py
+++ b/pype/plugins/maya/publish/validate_rendersettings.py
@@ -44,7 +44,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
         'mentalray': 'defaultRenderGlobals.imageFilePrefix',
         'vray': 'vraySettings.fileNamePrefix',
         'arnold': 'defaultRenderGlobals.imageFilePrefix',
-        'renderman': 'defaultRenderGlobals.imageFilePrefix',
+        'renderman': 'rmanGlobals.imageFileFormat',
         'redshift': 'defaultRenderGlobals.imageFilePrefix'
     }

@@ -52,9 +52,19 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

         'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
-        'vray': 'maya/<Scene>/<Layer>/<Layer>'
+        'vray': 'maya/<Scene>/<Layer>/<Layer>',
+        'renderman': '<layer>_<aov>.<f4>.<ext>'
     }

+    # WARNING: There is a bug? in renderman - translating the <scene> token
+    # leaves behind mayas default image prefix. So instead of
+    # `SceneName_v01` it translates to:
+    # `SceneName_v01/<layer>/<layer>` that means
+    # for example:
+    # `SceneName_v01/Main/Main_`. Possible solution is to define a
+    # custom token like <scene_name> to point to determined scene name.
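+    # A minimal illustrative sketch of how the two prefixes combine
+    # (hypothetical values - scene `SceneName_v01`, layer `Main`, AOV
+    # `beauty`, frame 1): the directory prefix below plus the
+    # `<layer>_<aov>.<f4>.<ext>` file format above would resolve to
+    # `.../renders/maya/SceneName_v01/Main/Main_beauty.0001.exr`.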
+    RendermanDirPrefix = "<ws>/renders/maya/<scene>/<layer>"
+
     R_AOV_TOKEN = re.compile(
         r'%a|<aov>|<renderpass>', re.IGNORECASE)
     R_LAYER_TOKEN = re.compile(
@@ -119,6 +129,19 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
         elif renderer == "redshift":
             # no redshift check implemented yet
             pass
+        elif renderer == "renderman":
+            file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat")
+            dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir")
+
+            if file_prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
+                invalid = True
+                cls.log.error("Wrong image prefix [ {} ]".format(file_prefix))
+
+            if dir_prefix.lower() != cls.RendermanDirPrefix.lower():
+                invalid = True
+                cls.log.error("Wrong directory prefix [ {} ]".format(
+                    dir_prefix))
+
         else:
             if not re.search(cls.R_AOV_TOKEN, prefix):
                 invalid = True
From 0ba0fa8785a3ae8a09e0c5d53ad2e3c72baf7e26 Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Wed, 19 Feb 2020 16:34:18 +0100
Subject: [PATCH 291/434] renderman in validate render settings

---
 .../maya/publish/validate_rendersettings.py | 27 ++++++++++++-------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py
index 107916df94..d6cbea8b2d 100644
--- a/pype/plugins/maya/publish/validate_rendersettings.py
+++ b/pype/plugins/maya/publish/validate_rendersettings.py
@@ -173,14 +173,23 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
         render_attrs = lib.RENDER_ATTRS.get(renderer, default)

         # Repair prefix
-        node = render_attrs["node"]
-        prefix_attr = render_attrs["prefix"]
+        if renderer != "renderman":
+            node = render_attrs["node"]
+            prefix_attr = render_attrs["prefix"]

-        fname_prefix = cls.ImagePrefixTokens[renderer]
-        cmds.setAttr("{}.{}".format(node, prefix_attr),
-                     fname_prefix, type="string")
+            fname_prefix = cls.ImagePrefixTokens[renderer]
+            cmds.setAttr("{}.{}".format(node, prefix_attr),
+                         fname_prefix, type="string")

-        # Repair padding
-        padding_attr = render_attrs["padding"]
-        cmds.setAttr("{}.{}".format(node, padding_attr),
-                     cls.DEFAULT_PADDING)
+            # Repair padding
+            padding_attr = render_attrs["padding"]
+            cmds.setAttr("{}.{}".format(node, padding_attr),
+                         cls.DEFAULT_PADDING)
+        else:
+            # renderman handles stuff differently
+            cmds.setAttr("rmanGlobals.imageFileFormat",
+                         cls.ImagePrefixTokens[renderer],
+                         type="string")
+            cmds.setAttr("rmanGlobals.imageOutputDir",
+                         cls.RendermanDirPrefix,
+                         type="string")
From b47365c49fc9576ed196b6dbe899ed3e4e68b2ca Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Wed, 19 Feb 2020 19:12:52 +0100
Subject: [PATCH 292/434] thumbnail is also added to asset entity

---
 pype/plugins/global/publish/integrate_thumbnail.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py
index 1c4399b386..b623fa9072 100644
--- a/pype/plugins/global/publish/integrate_thumbnail.py
+++ b/pype/plugins/global/publish/integrate_thumbnail.py
@@ -137,3 +137,12 @@ class IntegrateThumbnails(pyblish.api.InstancePlugin):
         self.log.debug("Setting thumbnail for version \"{}\" <{}>".format(
             version["name"], str(version["_id"])
         ))
+
+        asset_entity = instance.data["assetEntity"]
+        io.update_many(
+            {"_id": asset_entity["_id"]},
+            {"$set": {"data.thumbnail_id": thumbnail_id}}
+        )
+        self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format(
+            asset_entity["name"], str(asset_entity["_id"])
+        ))
From bed02e694d0372fbe5c52935e61fe4b4ab4a3f6d Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 20 Feb 2020 11:44:09 +0100
Subject: [PATCH 293/434] feat(nks): adding functions for loading

---
 pype/nukestudio/lib.py          | 184 +++++++++++++++++++++++++++++++
 pype/nukestudio/precomp_clip.py | 188 --------------------------------
 2 files changed, 184 insertions(+), 188 deletions(-)
 delete mode 100644 pype/nukestudio/precomp_clip.py

diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py
index c71e2cb999..04139d7416 100644
--- a/pype/nukestudio/lib.py
+++ b/pype/nukestudio/lib.py
@@ -1,4 +1,5 @@
 import os
+import re
 import sys
 import hiero
 import pyblish.api
@@ -361,3 +362,186 @@ def CreateNukeWorkfile(nodes=None,
         nodes=nuke_script.getNodes(),
         **kwargs
     )
+
+
+def create_nk_workfile_clips(nk_workfiles, seq=None):
+    '''
+    nk_workfile is list of dictionaries like:
+    [{
+        'path': 'P:/Jakub_testy_pipeline/test_v01.nk',
+        'name': 'test',
+        'handleStart': 15,  # added asymmetrically to handles
+        'handleEnd': 10,  # added asymmetrically to handles
+        "clipIn": 16,
+        "frameStart": 991,
+        "frameEnd": 1023,
+        'task': 'Comp-tracking',
+        'work_dir': 'VFX_PR',
+        'shot': '00010'
+    }]
+    '''
+
+    proj = hiero.core.projects()[-1]
+    root = proj.clipsBin()
+
+    if not seq:
+        seq = hiero.core.Sequence('NewSequences')
+        root.addItem(hiero.core.BinItem(seq))
+    # todo will need to define this better
+    # track = seq[1]  # lazy example to get a destination track
+    clips_lst = []
+    for nk in nk_workfiles:
+        task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']])
+        bin = create_bin_in_project(task_path, proj)
+
+        if nk['task'] not in seq.videoTracks():
+            track = hiero.core.VideoTrack(nk['task'])
+            seq.addTrack(track)
+        else:
+            track = seq.tracks(nk['task'])
+
+        # create clip media
+        print("__ path: `{}`".format(nk['path']))
+
+        media = hiero.core.MediaSource(nk['path'])
+        media_in = int(media.startTime() or 0)
+        media_duration = int(media.duration() or 0)
+
+        handle_start = nk.get("handleStart")
+        handle_end = nk.get("handleEnd")
+
+        if media_in:
+            source_in = media_in + handle_start
+        else:
+            source_in = nk["frameStart"] + handle_start
+
+        if media_duration:
+            source_out = (media_in + media_duration - 1) - handle_end
+        else:
+            source_out = nk["frameEnd"] - handle_end
+
+        print("__ media: `{}`".format(media))
+        print("__ media_in: `{}`".format(media_in))
+        print("__ media_duration : `{}`".format(media_duration))
+        print("__ source_in: `{}`".format(source_in))
+        print("__ source_out : `{}`".format(source_out))
+
+        source = hiero.core.Clip(media)
+        print("__ source : `{}`".format(source))
+        print("__ source.sourceIn(): `{}`".format(source.sourceIn()))
+
+        name = os.path.basename(os.path.splitext(nk['path'])[0])
+        split_name = split_by_client_version(name)[0] or name
+
+        print("__ split_name: `{}`".format(split_name))
+
+        # add to bin as clip item
+        items_in_bin = [b.name() for b in bin.items()]
+        if split_name not in items_in_bin:
+            binItem = hiero.core.BinItem(source)
+            bin.addItem(binItem)
+
+        print("__ bin.items(): `{}`".format(bin.items()))
+
+        new_source = [
+            item for item in bin.items() if split_name in item.name()
+        ][0].items()[0].item()
+
+        print("__ new_source: `{}`".format(new_source))
+        print("__ new_source: `{}`".format(new_source))
+
+        # add to track as clip item
+        trackItem = hiero.core.TrackItem(
+            split_name, hiero.core.TrackItem.kVideo)
+        trackItem.setSource(new_source)
+        trackItem.setSourceIn(source_in)
+        trackItem.setSourceOut(source_out)
+        trackItem.setTimelineIn(nk["clipIn"])
+        trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in))
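+        # (the timeline out above is clip-in plus the trimmed source
+        # duration, so the track item keeps the exact source_in..source_out
+        # range placed at `clipIn` on the timeline)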
+        track.addTrackItem(trackItem)
+        clips_lst.append(trackItem)
+
+    return clips_lst
+
+
+def create_bin_in_project(bin_name='', project=''):
+    '''
+    create a bin in the project;
+    if bin_name is "bin1/bin2/bin3" it will create the whole depth
+    '''
+
+    if not project:
+        # get the first loaded project
+        project = hiero.core.projects()[-1]
+    if not bin_name:
+        return None
+    if '/' in bin_name:
+        bin_name = bin_name.split('/')
+    else:
+        bin_name = [bin_name]
+
+    clipsBin = project.clipsBin()
+
+    done_bin_lst = []
+    for i, b in enumerate(bin_name):
+        if i == 0 and len(bin_name) > 1:
+            if b in [bin.name() for bin in clipsBin.bins()]:
+                bin = [bin for bin in clipsBin.bins() if b in bin.name()][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                clipsBin.addItem(create_bin)
+                done_bin_lst.append(create_bin)
+
+        elif i >= 1 and i < len(bin_name) - 1:
+            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
+                bin = [
+                    bin for bin in done_bin_lst[i - 1].bins()
+                    if b in bin.name()
+                ][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                done_bin_lst[i - 1].addItem(create_bin)
+                done_bin_lst.append(create_bin)
+
+        elif i == len(bin_name) - 1:
+            if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]:
+                bin = [
+                    bin for bin in done_bin_lst[i - 1].bins()
+                    if b in bin.name()
+                ][0]
+                done_bin_lst.append(bin)
+            else:
+                create_bin = hiero.core.Bin(b)
+                done_bin_lst[i - 1].addItem(create_bin)
+                done_bin_lst.append(create_bin)
+    # print [bin.name() for bin in clipsBin.bins()]
+    return done_bin_lst[-1]
+
+
+def split_by_client_version(string):
+    regex = r"[/_.]v\d+"
+    try:
+        matches = re.findall(regex, string, re.IGNORECASE)
+        return string.split(matches[0])
+    except Exception as e:
+        print(e)
+        return None
+
+
+# nk_workfiles = [{
+#     'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk',
+#     'name': '120sh020_platesMain',
+#     'handles': 10,
+#     'handleStart': 10,
+#     'handleEnd': 10,
+#     "clipIn": 16,
+#     "frameStart": 991,
+#     "frameEnd": 1023,
+#     'task': 'platesMain',
+#     'work_dir': 'shots',
+#     'shot': '120sh020'
+# }]
diff --git a/pype/nukestudio/precomp_clip.py b/pype/nukestudio/precomp_clip.py
deleted file mode 100644
index b544b6e654..0000000000
--- a/pype/nukestudio/precomp_clip.py
+++ /dev/null
@@ -1,188 +0,0 @@
-import hiero.core
-import hiero.ui
-
-import re
-import os
-
-
-def create_nk_script_clips(script_lst, seq=None):
-    '''
-    nk_scripts is list of dictionaries like:
-    [{
-        'path': 'P:/Jakub_testy_pipeline/test_v01.nk',
-        'name': 'test',
-        'handles': 10,
-        'handleStart': 15, # added asymetrically to handles
-        'handleEnd': 10, # added asymetrically to handles
-        "clipIn": 16,
-        "frameStart": 991,
-        "frameEnd": 1023,
-        'task': 'Comp-tracking',
-        'work_dir': 'VFX_PR',
-        'shot': '00010'
-    }]
-    '''
-
-    proj = hiero.core.projects()[-1]
-    root = proj.clipsBin()
-
-    if not seq:
-        seq = hiero.core.Sequence('NewSequences')
-        root.addItem(hiero.core.BinItem(seq))
-    # todo will ned to define this better
-    # track = seq[1] # lazy example to get a destination# track
-    clips_lst = []
-    for nk in script_lst:
-        task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']])
-        bin = create_bin_in_project(task_path, proj)
-
-        if nk['task'] not in seq.videoTracks():
-            track = hiero.core.VideoTrack(nk['task'])
-            seq.addTrack(track)
-        else:
-            track = seq.tracks(nk['task'])
-
-        # create slip media
-        print("__ path: `{}`".format(nk['path']))
- media = hiero.core.MediaSource(nk['path']) - media_in = int(media.startTime() or 0) - media_duration = int(media.duration() or 0) - - handle_start = nk.get("handleStart") or nk['handles'] - handle_end = nk.get("handleEnd") or nk['handles'] - - if media_in: - source_in = media_in + handle_start - else: - source_in = nk["frameStart"] + handle_start - - if media_duration: - source_out = (media_in + media_duration - 1) - handle_end - else: - source_out = nk["frameEnd"] - handle_end - - print("__ media: `{}`".format(media)) - print("__ media_in: `{}`".format(media_in)) - print("__ media_duration : `{}`".format(media_duration)) - print("__ source_in: `{}`".format(source_in)) - print("__ source_out : `{}`".format(source_out)) - - source = hiero.core.Clip(media) - print("__ source : `{}`".format(source)) - print("__ source.sourceIn(): `{}`".format(source.sourceIn())) - - name = os.path.basename(os.path.splitext(nk['path'])[0]) - split_name = split_by_client_version(name)[0] or name - - print("__ split_name: `{}`".format(split_name)) - - # add to bin as clip item - items_in_bin = [b.name() for b in bin.items()] - if split_name not in items_in_bin: - binItem = hiero.core.BinItem(source) - bin.addItem(binItem) - - print("__ bin.items(): `{}`".format(bin.items())) - - new_source = [ - item for item in bin.items() if split_name in item.name() - ][0].items()[0].item() - - print("__ new_source: `{}`".format(new_source)) - print("__ new_source: `{}`".format(new_source)) - - # add to track as clip item - trackItem = hiero.core.TrackItem(split_name, hiero.core.TrackItem.kVideo) - trackItem.setSource(new_source) - trackItem.setSourceIn(source_in) - trackItem.setSourceOut(source_out) - trackItem.setSourceIn(source_in) - trackItem.setTimelineIn(nk["clipIn"]) - trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) - track.addTrackItem(trackItem) - track.addTrackItem(trackItem) - clips_lst.append(trackItem) - - return clips_lst - - -def create_bin_in_project(bin_name='', project=''): - ''' - create bin in project and - if the bin_name is "bin1/bin2/bin3" it will create whole depth - ''' - - if not project: - # get the first loaded project - project = hiero.core.projects()[-1] - if not bin_name: - return None - if '/' in bin_name: - bin_name = bin_name.split('/') - else: - bin_name = [bin_name] - - clipsBin = project.clipsBin() - - done_bin_lst = [] - for i, b in enumerate(bin_name): - if i == 0 and len(bin_name) > 1: - if b in [bin.name() for bin in clipsBin.bins()]: - bin = [bin for bin in clipsBin.bins() if b in bin.name()][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - clipsBin.addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i >= 1 and i < len(bin_name) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i == len(bin_name) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - # print [bin.name() for bin in clipsBin.bins()] - return done_bin_lst[-1] - - -def split_by_client_version(string): - regex = r"[/_.]v\d+" - try: - matches = re.findall(regex, string, re.IGNORECASE) - 
return string.split(matches[0])
-    except Exception as e:
-        print(e)
-        return None
-
-
-script_lst = [{
-    'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk',
-    'name': '120sh020_platesMain',
-    'handles': 10,
-    'handleStart': 10,
-    'handleEnd': 10,
-    "clipIn": 16,
-    "frameStart": 991,
-    "frameEnd": 1023,
-    'task': 'platesMain',
-    'work_dir': 'shots',
-    'shot': '120sh020'
-}]
From 2a1e057716df52f88fc6cac6e6086a3d5d26ce34 Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Thu, 20 Feb 2020 13:18:08 +0100
Subject: [PATCH 294/434] [bugfix] - integrator wasn't comparing instance name
 with context name

---
 pype/plugins/global/publish/integrate_new.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index a2343ce8a9..2b11185595 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -111,15 +111,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):

         project_entity = instance.data["projectEntity"]

+        context_asset_name = context.data["assetEntity"]["name"]
+
         asset_name = instance.data["asset"]
         asset_entity = instance.data.get("assetEntity")
-        if not asset_entity:
+        if not asset_entity or asset_entity["name"] != context_asset_name:
             asset_entity = io.find_one({
                 "type": "asset",
                 "name": asset_name,
                 "parent": project_entity["_id"]
             })
-
         assert asset_entity, (
             "No asset found by the name \"{0}\" in project \"{1}\""
         ).format(asset_name, project_entity["name"])
From e33727fcbf603aeb5c639ff5f90e20b3354e14f Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 20 Feb 2020 17:14:49 +0100
Subject: [PATCH 295/434] feat(nks): loading workflow wip

---
 pype/nukestudio/lib.py | 92 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 91 insertions(+), 1 deletion(-)

diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py
index 04139d7416..3115428d8c 100644
--- a/pype/nukestudio/lib.py
+++ b/pype/nukestudio/lib.py
@@ -8,7 +8,6 @@ from avalon.vendor.Qt import (QtWidgets, QtGui)
 import pype.api as pype
 from pypeapp import Logger
 
-
 log = Logger().get_logger(__name__, "nukestudio")
 
 cached_process = None
@@ -364,6 +363,97 @@ def CreateNukeWorkfile(nodes=None,
         **kwargs
     )
+
+
+class ClipsLoader:
+
+    def __init__(self, representations, **kwargs):
+        """ Initialize object
+
+        Arguments:
+            hiero_workfile_name (str): name of workfile
+            representations (dict): representations for processing
+                example: {assetName_subsetName_representationName: {
+                    "_id": ObjectId("5as5d54fa56dfa56s6d56asddf4as"),
+                    "path": "path/to/file/created/by/get_repr..",
+                    "binPath": "projectBinPath",
+                    "context": {
+                        "subset": "subsetName",
+                        "task": "taskName",
+                        "family": "write",
+                        "hierarchy": "parent/subparent",
+                        "frame": "0996",
+                        "project": {
+                            "code": "j01test",
+                            "name": "J01_jakub_test"
+                        },
+                        "version": 1,
+                        "asset": "assetName",
+                        "representation": "representationName",
+                        "root": "projectsRootPath"
+                    }
+                  }
+                }
+        """
+        self.representations = representations
+        self.kwargs = kwargs
+        self.active_project = self.get_active_project()
+        self.project_bin = self.active_project.clipsBin()
+
+        # inject asset data to representation dict
+        self.get_asset_data()
+
+    def get_active_project(self):
+        """ Get hiero active project object
+        """
+        fname = self.kwargs.get("hiero_workfile_name", "")
+
+        return next((p for p in hiero.core.projects()
+                     if fname in p.name()),
+                    hiero.core.projects()[-1])
+
+    def get_asset_data(self):
+        """ Get all available asset data
+
+        joins `data` key with asset.data dict into the representation
+
+        """
+        for name, data in self.representations.items():
+            asset_name = data["context"]["asset"]
+            data["data"] = pype.get_asset(asset_name)["data"]
+
+    def make_project_bin(self, hierarchy):
+        """ Create bins by given hierarchy path
+
+        It will also make sure no duplicate bins will be created
+
+        Arguments:
+            hierarchy (str): path divided by slashes "bin0/bin1/bin2"
+
+        Returns:
+            bin (hiero.core.BinItem): with the bin to be used for mediaItem
+        """
+        pass
+
+    def make_track_item(self):
+        """ Create track item """
+        pass
+
+    def set_clip_color(self, last_version=True):
+        """ Sets color of clip on clip/track item
+
+        Arguments:
+            last_version (bool): True = green | False = red
+        """
+        pass
+
+    def set_container_tag(self, item, metadata):
+        """ Sets container tag to given clip/track item
+
+        Arguments:
+            item (hiero.core.BinItem or hiero.core.TrackItem)
+            metadata (dict): data to be added to tag
+        """
+        pass
+
 def create_nk_workfile_clips(nk_workfiles, seq=None):
     '''
     nk_workfile is list of dictionaries like:
From 8eb6ad4fe68a19bfcd55d7700f54ac74848d2c89 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 20 Feb 2020 18:19:43 +0100
Subject: [PATCH 296/434] feat(nks): adding loader plugin

---
 pype/plugins/nukestudio/load/load_sequence.py | 343 ++++++++++++++++++
 1 file changed, 343 insertions(+)
 create mode 100644 pype/plugins/nukestudio/load/load_sequence.py

diff --git a/pype/plugins/nukestudio/load/load_sequence.py b/pype/plugins/nukestudio/load/load_sequence.py
new file mode 100644
index 0000000000..e3637d4aed
--- /dev/null
+++ b/pype/plugins/nukestudio/load/load_sequence.py
@@ -0,0 +1,343 @@
+import re
+import nuke
+import contextlib
+
+from avalon import api, io
+from pype.nuke import presets
+
+
+@contextlib.contextmanager
+def preserve_trim(node):
+    """Preserve the relative trim of the Loader tool.
+
+    This tries to preserve the loader's trim (trim in and trim out) after
+    the context by reapplying the "amount" it trims on the clip's length at
+    start and end.
+
+    """
+    # working script frame range
+    script_start = nuke.root()["first_frame"].value()
+
+    start_at_frame = None
+    offset_frame = None
+    if node['frame_mode'].value() == "start at":
+        start_at_frame = node['frame'].value()
+    if node['frame_mode'].value() == "offset":
+        offset_frame = node['frame'].value()
+
+    try:
+        yield
+    finally:
+        if start_at_frame:
+            node['frame_mode'].setValue("start at")
+            node['frame'].setValue(str(script_start))
+            print("start frame of Read was set to"
+                  "{}".format(script_start))
+
+        if offset_frame:
+            node['frame_mode'].setValue("offset")
+            node['frame'].setValue(str((script_start + offset_frame)))
+            print("start frame of Read was set to"
+                  "{}".format(script_start))
+
+
+def loader_shift(node, frame, relative=True):
+    """Shift global in time by i preserving duration
+
+    This moves the loader by i frames preserving global duration. When relative
+    is False it will shift the global in to the start frame.
+
+    Args:
+        loader (tool): The fusion loader tool.
+        frame (int): The amount of frames to move.
+        relative (bool): When True the shift is relative, else the shift will
+            change the global in to frame.
+ + Returns: + int: The resulting relative frame change (how much it moved) + + """ + # working script frame range + script_start = nuke.root()["first_frame"].value() + + if relative: + node['frame_mode'].setValue("start at") + node['frame'].setValue(str(frame)) + + return int(script_start) + + +class LoadSequence(api.Loader): + """Load image sequence into Nuke""" + + families = ["render2d", "source", "plate", "render"] + representations = ["exr", "dpx", "jpg", "jpeg", "png"] + + label = "Load sequence" + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name, namespace, data): + from avalon.nuke import ( + containerise, + viewer_update_and_undo_stop + ) + + version = context['version'] + version_data = version.get("data", {}) + import logging + self.log.setLevel(logging.DEBUG) + self.log.info("version_data: {}\n".format(version_data)) + self.log.error("version_data: {}\n".format(version_data)) + self.log.debug("__ context: {}\n".format(context)) + self.log.warning( + "__ representation: {}\n".format(context["representation"])) + + self.first_frame = int(nuke.root()["first_frame"].getValue()) + self.handle_start = version_data.get("handleStart", 0) + self.handle_end = version_data.get("handleEnd", 0) + + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + + # Fallback to asset name when namespace is None + if namespace is None: + namespace = context['asset']['name'] + + first -= self.handle_start + last += self.handle_end + + file = self.fname + + if not file: + repr_id = context["representation"]["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + repr_cont = context["representation"]["context"] + if "#" not in file: + frame = repr_cont.get("frame") + padding = len(frame) + file = file.replace(frame, "#"*padding) + + read_name = "Read_{0}_{1}_{2}".format( + repr_cont["asset"], + repr_cont["subset"], + repr_cont["representation"]) + + # Create the Loader with the filename path set + with viewer_update_and_undo_stop(): + # TODO: it might be universal read to img/geo/camera + r = nuke.createNode( + "Read", + "name {}".format(read_name)) + r["file"].setValue(file) + + # Set colorspace defined in version data + colorspace = context["version"]["data"].get("colorspace") + if colorspace: + r["colorspace"].setValue(str(colorspace)) + + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + r["colorspace"].setValue(str(preset_clrsp)) + + loader_shift(r, first, relative=True) + r["origfirst"].setValue(int(first)) + r["first"].setValue(int(first)) + r["origlast"].setValue(int(last)) + r["last"].setValue(int(last)) + + # add additional metadata from the version to imprint Avalon knob + add_keys = ["frameStart", "frameEnd", + "source", "colorspace", "author", "fps", "version", + "handleStart", "handleEnd"] + + data_imprint = {} + for k in add_keys: + if k == 'version': + data_imprint.update({k: context["version"]['name']}) + else: + data_imprint.update( + {k: context["version"]['data'].get(k, str(None))}) + + data_imprint.update({"objectName": read_name}) + + r["tile_color"].setValue(int("0x4ecd25ff", 16)) + + if version_data.get("retime", None): + speed = 
version_data.get("speed", 1) + time_warp_nodes = version_data.get("timewarps", []) + self.make_retimes(r, speed, time_warp_nodes) + + return containerise(r, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def make_retimes(self, node, speed, time_warp_nodes): + ''' Create all retime and timewarping nodes with coppied animation ''' + if speed != 1: + rtn = nuke.createNode( + "Retime", + "speed {}".format(speed)) + rtn["before"].setValue("continue") + rtn["after"].setValue("continue") + rtn["input.first_lock"].setValue(True) + rtn["input.first"].setValue( + self.handle_start + self.first_frame + ) + + if time_warp_nodes != []: + for timewarp in time_warp_nodes: + twn = nuke.createNode(timewarp["Class"], + "name {}".format(timewarp["name"])) + if isinstance(timewarp["lookup"], list): + # if array for animation + twn["lookup"].setAnimated() + for i, value in enumerate(timewarp["lookup"]): + twn["lookup"].setValueAt( + (self.first_frame + i) + value, + (self.first_frame + i)) + else: + # if static value `int` + twn["lookup"].setValue(timewarp["lookup"]) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. These automatic changes are to its + inputs: + + """ + + from avalon.nuke import ( + update_container + ) + + node = nuke.toNode(container['objectName']) + + assert node.Class() == "Read", "Must be Read" + + repr_cont = representation["context"] + + file = self.fname + + if not file: + repr_id = representation["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + if "#" not in file: + frame = repr_cont.get("frame") + padding = len(frame) + file = file.replace(frame, "#"*padding) + + # Get start frame from version data + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + version_data = version.get("data", {}) + + self.first_frame = int(nuke.root()["first_frame"].getValue()) + self.handle_start = version_data.get("handleStart", 0) + self.handle_end = version_data.get("handleEnd", 0) + + first = version_data.get("frameStart") + last = version_data.get("frameEnd") + + if first is None: + self.log.warning("Missing start frame for updated version" + "assuming starts at frame 0 for: " + "{} ({})".format( + node['name'].value(), representation)) + first = 0 + + first -= self.handle_start + last += self.handle_end + + # Update the loader's path whilst preserving some values + with preserve_trim(node): + node["file"].setValue(file) + self.log.info("__ node['file']: {}".format(node["file"].value())) + + # Set the global in to the start frame of the sequence + loader_shift(node, first, relative=True) + node["origfirst"].setValue(int(first)) + node["first"].setValue(int(first)) + node["origlast"].setValue(int(last)) + node["last"].setValue(int(last)) + + updated_dict = {} + updated_dict.update({ + "representation": str(representation["_id"]), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), + "colorspace": version_data.get("colorspace"), + "source": version_data.get("source"), + "handleStart": 
str(self.handle_start), + "handleEnd": str(self.handle_end), + "fps": str(version_data.get("fps")), + "author": version_data.get("author"), + "outputDir": version_data.get("outputDir"), + }) + + # change color of node + if version.get("name") not in [max_version]: + node["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + node["tile_color"].setValue(int("0x4ecd25ff", 16)) + + if version_data.get("retime", None): + speed = version_data.get("speed", 1) + time_warp_nodes = version_data.get("timewarps", []) + self.make_retimes(node, speed, time_warp_nodes) + + # Update the imprinted representation + update_container( + node, + updated_dict + ) + self.log.info("udated to version: {}".format(version.get("name"))) + + def remove(self, container): + + from avalon.nuke import viewer_update_and_undo_stop + + node = nuke.toNode(container['objectName']) + assert node.Class() == "Read", "Must be Read" + + with viewer_update_and_undo_stop(): + nuke.delete(node) From c4995356d3ad33dc9aee21ced89ee95f8a1d6fc2 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 20 Feb 2020 18:19:03 +0100 Subject: [PATCH 297/434] feat(nks): initial plugin for loading sequence to timeline --- pype/plugins/nukestudio/load/load_sequence.py | 343 ------------------ ...load_sequences_to_timeline_asset_origin.py | 33 ++ 2 files changed, 33 insertions(+), 343 deletions(-) delete mode 100644 pype/plugins/nukestudio/load/load_sequence.py create mode 100644 pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py diff --git a/pype/plugins/nukestudio/load/load_sequence.py b/pype/plugins/nukestudio/load/load_sequence.py deleted file mode 100644 index e3637d4aed..0000000000 --- a/pype/plugins/nukestudio/load/load_sequence.py +++ /dev/null @@ -1,343 +0,0 @@ -import re -import nuke -import contextlib - -from avalon import api, io -from pype.nuke import presets - - -@contextlib.contextmanager -def preserve_trim(node): - """Preserve the relative trim of the Loader tool. - - This tries to preserve the loader's trim (trim in and trim out) after - the context by reapplying the "amount" it trims on the clip's length at - start and end. - - """ - # working script frame range - script_start = nuke.root()["first_frame"].value() - - start_at_frame = None - offset_frame = None - if node['frame_mode'].value() == "start at": - start_at_frame = node['frame'].value() - if node['frame_mode'].value() == "offset": - offset_frame = node['frame'].value() - - try: - yield - finally: - if start_at_frame: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(script_start)) - print("start frame of Read was set to" - "{}".format(script_start)) - - if offset_frame: - node['frame_mode'].setValue("offset") - node['frame'].setValue(str((script_start + offset_frame))) - print("start frame of Read was set to" - "{}".format(script_start)) - - -def loader_shift(node, frame, relative=True): - """Shift global in time by i preserving duration - - This moves the loader by i frames preserving global duration. When relative - is False it will shift the global in to the start frame. - - Args: - loader (tool): The fusion loader tool. - frame (int): The amount of frames to move. - relative (bool): When True the shift is relative, else the shift will - change the global in to frame. 
- - Returns: - int: The resulting relative frame change (how much it moved) - - """ - # working script frame range - script_start = nuke.root()["first_frame"].value() - - if relative: - node['frame_mode'].setValue("start at") - node['frame'].setValue(str(frame)) - - return int(script_start) - - -class LoadSequence(api.Loader): - """Load image sequence into Nuke""" - - families = ["render2d", "source", "plate", "render"] - representations = ["exr", "dpx", "jpg", "jpeg", "png"] - - label = "Load sequence" - order = -10 - icon = "code-fork" - color = "orange" - - def load(self, context, name, namespace, data): - from avalon.nuke import ( - containerise, - viewer_update_and_undo_stop - ) - - version = context['version'] - version_data = version.get("data", {}) - import logging - self.log.setLevel(logging.DEBUG) - self.log.info("version_data: {}\n".format(version_data)) - self.log.error("version_data: {}\n".format(version_data)) - self.log.debug("__ context: {}\n".format(context)) - self.log.warning( - "__ representation: {}\n".format(context["representation"])) - - self.first_frame = int(nuke.root()["first_frame"].getValue()) - self.handle_start = version_data.get("handleStart", 0) - self.handle_end = version_data.get("handleEnd", 0) - - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) - - # Fallback to asset name when namespace is None - if namespace is None: - namespace = context['asset']['name'] - - first -= self.handle_start - last += self.handle_end - - file = self.fname - - if not file: - repr_id = context["representation"]["_id"] - self.log.warning( - "Representation id `{}` is failing to load".format(repr_id)) - return - - file = file.replace("\\", "/") - - repr_cont = context["representation"]["context"] - if "#" not in file: - frame = repr_cont.get("frame") - padding = len(frame) - file = file.replace(frame, "#"*padding) - - read_name = "Read_{0}_{1}_{2}".format( - repr_cont["asset"], - repr_cont["subset"], - repr_cont["representation"]) - - # Create the Loader with the filename path set - with viewer_update_and_undo_stop(): - # TODO: it might be universal read to img/geo/camera - r = nuke.createNode( - "Read", - "name {}".format(read_name)) - r["file"].setValue(file) - - # Set colorspace defined in version data - colorspace = context["version"]["data"].get("colorspace") - if colorspace: - r["colorspace"].setValue(str(colorspace)) - - # load nuke presets for Read's colorspace - read_clrs_presets = presets.get_colorspace_preset().get( - "nuke", {}).get("read", {}) - - # check if any colorspace presets for read is mathing - preset_clrsp = next((read_clrs_presets[k] - for k in read_clrs_presets - if bool(re.search(k, file))), - None) - if preset_clrsp is not None: - r["colorspace"].setValue(str(preset_clrsp)) - - loader_shift(r, first, relative=True) - r["origfirst"].setValue(int(first)) - r["first"].setValue(int(first)) - r["origlast"].setValue(int(last)) - r["last"].setValue(int(last)) - - # add additional metadata from the version to imprint Avalon knob - add_keys = ["frameStart", "frameEnd", - "source", "colorspace", "author", "fps", "version", - "handleStart", "handleEnd"] - - data_imprint = {} - for k in add_keys: - if k == 'version': - data_imprint.update({k: context["version"]['name']}) - else: - data_imprint.update( - {k: context["version"]['data'].get(k, str(None))}) - - data_imprint.update({"objectName": read_name}) - - r["tile_color"].setValue(int("0x4ecd25ff", 16)) - - if version_data.get("retime", None): - speed = 
version_data.get("speed", 1) - time_warp_nodes = version_data.get("timewarps", []) - self.make_retimes(r, speed, time_warp_nodes) - - return containerise(r, - name=name, - namespace=namespace, - context=context, - loader=self.__class__.__name__, - data=data_imprint) - - def make_retimes(self, node, speed, time_warp_nodes): - ''' Create all retime and timewarping nodes with coppied animation ''' - if speed != 1: - rtn = nuke.createNode( - "Retime", - "speed {}".format(speed)) - rtn["before"].setValue("continue") - rtn["after"].setValue("continue") - rtn["input.first_lock"].setValue(True) - rtn["input.first"].setValue( - self.handle_start + self.first_frame - ) - - if time_warp_nodes != []: - for timewarp in time_warp_nodes: - twn = nuke.createNode(timewarp["Class"], - "name {}".format(timewarp["name"])) - if isinstance(timewarp["lookup"], list): - # if array for animation - twn["lookup"].setAnimated() - for i, value in enumerate(timewarp["lookup"]): - twn["lookup"].setValueAt( - (self.first_frame + i) + value, - (self.first_frame + i)) - else: - # if static value `int` - twn["lookup"].setValue(timewarp["lookup"]) - - def switch(self, container, representation): - self.update(container, representation) - - def update(self, container, representation): - """Update the Loader's path - - Nuke automatically tries to reset some variables when changing - the loader's path to a new file. These automatic changes are to its - inputs: - - """ - - from avalon.nuke import ( - update_container - ) - - node = nuke.toNode(container['objectName']) - - assert node.Class() == "Read", "Must be Read" - - repr_cont = representation["context"] - - file = self.fname - - if not file: - repr_id = representation["_id"] - self.log.warning( - "Representation id `{}` is failing to load".format(repr_id)) - return - - file = file.replace("\\", "/") - - if "#" not in file: - frame = repr_cont.get("frame") - padding = len(frame) - file = file.replace(frame, "#"*padding) - - # Get start frame from version data - version = io.find_one({ - "type": "version", - "_id": representation["parent"] - }) - - # get all versions in list - versions = io.find({ - "type": "version", - "parent": version["parent"] - }).distinct('name') - - max_version = max(versions) - - version_data = version.get("data", {}) - - self.first_frame = int(nuke.root()["first_frame"].getValue()) - self.handle_start = version_data.get("handleStart", 0) - self.handle_end = version_data.get("handleEnd", 0) - - first = version_data.get("frameStart") - last = version_data.get("frameEnd") - - if first is None: - self.log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format( - node['name'].value(), representation)) - first = 0 - - first -= self.handle_start - last += self.handle_end - - # Update the loader's path whilst preserving some values - with preserve_trim(node): - node["file"].setValue(file) - self.log.info("__ node['file']: {}".format(node["file"].value())) - - # Set the global in to the start frame of the sequence - loader_shift(node, first, relative=True) - node["origfirst"].setValue(int(first)) - node["first"].setValue(int(first)) - node["origlast"].setValue(int(last)) - node["last"].setValue(int(last)) - - updated_dict = {} - updated_dict.update({ - "representation": str(representation["_id"]), - "frameStart": str(first), - "frameEnd": str(last), - "version": str(version.get("name")), - "colorspace": version_data.get("colorspace"), - "source": version_data.get("source"), - "handleStart": 
str(self.handle_start), - "handleEnd": str(self.handle_end), - "fps": str(version_data.get("fps")), - "author": version_data.get("author"), - "outputDir": version_data.get("outputDir"), - }) - - # change color of node - if version.get("name") not in [max_version]: - node["tile_color"].setValue(int("0xd84f20ff", 16)) - else: - node["tile_color"].setValue(int("0x4ecd25ff", 16)) - - if version_data.get("retime", None): - speed = version_data.get("speed", 1) - time_warp_nodes = version_data.get("timewarps", []) - self.make_retimes(node, speed, time_warp_nodes) - - # Update the imprinted representation - update_container( - node, - updated_dict - ) - self.log.info("udated to version: {}".format(version.get("name"))) - - def remove(self, container): - - from avalon.nuke import viewer_update_and_undo_stop - - node = nuke.toNode(container['objectName']) - assert node.Class() == "Read", "Must be Read" - - with viewer_update_and_undo_stop(): - nuke.delete(node) diff --git a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py new file mode 100644 index 0000000000..4b94a941e7 --- /dev/null +++ b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py @@ -0,0 +1,33 @@ +from avalon import api + + +class LoadSequencesToTimelineAssetOrigin(api.Loader): + """Load image sequence into Hiero timeline + + Place clip to timeline on its asset origin timings collected + during conforming to project + """ + + families = ["render2d", "source", "plate", "render"] + representations = ["exr", "dpx", "jpg", "jpeg", "png"] + + label = "Load to timeline with shot origin timing" + order = -10 + icon = "code-fork" + color = "orange" + + def load(self, context, name, namespace, data): + pass + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """ Updating previously loaded clips + """ + pass + + def remove(self, container): + """ Removing previously loaded clips + """ + pass From 3f4b89f6854210612ac4ce71028aa440c4915373 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 20 Feb 2020 18:19:43 +0100 Subject: [PATCH 298/434] feat(nks): loader object wip --- pype/nukestudio/lib.py | 97 +++++++++++++++++++++++++++++------------- 1 file changed, 68 insertions(+), 29 deletions(-) diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index 3115428d8c..6b255a0b40 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -363,44 +363,84 @@ def CreateNukeWorkfile(nodes=None, ) -class ClipsLoader: +class ClipLoader: + data = dict() - def __init__(self, representations, **kwargs): + def __init__(self, plugin_cls, context, **kwargs): """ Initialize object Arguments: - hiero_workfile_name (str): name of workfile - representations (dict): representations for processing - example: {assetName_subsetName_representationName: { - "_id": ObjectId("5as5d54fa56dfa56s6d56asddf4as"), - "path": "path/to/file/created/by/get_repr..", - "binPath": "projectBinPath", - "context": { - "subset": "subsetName", - "task": "taskName", - "family": "write", - "hierarchy": "parent/subparent", - "frame": "0996", - "project": { - "code": "j01test", - "name": "J01_jakub_test" - }, - "version": 1, - "asset": "assetName", - "representation": "representationName", - "root": "projectsRootPath" - } - } - } + plugin_cls (api.Loader): plugin object + context (dict): loader plugin context + kwargs (dict)[optional]: possible keys: + project_bin_path: 
"path/to/binItem" + hiero_workfile_name: "name_of_hiero_project_file_no_extension" + """ - self.representations = representations + self.cls = plugin_cls + self.context = context self.kwargs = kwargs self.active_project = self.get_active_project() self.project_bin = self.active_project.clipsBin() + assert self.set_data(), str("Cannot Load selected data, look into " + "database or call your supervisor") + # inject asset data to representation dict self.get_asset_data() + def set_data(self): + """ Gets context and convert it to self.data + data structure: + { + "name": "assetName_subsetName_representationName" + "path": "path/to/file/created/by/get_repr..", + "binPath": "projectBinPath", + } + """ + # create name + repr = self.context["representaion"] + repr_cntx = repr["context"] + asset = repr_cntx["asset"] + subset = repr_cntx["subset"] + representation = repr_cntx["representation"] + self.data["name"] = "_".join([asset, subset, representation]) + + # gets file path + file = self.cls.fname + + if not file: + repr_id = repr["_id"] + log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return None + + self.data["path"] = file.replace("\\", "/") + + if repr_cntx.get("frame"): + self.fix_path_hashes() + + # solve project bin structure path + hierarchy = "/".join(( + "Loader", + repr_cntx["hierarchy"].replace("\\", "/"), + asset + )) + self.data["binPath"] = self.kwargs.get( + "project_bin_path", + hierarchy + ) + + def fix_path_hashes(self): + """ Convert file path where it is needed padding with hashes + """ + file = self.data["path"] + if "#" not in file: + frame = self.context["representaion"]["context"].get("frame") + padding = len(frame) + file = file.replace(frame, "#"*padding) + self.data["path"] = file + def get_active_project(self): """ Get hiero active project object """ @@ -416,9 +456,8 @@ class ClipsLoader: joint `data` key with asset.data dict into the representaion """ - for name, data in self.representations.items(): - asset_name = data["context"]["asset"] - data["data"] = pype.get_asset(asset_name)["data"] + asset_name = self.context["representaion"]["context"]["asset"] + self.data["assetData"] = pype.get_asset(asset_name)["data"] def make_project_bin(self, hierarchy): """ Creare bins by given hierarchy path From 1f6bdb44aa07959f439a1fac5093654b9057f704 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 21 Feb 2020 11:55:13 +0100 Subject: [PATCH 299/434] added custom exception message when project is not found --- pype/plugins/ftrack/publish/collect_ftrack_api.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index f79d74453b..dd9f5f5184 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -35,7 +35,17 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): # Find project entity project_query = 'Project where full_name is "{0}"'.format(project_name) self.log.debug("Project query: < {0} >".format(project_query)) - project_entity = session.query(project_query).one() + project_entity = list(session.query(project_query).all()) + if len(project_entity) == 0: + raise AssertionError( + "Project \"{0}\" not found in Ftrack.".format(project_name) + ) + # QUESTION Is possible to happen? + elif len(project_entity) > 1: + raise AssertionError(( + "Found more than one project with name \"{0}\" in Ftrack." 
+ ).format(project_name)) + self.log.debug("Project found: {0}".format(project_entity)) # Find asset entity From 9f6a9ed496d5978970f1207481a54d61f0a4b30a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 21 Feb 2020 11:55:56 +0100 Subject: [PATCH 300/434] ftrack task query won't crash but log warning if task not found --- pype/plugins/ftrack/publish/collect_ftrack_api.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index dd9f5f5184..94bc88b983 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -63,8 +63,15 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): 'Task where name is "{0}" and parent_id is "{1}"' ).format(task_name, asset_entity["id"]) self.log.debug("Task entity query: < {0} >".format(task_query)) - task_entity = session.query(task_query).one() - self.log.debug("Task entity found: {0}".format(task_entity)) + task_entity = session.query(task_query).first() + if not task_entity: + self.log.warning( + "Task entity with name \"{0}\" was not found.".format( + task_name + ) + ) + else: + self.log.debug("Task entity found: {0}".format(task_entity)) else: task_entity = None From 2562dcfc7fde41e4767a43c1df260dcbf0e67deb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 21 Feb 2020 11:56:40 +0100 Subject: [PATCH 301/434] typed context entities ignore tasks and added custom exception messages --- .../ftrack/publish/collect_ftrack_api.py | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index 94bc88b983..47a6cc3826 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -54,7 +54,25 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): ' and name is "{1}"' ).format(project_entity["id"], asset_name) self.log.debug("Asset entity query: < {0} >".format(entity_query)) - asset_entity = session.query(entity_query).one() + asset_entities = [] + for entity in session.query(entity_query).all(): + # Skip tasks + if entity.entity_type.lower() != "task": + asset_entities.append(entity) + + if len(asset_entities) == 0: + raise AssertionError(( + "Entity with name \"{0}\" not found" + " in Ftrack project \"{1}\"." + ).format(asset_name, project_name)) + + elif len(asset_entities) > 1: + raise AssertionError(( + "Found more than one entity with name \"{0}\"" + " in Ftrack project \"{1}\"." 
+ ).format(asset_name, project_name)) + + asset_entity = asset_entities[0] self.log.debug("Asset found: {0}".format(asset_entity)) # Find task entity if task is set From 21956523445684110bf2786a9f9d12b0fb738641 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Fri, 21 Feb 2020 13:28:50 +0100 Subject: [PATCH 302/434] fixed start frame if it doesn't match render --- .../global/publish/collect_filesequences.py | 16 ++++++++++++++++ pype/plugins/global/publish/extract_review.py | 3 +++ 2 files changed, 19 insertions(+) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 8b42606e4a..f7ce5fab00 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -256,10 +256,16 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): ) ext = collection.tail.lstrip(".") + detected_start = min(collection.indexes) + detected_end = max(collection.indexes) + representation = { "name": ext, "ext": "{}".format(ext), "files": list(collection), + "frameStart": frame_start, + "detectedStart": detected_start, + "detectedEnd": detected_end, "stagingDir": root, "anatomy_template": "render", "fps": fps, @@ -323,12 +329,17 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if "slate" in instance.data["families"]: frame_start += 1 + detected_start = min(collection.indexes) + detected_end = max(collection.indexes) + representation = { "name": ext, "ext": "{}".format(ext), "files": list(collection), "frameStart": frame_start, "frameEnd": frame_end, + "detectedStart": detected_start, + "detectedEnd": detected_end, "stagingDir": root, "anatomy_template": "render", "fps": fps, @@ -394,6 +405,9 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): if "review" not in families: families.append("review") + detected_start = min(collection.indexes) + detected_end = max(collection.indexes) + instance.data.update( { "name": str(collection), @@ -428,6 +442,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "files": list(collection), "frameStart": start, "frameEnd": end, + "detectedStart": detected_start, + "detectedEnd": detected_end, "stagingDir": root, "anatomy_template": "render", "fps": fps, diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 4d63e2c641..4f96491638 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -149,6 +149,9 @@ class ExtractReview(pyblish.api.InstancePlugin): # necessary input data # adds start arg only if image sequence if isinstance(repre["files"], list): + + if start_frame != repre.get("detectedStart", start_frame): + start_frame = repre.get("detectedStart") input_args.append( "-start_number {0} -framerate {1}".format( start_frame, fps)) From 4435eb8c1454e1cd4748f66d0d3a1f27eef29e9c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 21 Feb 2020 17:36:17 +0100 Subject: [PATCH 303/434] current frame now works correctly and has correct position --- pype/scripts/otio_burnin.py | 58 ++++++++++++++++++++++++++++--------- 1 file changed, 44 insertions(+), 14 deletions(-) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index c61ea66d2d..46b2d1421c 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -1,17 +1,14 @@ import os import sys import re -import datetime import subprocess import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins from pypeapp.lib 
import config -from pype import api as pype -from subprocess import Popen, PIPE -# FFmpeg in PATH is required +from pypeapp import Logger -log = pype.Logger().get_logger("BurninWrapper", "burninwrap") +log = Logger().get_logger("BurninWrapper", "burninwrap") ffmpeg_path = os.environ.get("FFMPEG_PATH") @@ -41,6 +38,7 @@ TIMECODE = ( MISSING_KEY_VALUE = "N/A" CURRENT_FRAME_KEY = "{current_frame}" +CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_" TIME_CODE_KEY = "{timecode}" @@ -136,7 +134,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if options_init: self.options_init.update(options_init) - def add_text(self, text, align, frame_start=None, options=None): + def add_text( + self, text, align, frame_start=None, frame_end=None, options=None + ): """ Adding static text to a filter. @@ -152,11 +152,15 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if frame_start: options["frame_offset"] = frame_start + # `frame_end` is only for meassurements of text position + if frame_end: + options["frame_end"] = frame_end + self._add_burnin(text, align, options, DRAWTEXT) def add_timecode( - self, align, frame_start=None, frame_start_tc=None, text=None, - options=None + self, align, frame_start=None, frame_end=None, frame_start_tc=None, + text=None, options=None ): """ Convenience method to create the frame number expression. @@ -174,6 +178,10 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if frame_start: options["frame_offset"] = frame_start + # `frame_end` is only for meassurements of text position + if frame_end: + options["frame_end"] = frame_end + if not frame_start_tc: frame_start_tc = options["frame_offset"] @@ -197,10 +205,31 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): :param enum align: alignment, must use provided enum flags :param dict options: """ + + final_text = text + text_for_size = text + if CURRENT_FRAME_SPLITTER in text: + frame_start = options["frame_offset"] + frame_end = options.get("frame_end", frame_start) + if not frame_start: + replacement_final = replacement_size = str(MISSING_KEY_VALUE) + else: + replacement_final = "\\'{}\\'".format( + r'%%{eif\:n+%d\:d}' % frame_start + ) + replacement_size = str(frame_end) + + final_text = final_text.replace( + CURRENT_FRAME_SPLITTER, replacement_final + ) + text_for_size = text_for_size.replace( + CURRENT_FRAME_SPLITTER, replacement_size + ) + resolution = self.resolution data = { 'text': ( - text + final_text .replace(",", r"\,") .replace(':', r'\:') ), @@ -208,7 +237,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): 'size': options['font_size'] } timecode_text = options.get("timecode") or "" - text_for_size = text + timecode_text + text_for_size += timecode_text data.update(options) data.update( ffmpeg_burnins._drawtext(align, resolution, text_for_size, options) @@ -272,7 +301,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): ) print(command) - proc = Popen(command, shell=True) + proc = subprocess.Popen(command, shell=True) proc.communicate() if proc.returncode != 0: raise RuntimeError("Failed to render '%s': %s'" @@ -368,6 +397,7 @@ def burnins_from_data( burnin = ModifiedBurnins(input_path, options_init=options_init) frame_start = data.get("frame_start") + frame_end = data.get("frame_end") frame_start_tc = data.get('frame_start_tc', frame_start) stream = burnin._streams[0] @@ -382,7 +412,7 @@ def burnins_from_data( # Check frame start and add expression if is available if frame_start is not None: - data[CURRENT_FRAME_KEY[1:-1]] = r'%%{eif\:n+%d\:d}' % frame_start + data[CURRENT_FRAME_KEY[1:-1]] = CURRENT_FRAME_SPLITTER if 
frame_start_tc is not None: data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY @@ -432,7 +462,7 @@ def burnins_from_data( # Handle timecode differently if has_timecode: - args = [align, frame_start, frame_start_tc] + args = [align, frame_start, frame_end, frame_start_tc] if not value.startswith(TIME_CODE_KEY): value_items = value.split(TIME_CODE_KEY) text = value_items[0].format(**data) @@ -442,7 +472,7 @@ def burnins_from_data( continue text = value.format(**data) - burnin.add_text(text, align, frame_start) + burnin.add_text(text, align, frame_start, frame_end) codec_args = "" if codec_data: From 7b70a6e52dab4dbe84718865cc12a6c5bd10fc80 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 21 Feb 2020 17:44:52 +0100 Subject: [PATCH 304/434] bugfix: missing io.install in colelctavalonentities --- pype/plugins/global/publish/collect_avalon_entities.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index c256dffd52..35852587d5 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -19,6 +19,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): label = "Collect Avalon Entities" def process(self, context): + is.install() project_name = api.Session["AVALON_PROJECT"] asset_name = api.Session["AVALON_ASSET"] From 03fbbf7d10a55f0dd943ae3a3a731ee8e419c681 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 21 Feb 2020 17:47:33 +0100 Subject: [PATCH 305/434] typo --- pype/plugins/global/publish/collect_avalon_entities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index 35852587d5..a429b3fc84 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -19,7 +19,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): label = "Collect Avalon Entities" def process(self, context): - is.install() + io.install() project_name = api.Session["AVALON_PROJECT"] asset_name = api.Session["AVALON_ASSET"] From 8ae23bae93c1b11a0500a9900db4e2c12f9f7a8f Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Fri, 21 Feb 2020 21:12:00 +0100 Subject: [PATCH 306/434] fixed preview tag --- pype/plugins/global/publish/submit_publish_job.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 7b5dac28d4..f6f5d5abba 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -381,7 +381,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "stagingDir": staging, "anatomy_template": "render", "fps": new_instance.get("fps"), - "tags": ["review", "preview"] if preview else [] + "tags": ["review"] if preview else [] } # add tags From 86d3f4d7d2e3d0d95e28b06b1523d0073bc8d7e3 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sat, 22 Feb 2020 00:19:50 +0100 Subject: [PATCH 307/434] prioritize instance version in burnins --- pype/plugins/global/publish/extract_burnin.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index b95c15f340..d09ba91f72 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ 
b/pype/plugins/global/publish/extract_burnin.py @@ -26,8 +26,8 @@ class ExtractBurnin(pype.api.Extractor): if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") - version = instance.context.data.get( - 'version', instance.data.get('version')) + version = instance.data.get( + 'version', instance.context.data.get('version')) frame_start = int(instance.data.get("frameStart") or 0) frame_end = int(instance.data.get("frameEnd") or 1) duration = frame_end - frame_start + 1 From 66a2bc8201dab971a2d98934e541519dba39406b Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sat, 22 Feb 2020 00:20:56 +0100 Subject: [PATCH 308/434] append renders and fix representation names --- pype/plugins/global/publish/submit_publish_job.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index f6f5d5abba..df00803326 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -170,7 +170,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): } # list of family names to transfer to new family if present - families_transfer = ["render2d", "ftrack", "slate"] + families_transfer = ["render3d", "render2d", "ftrack", "slate"] def _submit_deadline_post_job(self, instance, job): """ @@ -372,7 +372,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # create represenation rep = { - "name": aov, + "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(cols[0])], "frameStart": start, @@ -617,12 +617,14 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): `foo` and `xxx` """ + self.log.info(data.get("expectedFiles")) + if isinstance(data.get("expectedFiles")[0], dict): # we cannot attach AOVs to other subsets as we consider every # AOV subset of its own. 
if len(data.get("attachTo")) > 0: - assert len(data.get("expectedFiles")[0].keys()) > 1, ( + assert len(data.get("expectedFiles")[0].keys()) == 1, ( "attaching multiple AOVs or renderable cameras to " "subset is not supported") @@ -660,7 +662,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): new_i = copy(i) new_i["version"] = at.get("version") new_i["subset"] = at.get("subset") - new_i["families"].append(at.get("family")) + new_i["family"] = at.get("family") + new_i["append"] = True new_instances.append(new_i) self.log.info(" - {} / v{}".format( at.get("subset"), at.get("version"))) From 61f909c3827853d395b40a281e54181699e65cfd Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sat, 22 Feb 2020 00:22:52 +0100 Subject: [PATCH 309/434] simplify version determination --- .../maya/publish/determine_future_version.py | 63 +------------------ 1 file changed, 3 insertions(+), 60 deletions(-) diff --git a/pype/plugins/maya/publish/determine_future_version.py b/pype/plugins/maya/publish/determine_future_version.py index 72dbf719d7..afa249aca2 100644 --- a/pype/plugins/maya/publish/determine_future_version.py +++ b/pype/plugins/maya/publish/determine_future_version.py @@ -1,6 +1,4 @@ import pyblish -from avalon import api, io - class DetermineFutureVersion(pyblish.api.InstancePlugin): """ @@ -20,66 +18,11 @@ class DetermineFutureVersion(pyblish.api.InstancePlugin): for i in context: if i.data["subset"] in attach_to_subsets: - latest_version = self._get_latest_version(i.data["subset"]) - - # this will get corresponding subset in attachTo list - # so we can set version there + # # this will get corresponding subset in attachTo list + # # so we can set version there sub = next(item for item in instance.data['attachTo'] if item["subset"] == i.data["subset"]) # noqa: E501 - if not latest_version: - # if latest_version is None, subset is not yet in - # database so we'll check its instance to see if version - # is there and use that, or we'll just stay with v1 - latest_version = i.data.get("version", 1) - - sub["version"] = latest_version + sub["version"] = i.data.get("version", 1) self.log.info("render will be attached to {} v{}".format( sub["subset"], sub["version"] )) - - def _get_latest_version(self, subset): - latest_version = None - - project_name = api.Session["AVALON_PROJECT"] - asset_name = api.Session["AVALON_ASSET"] - - project_entity = io.find_one({ - "type": "project", - "name": project_name - }) - - assert project_entity, ( - "Project '{0}' was not found." 
-        ).format(project_name)
-
-        asset_entity = io.find_one({
-            "type": "asset",
-            "name": asset_name,
-            "parent": project_entity["_id"]
-        })
-        assert asset_entity, (
-            "No asset found by the name '{0}' in project '{1}'"
-        ).format(asset_name, project_name)
-
-        if asset_entity:
-            subset_entity = io.find_one({
-                "type": "subset",
-                "name": subset,
-                "parent": asset_entity["_id"]
-            })
-
-            if subset_entity is None:
-                self.log.info("Subset entity does not exist yet.")
-                pass
-
-            else:
-                version_entity = io.find_one(
-                    {
-                        "type": "version",
-                        "parent": subset_entity["_id"]
-                    },
-                    sort=[("name", -1)]
-                )
-                if version_entity:
-                    latest_version = version_entity["name"]
-        return latest_version

From 284da78ccdbebc76fba70189bf9e8a9dd0a181a8 Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Sat, 22 Feb 2020 00:23:23 +0100
Subject: [PATCH 310/434] allow multichannel rendering

---
 .../maya/publish/validate_rendersettings.py   | 20 +++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)

diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py
index d6cbea8b2d..c98f0f8cdc 100644
--- a/pype/plugins/maya/publish/validate_rendersettings.py
+++ b/pype/plugins/maya/publish/validate_rendersettings.py
@@ -50,7 +50,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

     ImagePrefixTokens = {

-        'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass',
+        'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
         'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
         'vray': 'maya/<Scene>/<Layer>/<Layer>',
         'renderman': '<layer>_<aov>.<f4>.<ext>'
@@ -143,11 +143,19 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
                               dir_prefix))

         else:
-            if not re.search(cls.R_AOV_TOKEN, prefix):
-                invalid = True
-                cls.log.error("Wrong image prefix [ {} ] - "
-                              "doesn't have: '<renderpass>' or "
-                              "<aov> token".format(prefix))
+            multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
+            if multichannel:
+                if re.search(cls.R_AOV_TOKEN, prefix):
+                    invalid = True
+                    cls.log.error("Wrong image prefix [ {} ] - "
+                                  "You can't use '<RenderPass>' token "
+                                  "with merge AOVs turned on".format(prefix))
+            else:
+                if not re.search(cls.R_AOV_TOKEN, prefix):
+                    invalid = True
+                    cls.log.error("Wrong image prefix [ {} ] - "
+                                  "doesn't have: '<renderpass>' or "
+                                  "<aov> token".format(prefix))

         # prefix check
         if prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
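
The validation in the hunk above reduces to a case-insensitive regex search over
the image prefix, inverted when Arnold's mergeAOVs is enabled. A minimal
standalone sketch of that logic follows; the exact `R_AOV_TOKEN` pattern is not
visible in this patch, so the pattern below is an assumption built from the
tokens the error messages mention:

    import re

    # Assumed pattern -- the real R_AOV_TOKEN in
    # validate_rendersettings.py may differ.
    R_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)

    def prefix_is_valid(prefix, merge_aovs):
        """With merged AOVs the prefix must not carry an AOV token,
        otherwise it must carry one."""
        has_token = bool(R_AOV_TOKEN.search(prefix))
        return not has_token if merge_aovs else has_token

    # Merged AOVs: one multichannel file, no per-pass token allowed.
    assert prefix_is_valid("maya/<Scene>/<RenderLayer>/<RenderLayer>", True)
    # Separate AOVs: the prefix must distinguish passes.
    assert prefix_is_valid(
        "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>", False)
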
From 6f8a860f00c76a5d3b93d96155bec8fda0343889 Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Sat, 22 Feb 2020 01:19:32 +0100
Subject: [PATCH 311/434] rendering is done by default from published file

---
 .../global/publish/submit_publish_job.py      |  2 +-
 pype/plugins/maya/publish/collect_render.py   |  4 ++
 .../maya/publish/submit_maya_deadline.py      | 57 +++++++++++++++++--
 3 files changed, 57 insertions(+), 6 deletions(-)

diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index df00803326..29dce58101 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -662,8 +662,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             new_i = copy(i)
             new_i["version"] = at.get("version")
             new_i["subset"] = at.get("subset")
-            new_i["family"] = at.get("family")
             new_i["append"] = True
+            new_i["families"].append(at.get("family"))
             new_instances.append(new_i)
             self.log.info(" - {} / v{}".format(
                 at.get("subset"), at.get("version")))
diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index f76ee1120f..07eec4192f 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -101,6 +101,10 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
         render_instance = instance
         render_instance.data["remove"] = True
 
+        # make sure workfile instance publishing is enabled
+        if 'workfile' in instance.data['families']:
+            instance.data["publish"] = True
+
         if not render_instance:
             self.log.info("No render instance found, skipping render "
                           "layer collection.")
diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py
index 4c6399a880..2f236be424 100644
--- a/pype/plugins/maya/publish/submit_maya_deadline.py
+++ b/pype/plugins/maya/publish/submit_maya_deadline.py
@@ -117,6 +117,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
     else:
         optional = True
 
+    use_published = True
+
     def process(self, instance):
 
         DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
@@ -125,21 +127,66 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
 
         context = instance.context
         workspace = context.data["workspaceDir"]
+        anatomy = context.data['anatomy']
         filepath = None
 
+        if self.use_published:
+            for i in context:
+                if "workfile" in i.data["families"]:
+                    assert i.data["publish"] is True, (
+                        "Workfile (scene) must be published along")
+                    template_data = i.data.get("anatomyData")
+                    rep = i.data.get("representations")[0].get("name")
+                    template_data["representation"] = rep
+                    template_data["ext"] = rep
+                    template_data["comment"] = None
+                    anatomy_filled = anatomy.format(template_data)
+                    template_filled = anatomy_filled["publish"]["path"]
+                    filepath = os.path.normpath(template_filled)
+                    self.log.info("Using published scene for render {}".format(
+                        filepath))
+
+                    # now we need to switch scene in expected files
+                    # because token will now point to published
+                    # scene file and that might differ from current one
+                    new_scene = os.path.splitext(
+                        os.path.basename(filepath))[0]
+                    orig_scene = os.path.splitext(
+                        os.path.basename(context.data["currentFile"]))[0]
+                    exp = instance.data.get("expectedFiles")
+
+                    if isinstance(exp[0], dict):
+                        # we have aovs and we need to iterate over them
+                        new_exp = {}
+                        for aov, files in exp[0].items():
+                            replaced_files = []
+                            for f in files:
+                                replaced_files.append(
+                                    f.replace(orig_scene, new_scene)
+                                )
+                            new_exp[aov] = replaced_files
+                        instance.data["expectedFiles"] = [new_exp]
+                    else:
+                        new_exp = []
+                        for f in exp:
+                            new_exp.append(
+                                f.replace(orig_scene, new_scene)
+                            )
+                        instance.data["expectedFiles"] = [new_exp]
+                    self.log.info("Scene name was switched {} -> {}".format(
+                        orig_scene, new_scene
+                    ))
+
         allInstances = []
         for result in context.data["results"]:
             if (result["instance"] is not None and
                result["instance"] not in allInstances):
                 allInstances.append(result["instance"])
 
-        for inst in allInstances:
-            print(inst)
-            if inst.data['family'] == 'scene':
-                filepath = inst.data['destination_list'][0]
-
+        # fallback if nothing was set
         if not filepath:
+            self.log.warning("Falling back to workfile")
             filepath = context.data["currentFile"]
 
         self.log.debug(filepath)
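
The expected-files rewrite in the hunk above is a plain string substitution of
the work scene name for the published scene name, applied per AOV when the
expected files arrive as a dict. A small self-contained sketch of the dict
variant; the file and scene names below are invented for illustration:

    def swap_scene(expected_files, orig_scene, new_scene):
        """Re-point per-AOV expected render files from the work scene
        name to the published scene name."""
        new_exp = {}
        for aov, files in expected_files.items():
            new_exp[aov] = [f.replace(orig_scene, new_scene) for f in files]
        return new_exp

    files = {"beauty": ["renders/scene_v001/beauty.0001.exr"]}
    print(swap_scene(files, "scene_v001", "scene_v002"))
    # {'beauty': ['renders/scene_v002/beauty.0001.exr']}
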
From b0a1b1a50376cde95a2bd3656f16c7b0c8b888b5 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Sat, 22 Feb 2020 01:22:47 +0100
Subject: [PATCH 312/434] fix missing import after merge

---
 pype/scripts/otio_burnin.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 945fcad3ec..7a724e22bf 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -6,7 +6,7 @@ import json
 import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
 from pypeapp.lib import config
 from pypeapp import Logger
-
+import pype.lib
 
 log = Logger().get_logger("BurninWrapper", "burninwrap")

From cf6f213a967724d1fb879583ce6068ad2e5afe5c Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 24 Feb 2020 15:18:01 +0100
Subject: [PATCH 313/434] added fixes of crashing sync to avalon event from develop to master

---
 pype/ftrack/events/event_sync_to_avalon.py | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index 49ac50c1db..cfeec248fb 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -28,7 +28,7 @@ class SyncToAvalonEvent(BaseEvent):
     ignore_entTypes = [
         "socialfeed", "socialnotification", "note",
         "assetversion", "job", "user", "reviewsessionobject", "timer",
-        "timelog", "auth_userrole", "appointment"
+        "timelog", "auth_userrole", "appointment", "notelabellink"
     ]
     ignore_ent_types = ["Milestone"]
     ignore_keys = ["statusid", "thumbid"]
@@ -1545,6 +1545,14 @@ class SyncToAvalonEvent(BaseEvent):
                     entity_type_conf_ids[entity_type] = configuration_id
                     break
 
+            if not configuration_id:
+                self.log.warning(
+                    "BUG REPORT: Missing configuration for `{} < {} >`".format(
+                        entity_type, ent_info["entityType"]
+                    )
+                )
+                continue
+
             _entity_key = collections.OrderedDict({
                 "configuration_id": configuration_id,
                 "entity_id": ftrack_id

From 5db4d90e474bee119915b680d8b15bd575757734 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 24 Feb 2020 16:51:01 +0100
Subject: [PATCH 314/434] get project from list

---
 pype/plugins/ftrack/publish/collect_ftrack_api.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py
index 47a6cc3826..0aad3b2433 100644
--- a/pype/plugins/ftrack/publish/collect_ftrack_api.py
+++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py
@@ -46,6 +46,7 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
                 "Found more than one project with name \"{0}\" in Ftrack."
).format(project_name)) + project_entity = project_entity[0] self.log.debug("Project found: {0}".format(project_entity)) # Find asset entity From 679acb519affbe0a5e0327afc8c322f0423b0dcc Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 25 Feb 2020 14:46:01 +0100 Subject: [PATCH 315/434] feat(nks): adding loader to menu --- pype/nukestudio/menu.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pype/nukestudio/menu.py b/pype/nukestudio/menu.py index a996389524..816ae40523 100644 --- a/pype/nukestudio/menu.py +++ b/pype/nukestudio/menu.py @@ -104,11 +104,11 @@ def install(): # 'function': creator.show, # 'icon': QIcon('icons:ColorAdd.png') # }, - # { - # 'action': QAction('Load...', None), - # 'function': cbloader.show, - # 'icon': QIcon('icons:CopyRectangle.png') - # }, + { + 'action': QAction('Load...', None), + 'function': cbloader.show, + 'icon': QIcon('icons:CopyRectangle.png') + }, { 'action': QAction('Publish...', None), 'function': publish.show, From 61ca2dee62c47a0900489c3a24c7be7aecddd864 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 25 Feb 2020 14:46:41 +0100 Subject: [PATCH 316/434] feat(nks): wip of loader plugin --- .../load_sequences_to_timeline_asset_origin.py | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py index 4b94a941e7..62ed2e1271 100644 --- a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py +++ b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py @@ -1,4 +1,7 @@ from avalon import api +import hiero +from pype.nukestudio import lib +reload(lib) class LoadSequencesToTimelineAssetOrigin(api.Loader): @@ -17,7 +20,18 @@ class LoadSequencesToTimelineAssetOrigin(api.Loader): color = "orange" def load(self, context, name, namespace, data): - pass + + data.update({ + # "projectBinPath": "Loaded", + "hieroWorkfileName": hiero.ui.activeProject().name() + }) + + self.log.info("data: `{}`".format(data)) + + clip_loader = lib.ClipLoader(self, context, **data) + clip_loader.load() + + self.log.info("Loader done: `{}`".format(name)) def switch(self, container, representation): self.update(container, representation) From 9ec1fecda26d5c6ef6002cc8eea7b1332564fc8a Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Tue, 25 Feb 2020 14:47:00 +0100 Subject: [PATCH 317/434] feat(nks): wip of loader class --- pype/nukestudio/lib.py | 189 ++++++++++++++++++++++++++++++----------- 1 file changed, 140 insertions(+), 49 deletions(-) diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index 6b255a0b40..9cc4df1683 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -364,32 +364,40 @@ def CreateNukeWorkfile(nodes=None, class ClipLoader: - data = dict() - def __init__(self, plugin_cls, context, **kwargs): + active_bin = None + + def __init__(self, plugin_cls, context, sequence=None, track=None, **kwargs): """ Initialize object Arguments: plugin_cls (api.Loader): plugin object context (dict): loader plugin context + sequnce (hiero.core.Sequence): sequence object + track (hiero.core.Track): track object kwargs (dict)[optional]: possible keys: - project_bin_path: "path/to/binItem" - hiero_workfile_name: "name_of_hiero_project_file_no_extension" + projectBinPath: "path/to/binItem" + hieroWorkfileName: "name_of_hiero_project_file_no_extension" """ self.cls = plugin_cls self.context = context self.kwargs = kwargs - 
self.active_project = self.get_active_project() + self.active_project = self._get_active_project() self.project_bin = self.active_project.clipsBin() + self.active_sequence = self._get_active_sequence(sequence) + self.active_track = self._get_active_track(track) - assert self.set_data(), str("Cannot Load selected data, look into " + self.data = dict() + + assert self._set_data(), str("Cannot Load selected data, look into " "database or call your supervisor") # inject asset data to representation dict - self.get_asset_data() + self._get_asset_data() + log.debug("__init__ self.data: `{}`".format(self.data)) - def set_data(self): + def _set_data(self): """ Gets context and convert it to self.data data structure: { @@ -399,67 +407,69 @@ class ClipLoader: } """ # create name - repr = self.context["representaion"] + repr = self.context["representation"] repr_cntx = repr["context"] - asset = repr_cntx["asset"] - subset = repr_cntx["subset"] - representation = repr_cntx["representation"] + asset = str(repr_cntx["asset"]) + subset = str(repr_cntx["subset"]) + representation = str(repr_cntx["representation"]) self.data["name"] = "_".join([asset, subset, representation]) # gets file path file = self.cls.fname - if not file: repr_id = repr["_id"] log.warning( "Representation id `{}` is failing to load".format(repr_id)) return None - self.data["path"] = file.replace("\\", "/") + # convert to hashed path if repr_cntx.get("frame"): - self.fix_path_hashes() + self._fix_path_hashes() # solve project bin structure path - hierarchy = "/".join(( + hierarchy = str("/".join(( "Loader", repr_cntx["hierarchy"].replace("\\", "/"), asset - )) + ))) + self.data["binPath"] = self.kwargs.get( - "project_bin_path", + "projectBinPath", hierarchy ) - def fix_path_hashes(self): + return True + + def _fix_path_hashes(self): """ Convert file path where it is needed padding with hashes """ file = self.data["path"] if "#" not in file: - frame = self.context["representaion"]["context"].get("frame") + frame = self.context["representation"]["context"].get("frame") padding = len(frame) file = file.replace(frame, "#"*padding) self.data["path"] = file - def get_active_project(self): + def _get_active_project(self): """ Get hiero active project object """ - fname = self.kwargs.get("hiero_workfile_name", "") + fname = self.kwargs.get("hieroWorkfileName", "") return next((p for p in hiero.core.projects() if fname in p.name()), hiero.core.projects()[-1]) - def get_asset_data(self): + def _get_asset_data(self): """ Get all available asset data joint `data` key with asset.data dict into the representaion """ - asset_name = self.context["representaion"]["context"]["asset"] + asset_name = self.context["representation"]["context"]["asset"] self.data["assetData"] = pype.get_asset(asset_name)["data"] - def make_project_bin(self, hierarchy): + def _make_project_bin(self, hierarchy): """ Creare bins by given hierarchy path It will also make sure no duplicit bins will be created @@ -470,13 +480,45 @@ class ClipLoader: Returns: bin (hiero.core.BinItem): with the bin to be used for mediaItem """ - pass + if self.active_bin: + return self.active_bin - def make_track_item(self): + assert hierarchy != "", "Please add hierarchy!" 
+ log.debug("__ hierarchy1: `{}`".format(hierarchy)) + if '/' in hierarchy: + hierarchy = hierarchy.split('/') + else: + hierarchy = [hierarchy] + + parent_bin = None + for i, name in enumerate(hierarchy): + # if first index and list is more then one long + if i == 0: + bin = next((bin for bin in self.project_bin.bins() + if name in bin.name()), None) + if not bin: + bin = hiero.core.Bin(name) + self.project_bin.addItem(bin) + log.debug("__ bin.name: `{}`".format(bin.name())) + parent_bin = bin + + # if second to prelast + elif (i >= 1) and (i <= (len(hierarchy) - 1)): + bin = next((bin for bin in parent_bin.bins() + if name in bin.name()), None) + if not bin: + bin = hiero.core.Bin(name) + parent_bin.addItem(bin) + + parent_bin = bin + + return parent_bin + + def _make_track_item(self): """ Create track item with """ pass - def set_clip_color(self, last_version=True): + def _set_clip_color(self, last_version=True): """ Sets color of clip on clip/track item Arguments: @@ -484,7 +526,7 @@ class ClipLoader: """ pass - def set_container_tag(self, item, metadata): + def _set_container_tag(self, item, metadata): """ Sets container tag to given clip/track item Arguments: @@ -493,6 +535,74 @@ class ClipLoader: """ pass + def _get_active_sequence(self, sequence): + if not sequence: + return hiero.ui.activeSequence() + else: + return sequence + + def _get_active_track(self, track): + if not track: + track_name = self.data["name"] + + if track_name not in self.active_sequence.videoTracks(): + track = hiero.core.VideoTrack(track_name) + self.active_sequence.addTrack(track) + + return track + + def load(self): + log.debug("__ active_project: `{}`".format(self.active_project)) + log.debug("__ active_sequence: `{}`".format(self.active_sequence)) + + # create project bin for the media to be imported into + self.active_bin = self._make_project_bin(self.data["binPath"]) + log.debug("__ active_bin: `{}`".format(self.active_bin)) + + # create mediaItem in active project bin + # create clip media + media = hiero.core.MediaSource(self.data["path"]) + media_in = int(media.startTime()) + media_duration = int(media.duration()) + + handle_start = self.data["assetData"]["handleStart"] + handle_end = self.data["assetData"]["handleEnd"] + + if media_in: + source_in = media_in + handle_start + else: + source_in = self.data["assetData"]["frameStart"] + handle_start + + if media_duration: + source_out = (media_in + media_duration - 1) - handle_end + else: + source_out = self.data["assetData"]["frameEnd"]- handle_end + + source = hiero.core.Clip(media) + + # add to bin as clip item + items_in_bin = [b.name() for b in bin.items()] + if self.data["name"] not in items_in_bin: + binItem = hiero.core.BinItem(source) + bin.addItem(binItem) + + new_source = [ + item for item in bin.items() if split_name in item.name() + ][0].items()[0].item() + + # add to track as clip item + trackItem = hiero.core.TrackItem( + self.data["name"], hiero.core.TrackItem.kVideo) + trackItem.setSource(new_source) + trackItem.setSourceIn(self.data["assetData"]["sourceIn"]) + trackItem.setSourceOut(self.data["assetData"]["sourceOut"]) + trackItem.setTimelineIn(self.data["assetData"]["clipIn"]) + trackItem.setTimelineOut(self.data["assetData"]["clipOut"]) + self.active_track.addTrackItem(trackItem) + + log.info("Loading clips: `{}`".format(self.data["name"])) + + def create_nk_workfile_clips(nk_workfiles, seq=None): ''' nk_workfile is list of dictionaries like: @@ -529,9 +639,7 @@ def create_nk_workfile_clips(nk_workfiles, seq=None): else: track = 
seq.tracks(nk['task']) - # create slip media - print("__ path: `{}`".format(nk['path'])) - + # create clip media media = hiero.core.MediaSource(nk['path']) media_in = int(media.startTime() or 0) media_duration = int(media.duration() or 0) @@ -549,47 +657,30 @@ def create_nk_workfile_clips(nk_workfiles, seq=None): else: source_out = nk["frameEnd"] - handle_end - print("__ media: `{}`".format(media)) - print("__ media_in: `{}`".format(media_in)) - print("__ media_duration : `{}`".format(media_duration)) - print("__ source_in: `{}`".format(source_in)) - print("__ source_out : `{}`".format(source_out)) - source = hiero.core.Clip(media) - print("__ source : `{}`".format(source)) - print("__ source.sourceIn(): `{}`".format(source.sourceIn())) name = os.path.basename(os.path.splitext(nk['path'])[0]) split_name = split_by_client_version(name)[0] or name - print("__ split_name: `{}`".format(split_name)) - # add to bin as clip item items_in_bin = [b.name() for b in bin.items()] if split_name not in items_in_bin: binItem = hiero.core.BinItem(source) bin.addItem(binItem) - print("__ bin.items(): `{}`".format(bin.items())) - new_source = [ item for item in bin.items() if split_name in item.name() ][0].items()[0].item() - print("__ new_source: `{}`".format(new_source)) - print("__ new_source: `{}`".format(new_source)) - # add to track as clip item trackItem = hiero.core.TrackItem( split_name, hiero.core.TrackItem.kVideo) trackItem.setSource(new_source) trackItem.setSourceIn(source_in) trackItem.setSourceOut(source_out) - trackItem.setSourceIn(source_in) trackItem.setTimelineIn(nk["clipIn"]) trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) track.addTrackItem(trackItem) - track.addTrackItem(trackItem) clips_lst.append(trackItem) return clips_lst From c55a8c0ec6c62ca1735a2b8e712f0c6136c169d4 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 26 Feb 2020 09:43:47 +0100 Subject: [PATCH 318/434] entityTypes are not ignored but are specified which will be processed --- pype/ftrack/events/event_sync_to_avalon.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index cfeec248fb..bb4bba1324 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -25,11 +25,7 @@ class SyncToAvalonEvent(BaseEvent): dbcon = DbConnector() - ignore_entTypes = [ - "socialfeed", "socialnotification", "note", - "assetversion", "job", "user", "reviewsessionobject", "timer", - "timelog", "auth_userrole", "appointment", "notelabellink" - ] + interest_entTypes = ["show", "task"] ignore_ent_types = ["Milestone"] ignore_keys = ["statusid", "thumbid"] @@ -477,7 +473,7 @@ class SyncToAvalonEvent(BaseEvent): found_actions = set() for ent_info in entities_info: entityType = ent_info["entityType"] - if entityType in self.ignore_entTypes: + if entityType not in self.interest_entTypes: continue entity_type = ent_info.get("entity_type") From d51cc4b2ec9284592bb454d921904bd4c7f3880b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 26 Feb 2020 13:53:06 +0100 Subject: [PATCH 319/434] don't care about missing proj mainly when prepare projects is launched --- pype/ftrack/events/event_sync_to_avalon.py | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index bb4bba1324..6f6e86ee20 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ 
b/pype/ftrack/events/event_sync_to_avalon.py @@ -102,9 +102,10 @@ class SyncToAvalonEvent(BaseEvent): if self._avalon_ents_by_id is None: self._avalon_ents_by_id = {} proj, ents = self.avalon_entities - self._avalon_ents_by_id[proj["_id"]] = proj - for ent in ents: - self._avalon_ents_by_id[ent["_id"]] = ent + if proj: + self._avalon_ents_by_id[proj["_id"]] = proj + for ent in ents: + self._avalon_ents_by_id[ent["_id"]] = ent return self._avalon_ents_by_id @property @@ -124,13 +125,14 @@ class SyncToAvalonEvent(BaseEvent): if self._avalon_ents_by_ftrack_id is None: self._avalon_ents_by_ftrack_id = {} proj, ents = self.avalon_entities - ftrack_id = proj["data"]["ftrackId"] - self._avalon_ents_by_ftrack_id[ftrack_id] = proj - for ent in ents: - ftrack_id = ent["data"].get("ftrackId") - if ftrack_id is None: - continue - self._avalon_ents_by_ftrack_id[ftrack_id] = ent + if proj: + ftrack_id = proj["data"]["ftrackId"] + self._avalon_ents_by_ftrack_id[ftrack_id] = proj + for ent in ents: + ftrack_id = ent["data"].get("ftrackId") + if ftrack_id is None: + continue + self._avalon_ents_by_ftrack_id[ftrack_id] = ent return self._avalon_ents_by_ftrack_id @property From 54cc0781932af210e7346316795da53395ecbb54 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 26 Feb 2020 15:14:35 +0100 Subject: [PATCH 320/434] allow create project structure for project managers --- pype/ftrack/actions/action_create_project_structure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py index 4589802f3a..6124ebe843 100644 --- a/pype/ftrack/actions/action_create_project_structure.py +++ b/pype/ftrack/actions/action_create_project_structure.py @@ -19,7 +19,7 @@ class CreateProjectFolders(BaseAction): #: Action description. 
description = 'Creates folder structure' #: roles that are allowed to register this action - role_list = ['Pypeclub', 'Administrator'] + role_list = ['Pypeclub', 'Administrator', 'Project Manager'] icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format( os.environ.get('PYPE_STATICS_SERVER', '') ) From 09ef82cfd2e1b7c53c0f19010d7371b8b36dfe5f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Wed, 26 Feb 2020 16:05:33 +0100 Subject: [PATCH 321/434] switching context will now change content to new asset --- pype/maya/__init__.py | 1 + pype/maya/lib.py | 26 ++++++++++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index f027893a0e..726fd14faa 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -218,3 +218,4 @@ def on_task_changed(*args): # Run maya.pipeline._on_task_changed() + lib.update_content_on_context_change() diff --git a/pype/maya/lib.py b/pype/maya/lib.py index dafc281903..5333b75359 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2452,3 +2452,29 @@ class shelf(): cmds.deleteUI(each) else: cmds.shelfLayout(self.name, p="ShelfLayout") + + +def update_content_on_context_change(): + """ + This will update scene content to match new asset on context change + """ + scene_sets = cmds.listSets(allSets=True) + new_asset = api.Session["AVALON_ASSET"] + new_data = lib.get_asset()["data"] + for s in scene_sets: + try: + if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance": + attr = cmds.listAttr(s) + print(s) + if "asset" in attr: + print(" - setting asset to: [ {} ]".format(new_asset)) + cmds.setAttr("{}.asset".format(s), + new_asset, type="string") + if "frameStart" in attr: + cmds.setAttr("{}.frameStart".format(s), + new_data["frameStart"]) + if "frameEnd" in attr: + cmds.setAttr("{}.frameEnd".format(s), + new_data["frameEnd"],) + except ValueError: + pass From c005090874f8f140b799a6a73e1d3b408a11199c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 26 Feb 2020 18:50:09 +0100 Subject: [PATCH 322/434] nukestudio menu build was changed from actions list to hardcoded list with removed unused imports and removed library loader --- pype/nukestudio/menu.py | 114 +++++++++++----------------------------- 1 file changed, 30 insertions(+), 84 deletions(-) diff --git a/pype/nukestudio/menu.py b/pype/nukestudio/menu.py index a996389524..a97b24c3fe 100644 --- a/pype/nukestudio/menu.py +++ b/pype/nukestudio/menu.py @@ -50,14 +50,7 @@ def install(): """ # here is the best place to add menu - from avalon.tools import ( - creator, - publish, - cbloader, - cbsceneinventory, - contextmanager, - libraryloader - ) + from avalon.tools import publish menu_name = os.environ['AVALON_LABEL'] @@ -80,81 +73,34 @@ def install(): else: menu = check_made_menu.menu() - actions = [ - { - 'parent': context_label, - 'action': QAction('Set Context', None), - 'function': contextmanager.show, - 'icon': QIcon('icons:Position.png') - }, - "separator", - { - 'action': QAction("Work Files...", None), - 'function': set_workfiles, - 'icon': QIcon('icons:Position.png') - }, - { - 'action': QAction('Create Default Tags..', None), - 'function': add_tags_from_presets, - 'icon': QIcon('icons:Position.png') - }, - "separator", - # { - # 'action': QAction('Create...', None), - # 'function': creator.show, - # 'icon': QIcon('icons:ColorAdd.png') - # }, - # { - # 'action': QAction('Load...', None), - # 'function': cbloader.show, - # 'icon': QIcon('icons:CopyRectangle.png') - # }, - { - 'action': QAction('Publish...', 
None), - 'function': publish.show, - 'icon': QIcon('icons:Output.png') - }, - # { - # 'action': QAction('Manage...', None), - # 'function': cbsceneinventory.show, - # 'icon': QIcon('icons:ModifyMetaData.png') - # }, - { - 'action': QAction('Library...', None), - 'function': libraryloader.show, - 'icon': QIcon('icons:ColorAdd.png') - }, - "separator", - { - 'action': QAction('Reload pipeline...', None), - 'function': reload_config, - 'icon': QIcon('icons:ColorAdd.png') - }] + context_label_action = menu.addAction(context_label) + context_label_action.setEnabled(False) - # Create menu items - for a in actions: - add_to_menu = menu - if isinstance(a, dict): - # create action - for k in a.keys(): - if 'parent' in k: - submenus = [sm for sm in a[k].split('/')] - submenu = None - for sm in submenus: - if submenu: - submenu.addMenu(sm) - else: - submenu = menu.addMenu(sm) - add_to_menu = submenu - if 'action' in k: - action = a[k] - elif 'function' in k: - action.triggered.connect(a[k]) - elif 'icon' in k: - action.setIcon(a[k]) + menu.addSeparator() - # add action to menu - add_to_menu.addAction(action) - hiero.ui.registerAction(action) - elif isinstance(a, str): - add_to_menu.addSeparator() + workfiles_action = menu.addAction("Work Files...") + workfiles_action.setIcon("icons:Position.png") + workfiles_action.triggered.connect(set_workfiles) + + default_tags_action = menu.addAction("Create Default Tags...") + default_tags_action.setIcon("icons:Position.png") + default_tags_action.triggered.connect(add_tags_from_presets) + + menu.addSeparator() + + publish_action = menu.addAction("Publish...") + publish_action.setIcon("icons:Output.png") + publish_action.triggered.connect(publish.show) + + menu.addSeparator() + + reload_action = menu.addAction("Reload pipeline...") + reload_action.setIcon("icons:ColorAdd.png") + reload_action.triggered.connect(reload_config) + + self.context_label_action = context_label_action + self.workfile_actions = workfiles_action + self.default_tags_action = default_tags_action + self.publish_action = publish_action + self.reload_action = reload_action + # hiero.ui.registerAction(action) From fd3fcbd3ffc731e0b2f40debd3e4895398220d65 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Wed, 26 Feb 2020 18:50:33 +0100 Subject: [PATCH 323/434] nukestudio workio expect session by new avalon-core change --- pype/nukestudio/workio.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/nukestudio/workio.py b/pype/nukestudio/workio.py index c7484b826b..1c7c77dab9 100644 --- a/pype/nukestudio/workio.py +++ b/pype/nukestudio/workio.py @@ -73,5 +73,5 @@ def current_file(): return normalised -def work_root(): - return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/") +def work_root(session): + return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") From 856f67e113df2cbc13450123e8aadc83c2522e5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Wed, 26 Feb 2020 21:17:12 +0100 Subject: [PATCH 324/434] added reset frame range on task change --- pype/maya/__init__.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index cbdd3696be..417f617784 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -229,4 +229,6 @@ def on_task_changed(*args): # Run maya.pipeline._on_task_changed() - lib.update_content_on_context_change() + with maya.suspended_refresh(): + lib.set_context_settings() + lib.update_content_on_context_change() From 
69ca74f27bb5c6c47af5e227c7310e166f862fbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Wed, 26 Feb 2020 21:47:17 +0100 Subject: [PATCH 325/434] added info message on context change --- pype/maya/__init__.py | 4 ++++ pype/maya/lib.py | 15 +++++++++++++++ pype/widgets/message_window.py | 9 +++++---- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index 417f617784..fdc061f069 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -232,3 +232,7 @@ def on_task_changed(*args): with maya.suspended_refresh(): lib.set_context_settings() lib.update_content_on_context_change() + + lib.show_message("Context was changed", + ("Context was changed to {}".format( + avalon.Session["AVALON_ASSET"]))) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 470b9b4167..6ea108e801 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2611,3 +2611,18 @@ def update_content_on_context_change(): new_data["frameEnd"],) except ValueError: pass + + +def show_message(title, msg): + from avalon.vendor.Qt import QtWidgets + from ..widgets import message_window + + # Find maya main window + top_level_widgets = {w.objectName(): w for w in + QtWidgets.QApplication.topLevelWidgets()} + + parent = top_level_widgets.get("MayaWindow", None) + if parent is None: + pass + else: + message_window.message(title=title, message=msg, parent=parent) diff --git a/pype/widgets/message_window.py b/pype/widgets/message_window.py index 72e655cf5c..3532d2df44 100644 --- a/pype/widgets/message_window.py +++ b/pype/widgets/message_window.py @@ -7,7 +7,7 @@ log = logging.getLogger(__name__) class Window(QtWidgets.QWidget): def __init__(self, parent, title, message, level): - super().__init__() + super(Window, self).__init__() self.parent = parent self.title = title self.message = message @@ -48,9 +48,10 @@ class Window(QtWidgets.QWidget): return -def message(title=None, message=None, level="info"): - global app - app = QtWidgets.QApplication(sys.argv) +def message(title=None, message=None, level="info", parent=None): + app = parent + if not app: + app = QtWidgets.QApplication(sys.argv) ex = Window(app, title, message, level) ex.show() # sys.exit(app.exec_()) From 2f1fe373b784fedbcc15eac722364d744a036294 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 27 Feb 2020 09:39:19 +0100 Subject: [PATCH 326/434] feat(nks): wip loader in lib --- pype/nukestudio/lib.py | 65 +++++++++++++++++++++++++++++------------- 1 file changed, 45 insertions(+), 20 deletions(-) diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index 9cc4df1683..6caae770e6 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -385,8 +385,6 @@ class ClipLoader: self.kwargs = kwargs self.active_project = self._get_active_project() self.project_bin = self.active_project.clipsBin() - self.active_sequence = self._get_active_sequence(sequence) - self.active_track = self._get_active_track(track) self.data = dict() @@ -397,6 +395,10 @@ class ClipLoader: self._get_asset_data() log.debug("__init__ self.data: `{}`".format(self.data)) + # add active components to class + self.active_sequence = self._get_active_sequence(sequence) + self.active_track = self._get_active_track(track) + def _set_data(self): """ Gets context and convert it to self.data data structure: @@ -564,40 +566,63 @@ class ClipLoader: media = hiero.core.MediaSource(self.data["path"]) media_in = int(media.startTime()) media_duration = int(media.duration()) + log.debug("__ media_in: `{}`".format(media_in)) 
+ log.debug("__ media_duration: `{}`".format(media_duration)) handle_start = self.data["assetData"]["handleStart"] handle_end = self.data["assetData"]["handleEnd"] + fps = self.data["assetData"]["fps"] + if media_in: - source_in = media_in + handle_start + source_in = int(media_in + handle_start) else: - source_in = self.data["assetData"]["frameStart"] + handle_start + source_in = int(self.data["assetData"]["frameStart"] + handle_start) if media_duration: - source_out = (media_in + media_duration - 1) - handle_end + source_out = int((media_in + media_duration - 1) - handle_end) else: - source_out = self.data["assetData"]["frameEnd"]- handle_end + source_out = int(self.data["assetData"]["frameEnd"] - handle_end) - source = hiero.core.Clip(media) + log.debug("__ source_in: `{}`".format(source_in)) + log.debug("__ source_out: `{}`".format(source_out)) + log.debug("__ handle_start: `{}`".format(handle_start)) + log.debug("__ handle_end: `{}`".format(handle_end)) - # add to bin as clip item - items_in_bin = [b.name() for b in bin.items()] - if self.data["name"] not in items_in_bin: - binItem = hiero.core.BinItem(source) - bin.addItem(binItem) + # create Clip from Media + _clip = hiero.core.Clip(media) + _clip.setName(self.data["name"]) - new_source = [ - item for item in bin.items() if split_name in item.name() - ][0].items()[0].item() + # add Clip to bin if not there yet + if self.data["name"] not in [b.name() for b in self.active_bin.items()]: + binItem = hiero.core.BinItem(_clip) + self.active_bin.addItem(binItem) + + _source = next((item for item in self.active_bin.items() + if self.data["name"] in item.name()), None) + + if not _source: + log.warning("Problem with created Source clip: `{}`".format( + self.data["name"])) + + version = next((s for s in _source.items()), None) + clip = version.item() # add to track as clip item trackItem = hiero.core.TrackItem( self.data["name"], hiero.core.TrackItem.kVideo) - trackItem.setSource(new_source) - trackItem.setSourceIn(self.data["assetData"]["sourceIn"]) - trackItem.setSourceOut(self.data["assetData"]["sourceOut"]) - trackItem.setTimelineIn(self.data["assetData"]["clipIn"]) - trackItem.setTimelineOut(self.data["assetData"]["clipOut"]) + + log.info("clip: `{}`".format(clip)) + log.info("_clip: `{}`".format(_clip)) + log.info("clip.sourceIn(): `{}`".format(clip.sourceIn())) + log.info("clip.sourceOut(): `{}`".format(clip.sourceOut())) + + trackItem.setSource(clip) + # trackItem.setTimelineIn(self.data["assetData"]["clipIn"]) + trackItem.setSourceIn(5) + # trackItem.setTimelineOut(self.data["assetData"]["clipOut"]) + trackItem.setSourceOut(10) + self.active_track.addTrackItem(trackItem) log.info("Loading clips: `{}`".format(self.data["name"])) From f67fb7f79df98e6f373a5330773fdb3d19aa6d67 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 27 Feb 2020 09:39:45 +0100 Subject: [PATCH 327/434] feat(nks): project helpers from dotStudio --- .../Python/Startup/project_helpers.py | 235 ++++++++++++++++++ 1 file changed, 235 insertions(+) create mode 100644 setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py diff --git a/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py b/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py new file mode 100644 index 0000000000..7e274bd0a3 --- /dev/null +++ b/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py @@ -0,0 +1,235 @@ +try: + from PySide.QtGui import * + from PySide.QtCore import * +except: + from PySide2.QtGui import * + from PySide2.QtWidgets 
import * + from PySide2.QtCore import * + +from hiero.core.util import uniquify, version_get, version_set +import hiero.core +import hiero.ui +import nuke + +# A globally variable for storing the current Project +gTrackedActiveProject = None + +# This selection handler will track changes in items selected/deselected in the Bin/Timeline/Spreadsheet Views + + +def __trackActiveProjectHandler(event): + global gTrackedActiveProject + selection = event.sender.selection() + binSelection = selection + if len(binSelection) > 0 and hasattr(binSelection[0], 'project'): + proj = binSelection[0].project() + + # We only store this if its a valid, active User Project + if proj in hiero.core.projects(hiero.core.Project.kUserProjects): + gTrackedActiveProject = proj + + +hiero.core.events.registerInterest( + 'kSelectionChanged/kBin', __trackActiveProjectHandler) +hiero.core.events.registerInterest( + 'kSelectionChanged/kTimeline', __trackActiveProjectHandler) +hiero.core.events.registerInterest( + 'kSelectionChanged/Spreadsheet', __trackActiveProjectHandler) + + +def activeProject(): + """hiero.ui.activeProject() -> returns the current Project + + Note: There is not technically a notion of a 'active' Project in Hiero/NukeStudio, as it is a multi-project App. + This method determines what is 'active' by going down the following rules... + + # 1 - If the current Viewer (hiero.ui.currentViewer) contains a Clip or Sequence, this item is assumed to give the active Project + # 2 - If nothing is currently in the Viewer, look to the active View, determine project from active selection + # 3 - If no current selection can be determined, fall back to a globally tracked last selection from trackActiveProjectHandler + # 4 - If all those rules fail, fall back to the last project in the list of hiero.core.projects() + + @return: hiero.core.Project""" + global gTrackedActiveProject + activeProject = None + + # Case 1 : Look for what the current Viewr tells us - this might not be what we want, and relies on hiero.ui.currentViewer() being robust. + cv = hiero.ui.currentViewer().player().sequence() + if hasattr(cv, 'project'): + activeProject = cv.project() + else: + # Case 2: We can't determine a project from the current Viewer, so try seeing what's selected in the activeView + # Note that currently, if you run activeProject from the Script Editor, the activeView is always None, so this will rarely get used! + activeView = hiero.ui.activeView() + if activeView: + # We can determine an active View.. see what's being worked with + selection = activeView.selection() + + # Handle the case where nothing is selected in the active view + if len(selection) == 0: + # It's possible that there is no selection in a Timeline/Spreadsheet, but these views have 'sequence' method, so try that... + if isinstance(hiero.ui.activeView(), (hiero.ui.TimelineEditor, hiero.ui.SpreadsheetView)): + activeSequence = activeView.sequence() + if hasattr(currentItem, 'project'): + activeProject = activeSequence.project() + + # The active view has a selection... assume that the first item in the selection has the active Project + else: + currentItem = selection[0] + if hasattr(currentItem, 'project'): + activeProject = currentItem.project() + + # Finally, Cases 3 and 4... 
+ if not activeProject: + activeProjects = hiero.core.projects(hiero.core.Project.kUserProjects) + if gTrackedActiveProject in activeProjects: + activeProject = gTrackedActiveProject + else: + activeProject = activeProjects[-1] + + return activeProject + +# Method to get all recent projects + + +def recentProjects(): + """hiero.core.recentProjects() -> Returns a list of paths to recently opened projects + + Hiero stores up to 5 recent projects in uistate.ini with the [recentFile]/# key. + + @return: list of paths to .hrox Projects""" + + appSettings = hiero.core.ApplicationSettings() + recentProjects = [] + for i in range(0, 5): + proj = appSettings.value('recentFile/%i' % i) + if len(proj) > 0: + recentProjects.append(proj) + return recentProjects + +# Method to get recent project by index + + +def recentProject(k=0): + """hiero.core.recentProject(k) -> Returns the recent project path, specified by integer k (0-4) + + @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects. + + @return: hiero.core.Project""" + + appSettings = hiero.core.ApplicationSettings() + proj = appSettings.value('recentFile/%i' % int(k), None) + return proj + +# Method to get open project by index + + +def openRecentProject(k=0): + """hiero.core.openRecentProject(k) -> Opens the most the recent project as listed in the Open Recent list. + + @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects. + @return: hiero.core.Project""" + + appSettings = hiero.core.ApplicationSettings() + proj = appSettings.value('recentFile/%i' % int(k), None) + proj = hiero.core.openProject(proj) + return proj + + +# Duck punch these methods into the relevant ui/core namespaces +hiero.ui.activeProject = activeProject +hiero.core.recentProjects = recentProjects +hiero.core.recentProject = recentProject +hiero.core.openRecentProject = openRecentProject + + +# Method to Save a new Version of the activeHrox Project +class SaveAllProjects(QAction): + + def __init__(self): + QAction.__init__(self, "Save All Projects", None) + self.triggered.connect(self.projectSaveAll) + hiero.core.events.registerInterest( + "kShowContextMenu/kBin", self.eventHandler) + + def projectSaveAll(self): + allProjects = hiero.core.projects() + for proj in allProjects: + try: + proj.save() + print 'Saved Project: %s to: %s ' % (proj.name(), proj.path()) + except: + print 'Unable to save Project: %s to: %s. Check file permissions.' % (proj.name(), proj.path()) + + def eventHandler(self, event): + event.menu.addAction(self) + +# For projects with v# in the path name, saves out a new Project with v#+1 + + +class SaveNewProjectVersion(QAction): + + def __init__(self): + QAction.__init__(self, "Save New Version...", None) + self.triggered.connect(self.saveNewVersion) + hiero.core.events.registerInterest( + "kShowContextMenu/kBin", self.eventHandler) + self.selectedProjects = [] + + def saveNewVersion(self): + if len(self.selectedProjects) > 0: + projects = self.selectedProjects + else: + projects = [hiero.ui.activeProject()] + + if len(projects) < 1: + return + + for proj in projects: + oldName = proj.name() + path = proj.path() + v = None + prefix = None + try: + (prefix, v) = version_get(path, 'v') + except ValueError, msg: + print msg + + if (prefix is not None) and (v is not None): + v = int(v) + newPath = version_set(path, prefix, v, v + 1) + try: + proj.saveAs(newPath) + print 'Saved new project version: %s to: %s ' % (oldName, newPath) + except: + print 'Unable to save Project: %s. 
Check file permissions.' % (oldName) + else: + newPath = path.replace(".hrox", "_v01.hrox") + answer = nuke.ask( + '%s does not contain a version number.\nDo you want to save as %s?' % (proj, newPath)) + if answer: + try: + proj.saveAs(newPath) + print 'Saved new project version: %s to: %s ' % (oldName, newPath) + except: + print 'Unable to save Project: %s. Check file permissions.' % (oldName) + + def eventHandler(self, event): + self.selectedProjects = [] + if hasattr(event.sender, 'selection') and event.sender.selection() is not None and len(event.sender.selection()) != 0: + selection = event.sender.selection() + self.selectedProjects = uniquify( + [item.project() for item in selection]) + event.menu.addAction(self) + + +# Instantiate the actions +saveAllAct = SaveAllProjects() +saveNewAct = SaveNewProjectVersion() + +fileMenu = hiero.ui.findMenuAction("foundry.menu.file") +importAct = hiero.ui.findMenuAction("foundry.project.importFiles") +hiero.ui.insertMenuAction(saveNewAct, fileMenu.menu(), + before="Import File(s)...") +hiero.ui.insertMenuAction(saveAllAct, fileMenu.menu(), + before="Import File(s)...") +fileMenu.menu().insertSeparator(importAct) From 923021b02f20031bc29c60ac89629e885f70592e Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 27 Feb 2020 09:55:12 +0100 Subject: [PATCH 328/434] fixed action registering and missing QIcons --- pype/nukestudio/menu.py | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/pype/nukestudio/menu.py b/pype/nukestudio/menu.py index a97b24c3fe..36ce4df34e 100644 --- a/pype/nukestudio/menu.py +++ b/pype/nukestudio/menu.py @@ -5,13 +5,6 @@ from pypeapp import Logger from avalon.api import Session from hiero.ui import findMenuAction -# this way we secure compatibility between nuke 10 and 11 -try: - from PySide.QtGui import * -except Exception: - from PySide2.QtGui import * - from PySide2.QtWidgets import * - from .tags import add_tags_from_presets from .lib import ( @@ -51,6 +44,7 @@ def install(): # here is the best place to add menu from avalon.tools import publish + from avalon.vendor.Qt import QtGui menu_name = os.environ['AVALON_LABEL'] @@ -60,16 +54,14 @@ def install(): self._change_context_menu = context_label - # Grab Hiero's MenuBar - M = hiero.ui.menuBar() - try: check_made_menu = findMenuAction(menu_name) except Exception: - pass + check_made_menu = None if not check_made_menu: - menu = M.addMenu(menu_name) + # Grab Hiero's MenuBar + menu = hiero.ui.menuBar().addMenu(menu_name) else: menu = check_made_menu.menu() @@ -79,28 +71,36 @@ def install(): menu.addSeparator() workfiles_action = menu.addAction("Work Files...") - workfiles_action.setIcon("icons:Position.png") + workfiles_action.setIcon(QtGui.QIcon("icons:Position.png")) workfiles_action.triggered.connect(set_workfiles) default_tags_action = menu.addAction("Create Default Tags...") - default_tags_action.setIcon("icons:Position.png") + default_tags_action.setIcon(QtGui.QIcon("icons:Position.png")) default_tags_action.triggered.connect(add_tags_from_presets) menu.addSeparator() publish_action = menu.addAction("Publish...") - publish_action.setIcon("icons:Output.png") - publish_action.triggered.connect(publish.show) + publish_action.setIcon(QtGui.QIcon("icons:Output.png")) + publish_action.triggered.connect( + lambda *args: publish.show(hiero.ui.mainWindow()) + ) menu.addSeparator() reload_action = menu.addAction("Reload pipeline...") - reload_action.setIcon("icons:ColorAdd.png") + 
reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
     reload_action.triggered.connect(reload_config)
 
+    # Is this required?
+    hiero.ui.registerAction(context_label_action)
+    hiero.ui.registerAction(workfiles_action)
+    hiero.ui.registerAction(default_tags_action)
+    hiero.ui.registerAction(publish_action)
+    hiero.ui.registerAction(reload_action)
+
     self.context_label_action = context_label_action
     self.workfile_actions = workfiles_action
     self.default_tags_action = default_tags_action
     self.publish_action = publish_action
     self.reload_action = reload_action
-
-    # hiero.ui.registerAction(action)

From 7df1f34ce9541dd75c5e33a79f70da61ca0b4674 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 27 Feb 2020 10:05:32 +0100
Subject: [PATCH 329/434] set AVALON_HIERARCHY environment when application is launched

---
 pype/ftrack/lib/ftrack_app_handler.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py
index 9dc735987d..2b46dd43d8 100644
--- a/pype/ftrack/lib/ftrack_app_handler.py
+++ b/pype/ftrack/lib/ftrack_app_handler.py
@@ -193,6 +193,8 @@ class AppAction(BaseHandler):
         if parents:
             hierarchy = os.path.join(*parents)
 
+        os.environ["AVALON_HIERARCHY"] = hierarchy
+
         application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])
 
         data = {

From ee7c691b61b1a1642955390dd17bba9317800301 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 27 Feb 2020 10:09:32 +0100
Subject: [PATCH 330/434] fix(nks): old code was blocking processing

---
 pype/plugins/nukestudio/publish/collect_handles.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py
index 28f502d846..c16f1a5803 100644
--- a/pype/plugins/nukestudio/publish/collect_handles.py
+++ b/pype/plugins/nukestudio/publish/collect_handles.py
@@ -55,8 +55,6 @@ class CollectClipHandles(api.ContextPlugin):
                 # debug printing
                 self.log.debug("_ s_asset_data: `{}`".format(
                     s_asset_data))
-                self.log.debug("_ instance.data[handles]: `{}`".format(
-                    instance.data["handles"]))
                 self.log.debug("_ instance.data[handleStart]: `{}`".format(
                     instance.data["handleStart"]))
                 self.log.debug("_ instance.data[handleEnd]: `{}`".format(

From 9f5f1403fc7172a1bb845dfbaeb221fa24403979 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 27 Feb 2020 10:57:59 +0100
Subject: [PATCH 331/434] extract_hierarchy_avalon also checks for existence of archived_entities

---
 .../publish/extract_hierarchy_avalon.py | 44 +++++++++++++++++--
 1 file changed, 40 insertions(+), 4 deletions(-)

diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py
index 318a6db105..17041a8e09 100644
--- a/pype/plugins/global/publish/extract_hierarchy_avalon.py
+++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py
@@ -51,6 +51,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
             data["visualParent"] = visualParent
             data["parents"] = parents
 
+        update_data = True
         # Process project
         if entity_type.lower() == "project":
             entity = io.find_one({"type": "project"})
@@ -70,16 +71,51 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
         # Else process assset
         else:
             entity = io.find_one({"type": "asset", "name": name})
-            # Create entity if doesn"t exist
             if entity is None:
-                entity = self.create_avalon_asset(name, data)
+                # Skip updating data
+                update_data = False
 
-            # Update entity data with input data
-            io.update_many({"_id": entity["_id"]}, {"$set": {"data": 
data}}) + archived_entities = io.find({ + "type": "archived_asset", + "name": name + }) + unarchive_entity = None + for archived_entity in archived_entities: + if data["parents"] == archived_entity: + unarchive_entity = archived_entity + break + + if unarchive_entity is None: + # Create entity if doesn"t exist + entity = self.create_avalon_asset(name, data) + else: + # Unarchive if entity was archived + entity = self.unarchive_entity(unarchive_entity, data) + + if update_data: + # Update entity data with input data + io.update_many( + {"_id": entity["_id"]}, + {"$set": {"data": data}} + ) if "childs" in entity_data: self.import_to_avalon(entity_data["childs"], entity) + def unarchive_entity(self, entity, data): + new_entity = { + "_id": entity["_id"], + "schema": "avalon-core:asset-3.0", + "name": entity["name"], + "parent": self.project["_id"], + "type": "asset", + "data": data + } + io.replace_one( + {"_id": entity["_id"]}, + new_entity + ) + def create_avalon_asset(self, name, data): item = { "schema": "avalon-core:asset-3.0", From 904be8c66684da1fce1be8ee87a73450474ff4fa Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 27 Feb 2020 11:23:05 +0100 Subject: [PATCH 332/434] fixed parents check --- pype/plugins/global/publish/extract_hierarchy_avalon.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py index 17041a8e09..795a1b51fa 100644 --- a/pype/plugins/global/publish/extract_hierarchy_avalon.py +++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py @@ -81,7 +81,12 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): }) unarchive_entity = None for archived_entity in archived_entities: - if data["parents"] == archived_entity: + archived_parents = ( + archived_entity + .get("data", {}) + .get("parents") + ) + if data["parents"] == archived_parents: unarchive_entity = archived_entity break From 4bb9eef68829c28a2ab43b1e2bf7a3889a6e0482 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 27 Feb 2020 15:32:40 +0100 Subject: [PATCH 333/434] raise our TypeError when value for burnins is dictionary, list or tuple --- pype/scripts/otio_burnin.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 7a724e22bf..8d0b925089 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -418,6 +418,13 @@ def burnins_from_data( if not value: continue + if isinstance(value, (dict, list, tuple)): + raise TypeError(( + "Expected string or number type." + " Got: {} - \"{}\"" + " (Make sure you have new burnin presets)." 
+ ).format(str(type(value)), str(value))) + has_timecode = TIME_CODE_KEY in value align = None From 3e87fb0dfe758ae2e2945f202229f610bd9863fb Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Thu, 27 Feb 2020 16:24:24 +0100 Subject: [PATCH 334/434] bugfix: farm rendering wasn't accepting project overrides --- pype/scripts/publish_filesequence.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 620ee3d851..fe795564a5 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -6,6 +6,7 @@ import argparse import logging import subprocess import platform +import json try: from shutil import which @@ -24,6 +25,18 @@ log.setLevel(logging.DEBUG) error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" +def _load_json(path): + assert os.path.isfile(path), ("path to json file doesn't exist") + data = None + with open(path, "r") as json_file: + try: + data = json.load(json_file) + except Exception as exc: + log.error( + "Error loading json: " + "{} - Exception: {}".format(path, exc) + ) + return data def __main__(): parser = argparse.ArgumentParser() @@ -77,6 +90,12 @@ def __main__(): paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa + for path in paths: + data = _load_json(path) + log.info("Setting session using data from file") + os.environ["AVALON_PROJECT"] = data["session"]["AVALON_PROJECT"] + break + args = [ os.path.join(pype_root, pype_command), "publish", From 61ee78a1c8f5b67fde8456015672dc89e95cf19a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 27 Feb 2020 16:46:44 +0100 Subject: [PATCH 335/434] fixed check of top hierarchy entities in sync to avalon --- pype/ftrack/lib/avalon_sync.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index f5b4c4b8c3..6f928914bf 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -2067,9 +2067,10 @@ class SyncEntitiesFactory: # different hierarchy - can't recreate entity continue - _vis_parent = str(deleted_entity["data"]["visualParent"]) + _vis_parent = deleted_entity["data"]["visualParent"] if _vis_parent is None: _vis_parent = self.avalon_project_id + _vis_parent = str(_vis_parent) ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent] self.create_ftrack_ent_from_avalon_ent( deleted_entity, ftrack_parent_id From d5faf524b2acf082dd62a42b56d03549f2fc2c31 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 27 Feb 2020 17:15:42 +0100 Subject: [PATCH 336/434] added project manager to allowed roles for create project structure --- pype/ftrack/actions/action_create_project_structure.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py index 4589802f3a..6124ebe843 100644 --- a/pype/ftrack/actions/action_create_project_structure.py +++ b/pype/ftrack/actions/action_create_project_structure.py @@ -19,7 +19,7 @@ class CreateProjectFolders(BaseAction): #: Action description. 
description = 'Creates folder structure'
     #: roles that are allowed to register this action
-    role_list = ['Pypeclub', 'Administrator']
+    role_list = ['Pypeclub', 'Administrator', 'Project Manager']
     icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format(
         os.environ.get('PYPE_STATICS_SERVER', '')
    )

From a1275fedd54dabd9391678ce5b77d7e4e97f235c Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Thu, 27 Feb 2020 18:06:35 +0100
Subject: [PATCH 337/434] set tasks, visualParent and inputs only for assets, not for project

---
 .../publish/extract_hierarchy_avalon.py | 33 +++++++++++--------
 1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py
index 795a1b51fa..61ff20bd9a 100644
--- a/pype/plugins/global/publish/extract_hierarchy_avalon.py
+++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py
@@ -36,20 +36,25 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
         for k, val in entity_data.get("custom_attributes", {}).items():
             data[k] = val
 
-        # Tasks.
-        tasks = entity_data.get("tasks", [])
-        if tasks is not None or len(tasks) > 0:
-            data["tasks"] = tasks
-        parents = []
-        visualParent = None
-        # do not store project"s id as visualParent (silo asset)
-        if self.project is not None:
-            if self.project["_id"] != parent["_id"]:
-                visualParent = parent["_id"]
-            parents.extend(parent.get("data", {}).get("parents", []))
-            parents.append(parent["name"])
-        data["visualParent"] = visualParent
-        data["parents"] = parents
+        if entity_type.lower() != "project":
+            data["inputs"] = entity_data.get("inputs", [])
+
+            # Tasks.
+            tasks = entity_data.get("tasks", [])
+            if tasks is not None or len(tasks) > 0:
+                data["tasks"] = tasks
+            parents = []
+            visualParent = None
+            # do not store project"s id as visualParent (silo asset)
+            if self.project is not None:
+                if self.project["_id"] != parent["_id"]:
+                    visualParent = parent["_id"]
+                parents.extend(
+                    parent.get("data", {}).get("parents", [])
+                )
+                parents.append(parent["name"])
+            data["visualParent"] = visualParent
+            data["parents"] = parents
 
         update_data = True
         # Process project
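Note on the hunk above: after this change only asset entities carry the hierarchy payload, while project documents keep just the entity type plus merged custom attributes. A minimal sketch of the two resulting `data` layouts follows; every value here is a hypothetical example, not output of the plugin:

# Illustrative layouts only; keys follow the hunk above, values are made up.
project_data = {
    "entityType": "Project",
    "fps": 25,                    # merged custom attribute
}
asset_data = {
    "entityType": "Shot",
    "fps": 25,                    # merged custom attribute
    "inputs": [],
    "tasks": ["compositing"],
    "visualParent": None,         # parent asset id; None directly under project
    "parents": ["ep01", "sh010"],
}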
@@ -64,8 +62,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): assert (entity is not None), "Did not find project in DB" # get data from already existing project - for key, value in entity.get("data", {}).items(): - data[key] = value + entity_data = entity.get("data") or {} + entity_data.update(data) + data = entity_data self.project = entity # Raise error if project or parent are not set @@ -76,7 +75,12 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): # Else process assset else: entity = io.find_one({"type": "asset", "name": name}) - if entity is None: + if entity: + # Do not override data, only update + entity_data = entity.get("data") or {} + entity_data.update(data) + data = entity_data + else: # Skip updating data update_data = False @@ -113,6 +117,10 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): self.import_to_avalon(entity_data["childs"], entity) def unarchive_entity(self, entity, data): + entity_data = entity.get("data") or {} + entity_data.update(data) + data = entity_data + new_entity = { "_id": entity["_id"], "schema": "avalon-core:asset-3.0", From d8226f1d273d25c987e73688b88b018b1134b948 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 28 Feb 2020 12:15:47 +0100 Subject: [PATCH 339/434] fixed variable name --- .../global/publish/extract_hierarchy_avalon.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py index c02eb2ca8e..cdc8b28913 100644 --- a/pype/plugins/global/publish/extract_hierarchy_avalon.py +++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py @@ -62,9 +62,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): assert (entity is not None), "Did not find project in DB" # get data from already existing project - entity_data = entity.get("data") or {} - entity_data.update(data) - data = entity_data + cur_entity_data = entity.get("data") or {} + cur_entity_data.update(data) + data = cur_entity_data self.project = entity # Raise error if project or parent are not set @@ -77,9 +77,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): entity = io.find_one({"type": "asset", "name": name}) if entity: # Do not override data, only update - entity_data = entity.get("data") or {} - entity_data.update(data) - data = entity_data + cur_entity_data = entity.get("data") or {} + cur_entity_data.update(data) + data = cur_entity_data else: # Skip updating data update_data = False From 5fe6854f87ec17e33b860595572ae8796554d78a Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 28 Feb 2020 12:26:25 +0100 Subject: [PATCH 340/434] fix(nk): adding new compatibility code submition from nuke to deadline --- .../nuke/publish/submit_nuke_deadline.py | 65 ++++++++++++++++++- 1 file changed, 63 insertions(+), 2 deletions(-) diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 71108189c0..d69d5ba8bc 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -5,7 +5,6 @@ import getpass from avalon import api from avalon.vendor import requests import re - import pyblish.api @@ -55,7 +54,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): ) # Store output dir for unified publisher (filesequence) instance.data["deadlineSubmissionJob"] = response.json() - instance.data["publishJobState"] = "Active" + instance.data["outputDir"] = os.path.dirname( + 
From 350221e3e05c4cedcab50f892500f2b961a050dc Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 28 Feb 2020 12:26:48 +0100
Subject: [PATCH 341/434] unarchiving overrides data instead of updating it and returns the new entity

---
 pype/plugins/global/publish/extract_hierarchy_avalon.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py
index cdc8b28913..ab8226f6ef 100644
--- a/pype/plugins/global/publish/extract_hierarchy_avalon.py
+++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py
@@ -117,10 +117,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
             self.import_to_avalon(entity_data["childs"], entity)
 
     def unarchive_entity(self, entity, data):
-        entity_data = entity.get("data") or {}
-        entity_data.update(data)
-        data = entity_data
-
+        # Unarchived asset should not use same data
        new_entity = {
            "_id": entity["_id"],
            "schema": "avalon-core:asset-3.0",
@@ -133,6 +130,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
             {"_id": entity["_id"]},
             new_entity
         )
+        return new_entity
 
     def create_avalon_asset(self, name, data):
         item = {
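For context, the unarchive path now rebuilds the asset document from scratch instead of merging the archived data back in, keeping only the original id. A rough standalone equivalent, assuming the same avalon `io` wrapper (find_one/replace_one) used throughout these plugins; the helper name is hypothetical:

from avalon import io

def unarchive_asset(archived_entity, project_id, data):
    # Keep the original _id so existing references stay valid,
    # but build the document fresh -- the archived data is discarded.
    new_entity = {
        "_id": archived_entity["_id"],
        "schema": "avalon-core:asset-3.0",
        "name": archived_entity["name"],
        "parent": project_id,
        "type": "asset",
        "data": data,
    }
    io.replace_one({"_id": archived_entity["_id"]}, new_entity)
    return new_entity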
entity["_id"], "schema": "avalon-core:asset-3.0", @@ -133,6 +130,7 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): {"_id": entity["_id"]}, new_entity ) + return new_entity def create_avalon_asset(self, name, data): item = { From 408db0e8600e7e70d469c6103e96a3e27f9d8242 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 28 Feb 2020 12:27:06 +0100 Subject: [PATCH 342/434] fix(nk): activating back version passing to instance --- pype/plugins/nuke/publish/collect_writes.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index c29f676ef7..3882ed0b32 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -52,9 +52,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_dir = os.path.dirname(path) self.log.debug('output dir: {}'.format(output_dir)) - # # get version to instance for integration - # instance.data['version'] = instance.context.data.get( - # "version", pype.get_version_from_path(nuke.root().name())) + # get version to instance for integration + instance.data['version'] = instance.context.data["version"] self.log.debug('Write Version: %s' % instance.data('version')) From 682bb996abe3428511b5a362a6fad1f39c9378ae Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 28 Feb 2020 12:27:40 +0100 Subject: [PATCH 343/434] fix(global): adding 2d compatibility --- .../global/publish/submit_publish_job.py | 55 ++++++++++++------- 1 file changed, 34 insertions(+), 21 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 29dce58101..e4151d2317 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -131,6 +131,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): - publishJobState (str, Optional): "Active" or "Suspended" This defaults to "Suspended" + - expectedFiles (list or dict): explained bellow + """ label = "Submit image sequence jobs to Deadline or Muster" @@ -166,7 +168,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): instance_transfer = { "slate": ["slateFrame"], "review": ["lutPath"], - "render.farm": ["bakeScriptPath", "bakeRenderPath", "bakeWriteNodeName"] + "render.farm": ["bakeScriptPath", "bakeRenderPath", + "bakeWriteNodeName", "version"] } # list of family names to transfer to new family if present @@ -384,13 +387,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "tags": ["review"] if preview else [] } - # add tags - if preview: - if "ftrack" not in new_instance["families"]: - if os.environ.get("FTRACK_SERVER"): - new_instance["families"].append("ftrack") - if "review" not in new_instance["families"]: - new_instance["families"].append("review") + self._solve_families(new_instance, preview) new_instance["representations"] = [rep] @@ -399,6 +396,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if new_instance.get("extendFrames", False): self._copy_extend_frames(new_instance, rep) instances.append(new_instance) + return instances def _get_representations(self, instance, exp_files): @@ -419,6 +417,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): start = int(instance.get("frameStart")) end = int(instance.get("frameEnd")) cols, rem = clique.assemble(exp_files) + bake_render_path = instance.get("bakeRenderPath") + # create representation for every collected sequence for c in 
From 81662d4edd50dd9a40965236ff186c25cee6c2ef Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 28 Feb 2020 12:28:02 +0100
Subject: [PATCH 344/434] fix(global): failing due to missing tag in representation

---
 pype/plugins/global/publish/extract_jpeg.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py
index 0b0e839529..9ad6a15dfe 100644
--- a/pype/plugins/global/publish/extract_jpeg.py
+++ b/pype/plugins/global/publish/extract_jpeg.py
@@ -27,8 +27,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
         representations_new = representations[:]
 
         for repre in representations:
+            tags = repre.get("tags", [])
             self.log.debug(repre)
-            valid = 'review' in repre['tags'] or "thumb-nuke" in repre['tags']
+            valid = 'review' in tags or "thumb-nuke" in tags
             if not valid:
                 continue

From 295208279f86fad583b211143329c1e47df489eb Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 28 Feb 2020 12:28:26 +0100
Subject: [PATCH 345/434] fix(global): version should be a number

---
 pype/plugins/global/publish/collect_scene_version.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/pype/plugins/global/publish/collect_scene_version.py b/pype/plugins/global/publish/collect_scene_version.py
index 2844a695e2..02e913199b 100644
--- a/pype/plugins/global/publish/collect_scene_version.py
+++ b/pype/plugins/global/publish/collect_scene_version.py
@@ -21,7 +21,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin):
         if '' in filename:
             return
 
-        rootVersion = pype.get_version_from_path(filename)
+        rootVersion = int(pype.get_version_from_path(filename))
         context.data['version'] = rootVersion
-
+        self.log.info("{}".format(type(rootVersion)))
         self.log.info('Scene 
Version: %s' % context.data.get('version')) From 71a3e111ef9db6ba6b98cc7088b2effe556eba6e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 28 Feb 2020 12:36:42 +0100 Subject: [PATCH 346/434] feat(global): improving logic --- pype/plugins/global/publish/validate_version.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/validate_version.py b/pype/plugins/global/publish/validate_version.py index c484c517bb..acc4923e7a 100644 --- a/pype/plugins/global/publish/validate_version.py +++ b/pype/plugins/global/publish/validate_version.py @@ -10,9 +10,14 @@ class ValidateVersion(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder label = "Validate Version" + hosts = ["nuke", "maya", "blender"] def process(self, instance): - version = int(instance.data.get("version")) - last_version = int(instance.data.get("lastVersion")) + version = int(instance.data.get("version") + latest_version = int(instance.data.get("latestVersion", 0)) - assert (version != last_version), "This workfile version is already in published: database: `{0}`, workfile: `{1}`".format(last_version, version) + assert (version != latest_version), ("Version `{0}` that you are" + " trying to publish, already" + " exists in the" + " database `{1}`.").format( + version, latest_version) From c2da6a20ec7e73ab8e3ead4c91ca614085260b25 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 28 Feb 2020 14:51:06 +0100 Subject: [PATCH 347/434] making sure previous version cannot be overwritten --- .../plugins/global/publish/validate_version.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pype/plugins/global/publish/validate_version.py b/pype/plugins/global/publish/validate_version.py index acc4923e7a..4585e5a008 100644 --- a/pype/plugins/global/publish/validate_version.py +++ b/pype/plugins/global/publish/validate_version.py @@ -1,4 +1,4 @@ -import pyblish +import pyblish.api class ValidateVersion(pyblish.api.InstancePlugin): @@ -13,11 +13,13 @@ class ValidateVersion(pyblish.api.InstancePlugin): hosts = ["nuke", "maya", "blender"] def process(self, instance): - version = int(instance.data.get("version") - latest_version = int(instance.data.get("latestVersion", 0)) + version = instance.data.get("version") + latest_version = instance.data.get("latestVersion") - assert (version != latest_version), ("Version `{0}` that you are" - " trying to publish, already" - " exists in the" - " database `{1}`.").format( - version, latest_version) + if latest_version is not None: + msg = ("Version `{0}` that you are" + " trying to publish, already" + " exists in the" + " database.").format( + version, latest_version) + assert (int(version) > int(latest_version)), msg From 8997da158a85ca7465e30e0735d21b58b07b8141 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 28 Feb 2020 14:59:10 +0100 Subject: [PATCH 348/434] install project specific plugins (publish, load and create) found in {PYPE_PROJECT_PLUGINS}/{project_name} --- pype/__init__.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/pype/__init__.py b/pype/__init__.py index 89c653bf6f..0177c27596 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -11,6 +11,7 @@ log = logging.getLogger(__name__) __version__ = "2.5.0" +PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS") PACKAGE_DIR = os.path.dirname(__file__) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") @@ -72,6 +73,18 @@ def install(): pyblish.register_discovery_filter(filter_pyblish_plugins) 
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
 
+    # Register project specific plugins
+    project_name = os.environ.get("AVALON_PROJECT")
+    if PROJECT_PLUGINS_PATH and project_name:
+        for path in PROJECT_PLUGINS_PATH.split(os.pathsep):
+            if not path:
+                continue
+            plugin_path = os.path.join(path, project_name)
+            if os.path.exists(plugin_path):
+                pyblish.register_plugin_path(plugin_path)
+                avalon.register_plugin_path(avalon.Loader, plugin_path)
+                avalon.register_plugin_path(avalon.Creator, plugin_path)
+
     # apply monkey patched discover to original one
     avalon.discover = patched_discover

From 9ae412bff6fddcd2b64c7b542c81369345e07f9e Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 28 Feb 2020 15:14:20 +0100
Subject: [PATCH 349/434] use plugins subdirectory for project specific plugins

---
 pype/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/__init__.py b/pype/__init__.py
index 0177c27596..4858441080 100644
--- a/pype/__init__.py
+++ b/pype/__init__.py
@@ -79,7 +79,7 @@ def install():
         for path in PROJECT_PLUGINS_PATH.split(os.pathsep):
             if not path:
                 continue
-            plugin_path = os.path.join(path, project_name)
+            plugin_path = os.path.join(path, project_name, "plugins")
             if os.path.exists(plugin_path):
                 pyblish.register_plugin_path(plugin_path)
                 avalon.register_plugin_path(avalon.Loader, plugin_path)

From c765c02115c4833a7667dc93fbdeec07dd6a2ad1 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Fri, 28 Feb 2020 15:27:57 +0100
Subject: [PATCH 350/434] standalone publish allows project specific plugins

---
 pype/standalonepublish/__init__.py | 2 ++
 pype/standalonepublish/publish.py | 28 +++++++++++++++----
 .../standalonepublish_module.py | 15 +++------
 3 files changed, 29 insertions(+), 16 deletions(-)

diff --git a/pype/standalonepublish/__init__.py b/pype/standalonepublish/__init__.py
index c7be80f189..8e615afbea 100644
--- a/pype/standalonepublish/__init__.py
+++ b/pype/standalonepublish/__init__.py
@@ -1,3 +1,5 @@
+PUBLISH_PATHS = []
+
 from .standalonepublish_module import StandAlonePublishModule
 from .app import (
     show,

diff --git a/pype/standalonepublish/publish.py b/pype/standalonepublish/publish.py
index fcbb6e137d..045b3d590e 100644
--- a/pype/standalonepublish/publish.py
+++ b/pype/standalonepublish/publish.py
@@ -5,14 +5,14 @@ import tempfile
 import random
 import string
 
-from avalon import io
-from avalon import api as avalon
+from avalon import io, api
 from avalon.tools import publish as av_publish
 
 import pype
 from pypeapp import execute
 import pyblish.api
 
+from . 
import PUBLISH_PATHS def set_context(project, asset, task, app): @@ -31,7 +31,6 @@ def set_context(project, asset, task, app): os.environ["AVALON_TASK"] = task io.Session["AVALON_TASK"] = task - io.install() av_project = io.find_one({'type': 'project'}) @@ -76,7 +75,7 @@ def avalon_api_publish(data, gui=True): io.install() # Create hash name folder in temp - chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] ) + chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) staging_dir = tempfile.mkdtemp(chars) # create also json and fill with data @@ -105,8 +104,27 @@ def avalon_api_publish(data, gui=True): def cli_publish(data, gui=True): io.install() + pyblish.api.deregister_all_plugins() + # Registers Global pyblish plugins + pype.install() + # Registers Standalone pyblish plugins + for path in PUBLISH_PATHS: + pyblish.api.register_plugin_path(path) + + project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS") + project_name = os.environ["AVALON_PROJECT"] + if project_plugins_paths and project_name: + for path in project_plugins_paths.split(os.pathsep): + if not path: + continue + plugin_path = os.path.join(path, project_name, "plugins") + if os.path.exists(plugin_path): + pyblish.api.register_plugin_path(plugin_path) + api.register_plugin_path(api.Loader, plugin_path) + api.register_plugin_path(api.Creator, plugin_path) + # Create hash name folder in temp - chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] ) + chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) staging_dir = tempfile.mkdtemp(chars) # create json for return data diff --git a/pype/standalonepublish/standalonepublish_module.py b/pype/standalonepublish/standalonepublish_module.py index 75c033e16b..64195bc271 100644 --- a/pype/standalonepublish/standalonepublish_module.py +++ b/pype/standalonepublish/standalonepublish_module.py @@ -2,16 +2,16 @@ import os from .app import show from .widgets import QtWidgets import pype -import pyblish.api +from . 
import PUBLISH_PATHS class StandAlonePublishModule: - PUBLISH_PATHS = [] def __init__(self, main_parent=None, parent=None): self.main_parent = main_parent self.parent_widget = parent - self.PUBLISH_PATHS.append(os.path.sep.join( + PUBLISH_PATHS.clear() + PUBLISH_PATHS.append(os.path.sep.join( [pype.PLUGINS_DIR, "standalonepublisher", "publish"] )) @@ -24,16 +24,9 @@ class StandAlonePublishModule: def process_modules(self, modules): if "FtrackModule" in modules: - self.PUBLISH_PATHS.append(os.path.sep.join( + PUBLISH_PATHS.append(os.path.sep.join( [pype.PLUGINS_DIR, "ftrack", "publish"] )) - def tray_start(self): - # Registers Global pyblish plugins - pype.install() - # Registers Standalone pyblish plugins - for path in self.PUBLISH_PATHS: - pyblish.api.register_plugin_path(path) - def show(self): show(self.main_parent, False) From 1944cbdd72fbf1f5699ac7e7a0be9ed163a04765 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 28 Feb 2020 16:43:31 +0100 Subject: [PATCH 351/434] action set attribute `is_published` of deleted asset versions to False --- .../actions/action_delete_old_versions.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index bec21dae96..23ceb124e5 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -361,6 +361,24 @@ class DeleteOldVersions(BaseAction): self.dbcon.uninstall() + for entity in entities: + entity["is_published"] = False + + try: + session.commit() + + except Exception: + msg = ( + "Could not set `is_published` attribute to `False`" + " for selected AssetVersions." + ) + self.log.warning(msg, exc_info=True) + + return { + "success": False, + "message": msg + } + return True def delete_whole_dir_paths(self, dir_paths): From 3fba3e17ad44a6394419aca019ce67781fc590c0 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 28 Feb 2020 16:45:15 +0100 Subject: [PATCH 352/434] just commenting because last commit won't work --- .../actions/action_delete_old_versions.py | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index 23ceb124e5..7b8b7c5617 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -361,23 +361,23 @@ class DeleteOldVersions(BaseAction): self.dbcon.uninstall() - for entity in entities: - entity["is_published"] = False - - try: - session.commit() - - except Exception: - msg = ( - "Could not set `is_published` attribute to `False`" - " for selected AssetVersions." - ) - self.log.warning(msg, exc_info=True) - - return { - "success": False, - "message": msg - } + # for entity in entities: + # entity["is_published"] = False + # + # try: + # session.commit() + # + # except Exception: + # msg = ( + # "Could not set `is_published` attribute to `False`" + # " for selected AssetVersions." 
+ # ) + # self.log.warning(msg, exc_info=True) + # + # return { + # "success": False, + # "message": msg + # } return True From f63ec18f4eb677d1e06a9a91b325dd4a811e0fde Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 28 Feb 2020 17:11:07 +0100 Subject: [PATCH 353/434] first attemp possible solution --- .../actions/action_delete_old_versions.py | 74 +++++++++++++------ 1 file changed, 53 insertions(+), 21 deletions(-) diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py index 7b8b7c5617..f6a66318c9 100644 --- a/pype/ftrack/actions/action_delete_old_versions.py +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -167,8 +167,11 @@ class DeleteOldVersions(BaseAction): asset_versions_by_parent_id = collections.defaultdict(list) subset_names_by_asset_name = collections.defaultdict(list) + ftrack_assets_by_name = {} for entity in entities: - parent_ent = entity["asset"]["parent"] + ftrack_asset = entity["asset"] + + parent_ent = ftrack_asset["parent"] parent_ftrack_id = parent_ent["id"] parent_name = parent_ent["name"] @@ -183,9 +186,12 @@ class DeleteOldVersions(BaseAction): project = parent_ent["project"] # Collect subset names per asset - subset_name = entity["asset"]["name"] + subset_name = ftrack_asset["name"] subset_names_by_asset_name[parent_name].append(subset_name) + if subset_name not in ftrack_assets_by_name: + ftrack_assets_by_name[subset_name] = ftrack_asset + # Set Mongo collection project_name = project["full_name"] self.dbcon.Session["AVALON_PROJECT"] = project_name @@ -236,7 +242,6 @@ class DeleteOldVersions(BaseAction): def sort_func(ent): return int(ent["name"]) - last_versions_by_parent = collections.defaultdict(list) all_last_versions = [] for parent_id, _versions in versions_by_parent.items(): for idx, version in enumerate( @@ -244,7 +249,6 @@ class DeleteOldVersions(BaseAction): ): if idx >= versions_count: break - last_versions_by_parent[parent_id].append(version) all_last_versions.append(version) self.log.debug("Collected versions ({})".format(len(versions))) @@ -253,6 +257,11 @@ class DeleteOldVersions(BaseAction): for version in all_last_versions: versions.remove(version) + # Update versions_by_parent without filtered versions + versions_by_parent = collections.defaultdict(list) + for ent in versions: + versions_by_parent[ent["parent"]].append(ent) + # Filter already deleted versions versions_to_pop = [] for version in versions: @@ -361,23 +370,46 @@ class DeleteOldVersions(BaseAction): self.dbcon.uninstall() - # for entity in entities: - # entity["is_published"] = False - # - # try: - # session.commit() - # - # except Exception: - # msg = ( - # "Could not set `is_published` attribute to `False`" - # " for selected AssetVersions." 
- # ) - # self.log.warning(msg, exc_info=True) - # - # return { - # "success": False, - # "message": msg - # } + # Set attribute `is_published` to `False` on ftrack AssetVersions + for subset_id, _versions in versions_by_parent.items(): + subset_name = None + for subset in subsets: + if subset["_id"] == subset_id: + subset_name = subset["name"] + break + + if subset_name is None: + self.log.warning( + "Subset with ID `{}` was not found.".format(str(subset_id)) + ) + continue + + ftrack_asset = ftrack_assets_by_name.get(subset_name) + if not ftrack_asset: + self.log.warning(( + "Could not find Ftrack asset with name `{}`" + ).format(subset_name)) + continue + + version_numbers = [int(ver["name"]) for ver in _versions] + for version in ftrack_asset["versions"]: + if int(version["version"]) in version_numbers: + version["is_published"] = False + + try: + session.commit() + + except Exception: + msg = ( + "Could not set `is_published` attribute to `False`" + " for selected AssetVersions." + ) + self.log.warning(msg, exc_info=True) + + return { + "success": False, + "message": msg + } return True From 228c0ed19083f171a84b1743fc70a5796fd517ed Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 28 Feb 2020 17:27:41 +0100 Subject: [PATCH 354/434] bugfix: missing icons in maya shelf --- pype/maya/lib.py | 19 +++++++++++++------ setup/maya/userSetup.py | 5 ++++- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 6ea108e801..a06810ea94 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2399,15 +2399,19 @@ class shelf(): if not item.get('command'): item['command'] = self._null if item['type'] == 'button': - self.addButon(item['name'], command=item['command']) + self.addButon(item['name'], + command=item['command'], + icon=item['icon']) if item['type'] == 'menuItem': self.addMenuItem(item['parent'], item['name'], - command=item['command']) + command=item['command'], + icon=item['icon']) if item['type'] == 'subMenu': self.addMenuItem(item['parent'], item['name'], - command=item['command']) + command=item['command'], + icon=item['icon']) def addButon(self, label, icon="commandButton.png", command=_null, doubleCommand=_null): @@ -2417,7 +2421,8 @@ class shelf(): ''' cmds.setParent(self.name) if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) cmds.shelfButton(width=37, height=37, image=icon, label=label, command=command, dcc=doubleCommand, imageOverlayLabel=label, olb=self.labelBackground, @@ -2429,7 +2434,8 @@ class shelf(): double click command and image. ''' if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) return cmds.menuItem(p=parent, label=label, c=command, i="") def addSubMenu(self, parent, label, icon=None): @@ -2438,7 +2444,8 @@ class shelf(): the specified parent popup menu. 
''' if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1) def _cleanOldShelf(self): diff --git a/setup/maya/userSetup.py b/setup/maya/userSetup.py index b419e9d27e..4f4aed36b7 100644 --- a/setup/maya/userSetup.py +++ b/setup/maya/userSetup.py @@ -14,12 +14,15 @@ shelf_preset = presets['maya'].get('project_shelf') if shelf_preset: project = os.environ["AVALON_PROJECT"] + icon_path = os.path.join(os.environ['PYPE_PROJECT_SCRIPTS'], project,"icons") + icon_path = os.path.abspath(icon_path) + for i in shelf_preset['imports']: import_string = "from {} import {}".format(project, i) print(import_string) exec(import_string) -cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], preset=shelf_preset)") +cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)") print("finished PYPE usersetup") From 3a3774c3baa58da245011d52ede69f5935519767 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 28 Feb 2020 17:31:27 +0100 Subject: [PATCH 355/434] fix(nuke): thumbnail to publish on farm --- pype/plugins/nuke/publish/extract_thumbnail.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py index 55ba34a0d4..88ea78e623 100644 --- a/pype/plugins/nuke/publish/extract_thumbnail.py +++ b/pype/plugins/nuke/publish/extract_thumbnail.py @@ -116,7 +116,7 @@ class ExtractThumbnail(pype.api.Extractor): write_node["raw"].setValue(1) write_node.setInput(0, previous_node) temporary_nodes.append(write_node) - tags = ["thumbnail"] + tags = ["thumbnail", "publish_on_farm"] # retime for first_frame = int(last_frame) / 2 From 6f545cec66e7c60a60b8a5a9d0d415d6bc69b2d2 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 28 Feb 2020 17:42:14 +0100 Subject: [PATCH 356/434] fix(nk): adding handles to instance data --- pype/plugins/nuke/publish/collect_writes.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 3882ed0b32..76c2e8fa75 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -111,7 +111,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "outputDir": output_dir, "ext": ext, "label": label, - "handles": handles, + "handleStart": handle_start, + "handleEnd": handle_end, "frameStart": first_frame, "frameEnd": last_frame, "outputType": output_type, From d9f08df58918295abc7d63e91a3b287c0e7a888b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 28 Feb 2020 17:42:41 +0100 Subject: [PATCH 357/434] fix(nk): removing obsolete plugin --- .../nuke/publish/collect_script_version.py | 22 ------------------- 1 file changed, 22 deletions(-) delete mode 100644 pype/plugins/nuke/publish/collect_script_version.py diff --git a/pype/plugins/nuke/publish/collect_script_version.py b/pype/plugins/nuke/publish/collect_script_version.py deleted file mode 100644 index 9a6b5bf572..0000000000 --- a/pype/plugins/nuke/publish/collect_script_version.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import pype.api as pype -import pyblish.api - - -class CollectScriptVersion(pyblish. 
api.ContextPlugin):
-    """Collect Script Version."""
-
-    order = pyblish.api.CollectorOrder
-    label = "Collect Script Version"
-    hosts = [
-        "nuke",
-        "nukeassist"
-    ]
-
-    def process(self, context):
-        file_path = context.data["currentFile"]
-        base_name = os.path.basename(file_path)
-        # get version string
-        version = pype.get_version_from_path(base_name)
-
-        context.data['version'] = version

From 391861da419d112df89eb8cd99a9a15ee40aa712 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 28 Feb 2020 17:43:22 +0100
Subject: [PATCH 358/434] fix(global): improving representation submission

---
 .../global/publish/submit_publish_job.py | 29 ++++++++++++++++---
 1 file changed, 25 insertions(+), 4 deletions(-)

diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index e4151d2317..80b3137673 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -468,8 +468,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             }
             if r in bake_render_path:
                 rep.update({
+                    "fps": instance.get("fps"),
                     "anatomy_template": "render",
-                    "tags": ["review", "preview"]
+                    "tags": ["review", "delete"]
                 })
             # solve families with `preview` attributes
             self._solve_families(instance, True)
@@ -498,7 +499,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         :param instance: Instance data
         :type instance: dict
         """
-        data = instance.data.copy()
         context = instance.context
         self.context = context

From 81662d4edd50dd9a40965236ff186c25cee6c2ef Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Fri, 
28 Feb 2020 17:43:41 +0100 Subject: [PATCH 359/434] fix(nuke): improving loaders --- pype/plugins/nuke/load/load_mov.py | 10 +++++++--- pype/plugins/nuke/load/load_sequence.py | 5 ++++- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index 77346a82a4..88e65156cb 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -112,6 +112,7 @@ class LoadMov(api.Loader): ) version = context['version'] version_data = version.get("data", {}) + repr_id = context["representation"]["_id"] orig_first = version_data.get("frameStart") orig_last = version_data.get("frameEnd") @@ -120,12 +121,16 @@ class LoadMov(api.Loader): first = orig_first - diff last = orig_last - diff - handle_start = version_data.get("handleStart") - handle_end = version_data.get("handleEnd") + handle_start = version_data.get("handleStart", 0) + handle_end = version_data.get("handleEnd", 0) colorspace = version_data.get("colorspace") repr_cont = context["representation"]["context"] + self.log.debug( + "Representation id `{}` ".format(repr_id)) + + context["representation"]["_id"] # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range @@ -138,7 +143,6 @@ class LoadMov(api.Loader): file = self.fname if not file: - repr_id = context["representation"]["_id"] self.log.warning( "Representation id `{}` is failing to load".format(repr_id)) return diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index db77c53aff..690f074c3f 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -86,8 +86,11 @@ class LoadSequence(api.Loader): version = context['version'] version_data = version.get("data", {}) - + repr_id = context["representation"]["_id"] + self.log.info("version_data: {}\n".format(version_data)) + self.log.debug( + "Representation id `{}` ".format(repr_id)) self.first_frame = int(nuke.root()["first_frame"].getValue()) self.handle_start = version_data.get("handleStart", 0) From 4e49c8b40b4c3a596c1c41e94b4b852c27b515ed Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 2 Mar 2020 14:32:22 +0100 Subject: [PATCH 360/434] feat(nks): creating track item on metadata time --- pype/nukestudio/lib.py | 53 ++++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 31 deletions(-) diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index 6caae770e6..fad6d32ffc 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -561,34 +561,28 @@ class ClipLoader: self.active_bin = self._make_project_bin(self.data["binPath"]) log.debug("__ active_bin: `{}`".format(self.active_bin)) + # check if slate is included + slate_on = next((f for f in self.context["version"]["data"]["families"] + if "slate" in f), None) + # create mediaItem in active project bin # create clip media media = hiero.core.MediaSource(self.data["path"]) - media_in = int(media.startTime()) media_duration = int(media.duration()) - log.debug("__ media_in: `{}`".format(media_in)) - log.debug("__ media_duration: `{}`".format(media_duration)) - handle_start = self.data["assetData"]["handleStart"] - handle_end = self.data["assetData"]["handleEnd"] + handle_start = int(self.data["assetData"]["handleStart"]) + handle_end = int(self.data["assetData"]["handleEnd"]) + + clip_in = int(self.data["assetData"]["clipIn"]) + clip_out = int(self.data["assetData"]["clipOut"]) 
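+        # NOTE: clip_in/clip_out are timeline frames taken from the loader's
+        # assetData, while media_duration and the handles describe the source
+        # media; when a slate frame is detected (slate_on above), the
+        # adjustment below trims one frame from the media and shifts it into
+        # the start handle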
+ + # calculate slate differences + if slate_on: + media_duration -= 1 + handle_start += 1 fps = self.data["assetData"]["fps"] - if media_in: - source_in = int(media_in + handle_start) - else: - source_in = int(self.data["assetData"]["frameStart"] + handle_start) - - if media_duration: - source_out = int((media_in + media_duration - 1) - handle_end) - else: - source_out = int(self.data["assetData"]["frameEnd"] - handle_end) - - log.debug("__ source_in: `{}`".format(source_in)) - log.debug("__ source_out: `{}`".format(source_out)) - log.debug("__ handle_start: `{}`".format(handle_start)) - log.debug("__ handle_end: `{}`".format(handle_end)) - # create Clip from Media _clip = hiero.core.Clip(media) _clip.setName(self.data["name"]) @@ -609,21 +603,18 @@ class ClipLoader: clip = version.item() # add to track as clip item - trackItem = hiero.core.TrackItem( + track_item = hiero.core.TrackItem( self.data["name"], hiero.core.TrackItem.kVideo) - log.info("clip: `{}`".format(clip)) - log.info("_clip: `{}`".format(_clip)) - log.info("clip.sourceIn(): `{}`".format(clip.sourceIn())) - log.info("clip.sourceOut(): `{}`".format(clip.sourceOut())) + track_item.setSource(clip) - trackItem.setSource(clip) - # trackItem.setTimelineIn(self.data["assetData"]["clipIn"]) - trackItem.setSourceIn(5) - # trackItem.setTimelineOut(self.data["assetData"]["clipOut"]) - trackItem.setSourceOut(10) + track_item.setSourceIn(handle_start) + track_item.setTimelineIn(clip_in) - self.active_track.addTrackItem(trackItem) + track_item.setSourceOut(media_duration - handle_end) + track_item.setTimelineOut(clip_out) + + self.active_track.addTrackItem(track_item) log.info("Loading clips: `{}`".format(self.data["name"])) From ec6f10f5bf524f8b60157939a3187fbdf216edbe Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 2 Mar 2020 14:33:17 +0100 Subject: [PATCH 361/434] feat(nks): printing context for better dev --- .../nukestudio/load/load_sequences_to_timeline_asset_origin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py index 62ed2e1271..523bcf91eb 100644 --- a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py +++ b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py @@ -26,7 +26,7 @@ class LoadSequencesToTimelineAssetOrigin(api.Loader): "hieroWorkfileName": hiero.ui.activeProject().name() }) - self.log.info("data: `{}`".format(data)) + self.log.develop("_ context: `{}`".format(context)) clip_loader = lib.ClipLoader(self, context, **data) clip_loader.load() From b4c88a5c30e37f233f7abc6d4e2f7da69c016c58 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 2 Mar 2020 18:30:10 +0100 Subject: [PATCH 362/434] fix(nks): debug log wrong argument --- .../nukestudio/load/load_sequences_to_timeline_asset_origin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py index 523bcf91eb..09d9b1a4bb 100644 --- a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py +++ b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py @@ -26,7 +26,7 @@ class LoadSequencesToTimelineAssetOrigin(api.Loader): "hieroWorkfileName": hiero.ui.activeProject().name() }) - self.log.develop("_ context: `{}`".format(context)) + self.log.debug("_ context: 
`{}`".format(context)) clip_loader = lib.ClipLoader(self, context, **data) clip_loader.load() From 1b219b5cfb898be8528ef3726d427ad637603d95 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 2 Mar 2020 18:30:33 +0100 Subject: [PATCH 363/434] feat(nks): dealing with track names and correct timing --- pype/nukestudio/lib.py | 55 ++++++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index fad6d32ffc..e7b7232938 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -414,7 +414,8 @@ class ClipLoader: asset = str(repr_cntx["asset"]) subset = str(repr_cntx["subset"]) representation = str(repr_cntx["representation"]) - self.data["name"] = "_".join([asset, subset, representation]) + self.data["clip_name"] = "_".join([asset, subset, representation]) + self.data["track_name"] = "_".join([subset, representation]) # gets file path file = self.cls.fname @@ -545,13 +546,20 @@ class ClipLoader: def _get_active_track(self, track): if not track: - track_name = self.data["name"] + track_name = self.data["track_name"] + else: + track_name = track.name() - if track_name not in self.active_sequence.videoTracks(): - track = hiero.core.VideoTrack(track_name) - self.active_sequence.addTrack(track) + track_pass = next( + (t for t in self.active_sequence.videoTracks() + if t.name() in track_name), None + ) - return track + if not track_pass: + track_pass = hiero.core.VideoTrack(track_name) + self.active_sequence.addTrack(track_pass) + + return track_pass def load(self): log.debug("__ active_project: `{}`".format(self.active_project)) @@ -561,9 +569,8 @@ class ClipLoader: self.active_bin = self._make_project_bin(self.data["binPath"]) log.debug("__ active_bin: `{}`".format(self.active_bin)) - # check if slate is included - slate_on = next((f for f in self.context["version"]["data"]["families"] - if "slate" in f), None) + log.debug("__ version.data: `{}`".format( + self.context["version"]["data"])) # create mediaItem in active project bin # create clip media @@ -576,6 +583,22 @@ class ClipLoader: clip_in = int(self.data["assetData"]["clipIn"]) clip_out = int(self.data["assetData"]["clipOut"]) + log.debug("__ media_duration: `{}`".format(media_duration)) + log.debug("__ handle_start: `{}`".format(handle_start)) + log.debug("__ handle_end: `{}`".format(handle_end)) + log.debug("__ clip_in: `{}`".format(clip_in)) + log.debug("__ clip_out: `{}`".format(clip_out)) + + # check if slate is included + # either in version data families or by calculating frame diff + slate_on = next( + (f for f in self.context["version"]["data"]["families"] + if "slate" in f), + None) or bool((( + clip_in - clip_out + 1) + handle_start + handle_end + ) - media_duration) + + log.debug("__ slate_on: `{}`".format(slate_on)) # calculate slate differences if slate_on: media_duration -= 1 @@ -585,26 +608,28 @@ class ClipLoader: # create Clip from Media _clip = hiero.core.Clip(media) - _clip.setName(self.data["name"]) + _clip.setName(self.data["clip_name"]) # add Clip to bin if not there yet - if self.data["name"] not in [b.name() for b in self.active_bin.items()]: + if self.data["clip_name"] not in [ + b.name() + for b in self.active_bin.items()]: binItem = hiero.core.BinItem(_clip) self.active_bin.addItem(binItem) _source = next((item for item in self.active_bin.items() - if self.data["name"] in item.name()), None) + if self.data["clip_name"] in item.name()), None) if not _source: log.warning("Problem with created Source clip: 
`{}`".format( - self.data["name"])) + self.data["clip_name"])) version = next((s for s in _source.items()), None) clip = version.item() # add to track as clip item track_item = hiero.core.TrackItem( - self.data["name"], hiero.core.TrackItem.kVideo) + self.data["clip_name"], hiero.core.TrackItem.kVideo) track_item.setSource(clip) @@ -616,7 +641,7 @@ class ClipLoader: self.active_track.addTrackItem(track_item) - log.info("Loading clips: `{}`".format(self.data["name"])) + log.info("Loading clips: `{}`".format(self.data["clip_name"])) def create_nk_workfile_clips(nk_workfiles, seq=None): From ce03cacd8b5fcf6976071b46218986179123adef Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 2 Mar 2020 22:37:34 +0100 Subject: [PATCH 364/434] remove useless outputDirectory0 --- pype/plugins/nuke/publish/submit_nuke_deadline.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index d69d5ba8bc..ba43ed574b 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -125,8 +125,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # Optional, enable double-click to preview rendered # frames from Deadline Monitor - "OutputFilename0": output_filename_0.replace("\\", "/"), - "OutputDirectory0": output_directory_0 + "OutputFilename0": output_filename_0.replace("\\", "/") }, "PluginInfo": { From 073855cb5f4f203447a71338d0a961f0745ddc08 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 2 Mar 2020 22:37:55 +0100 Subject: [PATCH 365/434] add all outputs to deadline OutputFilename --- .../maya/publish/submit_maya_deadline.py | 24 ++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 2f236be424..bd8497152e 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -1,6 +1,7 @@ import os import json import getpass +import clique from maya import cmds @@ -242,7 +243,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Optional, enable double-click to preview rendered # frames from Deadline Monitor - "OutputFilename0": output_filename_0.replace("\\", "/"), + "OutputDirectory0": os.path.dirname(output_filename_0), + "OutputFilename0": output_filename_0.replace("\\", "/") }, "PluginInfo": { # Input @@ -272,6 +274,26 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AuxFiles": [] } + exp = instance.data.get("expectedFiles") + + OutputFilenames = {} + expIndex = 0 + + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + for aov, files in exp[0].items(): + col = clique.assemble(files)[0][0] + outputFile = col.format('{head}{padding}{tail}') + payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile + OutputFilenames[expIndex] = outputFile + expIndex += 1 + else: + col = clique.assemble(files)[0][0] + outputFile = col.format('{head}{padding}{tail}') + payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile + # OutputFilenames[expIndex] = outputFile + + # We need those to pass them to pype for it to set correct context keys = [ "FTRACK_API_KEY", From a7605c6c502ccf857e273e6b772f2b9f1f80bf74 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 2 Mar 2020 22:38:17 +0100 Subject: [PATCH 366/434] add handles and fps to cg renders publishing --- 
pype/plugins/global/publish/submit_publish_job.py | 6 +++++- pype/plugins/maya/publish/collect_render.py | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 80b3137673..cb835bff11 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -544,6 +544,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): if handle_end is None: handle_end = context.data["handleEnd"] + fps = instance.data.get("fps") + if fps is None: + fps = context.data["fps"] + if data.get("extendFrames", False): start, end = self._extend_frames( asset, @@ -574,7 +578,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "frameEnd": end, "handleStart": handle_start, "handleEnd": handle_end, - "fps": data["fps"], + "fps": fps, "source": source, "extendFrames": data.get("extendFrames"), "overrideExistingFrame": data.get("overrideExistingFrame"), diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index 07eec4192f..f31198448b 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -220,6 +220,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): layer=layer_name)), "renderer": self.get_render_attribute("currentRenderer", layer=layer_name), + "handleStart": context.data["assetEntity"]['data']['handleStart'], + "handleEnd": context.data["assetEntity"]['data']['handleEnd'], # instance subset "family": "renderlayer", From b075770388d5e0ec6b7cdd0981ea23ccff052137 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 2 Mar 2020 23:19:12 +0100 Subject: [PATCH 367/434] bump version --- pype/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/__init__.py b/pype/__init__.py index 4858441080..5cd9832558 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -9,7 +9,7 @@ from pypeapp import config import logging log = logging.getLogger(__name__) -__version__ = "2.5.0" +__version__ = "2.6.0" PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS") PACKAGE_DIR = os.path.dirname(__file__) From c795e0a3c8b6719f529b53eb00ac7fbc60bc2706 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 3 Mar 2020 11:26:39 +0100 Subject: [PATCH 368/434] tasks model has selectable items --- pype/tools/assetcreator/model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/tools/assetcreator/model.py b/pype/tools/assetcreator/model.py index b77ffa7a5d..3af1d77127 100644 --- a/pype/tools/assetcreator/model.py +++ b/pype/tools/assetcreator/model.py @@ -241,7 +241,7 @@ class TasksModel(TreeModel): self.endResetModel() def flags(self, index): - return QtCore.Qt.ItemIsEnabled + return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable def headerData(self, section, orientation, role): From ae256b25f9802c5aad426a63c877df477e91fd54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Tue, 3 Mar 2020 11:58:09 +0000 Subject: [PATCH 369/434] hotfix: path to metadata json was missing slash --- pype/plugins/global/publish/submit_publish_job.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index cb835bff11..027647a598 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -193,11 +193,9 @@ 
class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         metadata_path = os.path.normpath(metadata_path)
         mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
-        network_root = os.path.normpath(
-            os.environ["PYPE_STUDIO_PROJECTS_PATH"]
-        )
-
+        network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"]
         metadata_path = metadata_path.replace(mount_root, network_root)
+        metadata_path = os.path.normpath(metadata_path)
 
         # Generate the payload for Deadline submission
         payload = {

From e53213e8762683ecd61e8034410818cdec787b18 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 3 Mar 2020 14:39:13 +0100
Subject: [PATCH 370/434] fix(nuke): adding deadline output_directory_0

---
 pype/plugins/nuke/publish/submit_nuke_deadline.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index ba43ed574b..e7aae0199b 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -125,6 +125,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
 
             # Optional, enable double-click to preview rendered
             # frames from Deadline Monitor
+            "OutputDirectory0": output_directory_0,
             "OutputFilename0": output_filename_0.replace("\\", "/")
         },
 
From d1e739e699266321c67bdc758aed1ac17592240b Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 3 Mar 2020 14:42:19 +0100
Subject: [PATCH 371/434] feat(global): adding OutputDirectory0 to dependent job review

---
 pype/plugins/global/publish/submit_publish_job.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index cb835bff11..71ba279af9 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -209,7 +209,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "UserName": job["Props"]["User"],
             "Comment": instance.context.data.get("comment", ""),
             "Priority": job["Props"]["Pri"],
-            "Pool": self.deadline_pool
+            "Pool": self.deadline_pool,
+            "OutputDirectory0": output_dir
         },
         "PluginInfo": {
             "Version": "3.6",

From 3c020c478bad1f111353317a2d93d21f74354bb9 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 3 Mar 2020 18:38:12 +0100
Subject: [PATCH 372/434] fix(nk): adding deadline variables from presets

---
 pype/nuke/lib.py                              |  2 +-
 .../nuke/publish/submit_nuke_deadline.py      | 22 +++++++++++++++----
 2 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index 6eb4da951c..dedc42fa1d 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -432,7 +432,7 @@ def add_deadline_tab(node):
     node.addKnob(nuke.Tab_Knob("Deadline"))
 
     knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
-    knob.setValue(1)
+    knob.setValue(0)
     node.addKnob(knob)
 
     knob = nuke.Int_Knob("deadlinePriority", "Priority")
diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index ba43ed574b..ee7432e241 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -22,6 +22,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
     families = ["render.farm"]
     optional = True
 
+    deadline_priority = 50
+    deadline_pool = ""
+    deadline_pool_secondary = ""
+    deadline_chunk_size = 1
+
     def process(self, instance):
         node = instance[0]
 
@@ -89,7 +94,6 @@
jobname = "%s - %s" % (script_name, instance.name)
 
         output_filename_0 = self.preview_fname(render_path)
-        output_directory_0 = render_dir.replace("\\", "/")
 
         if not responce_data:
             responce_data = {}
@@ -100,6 +104,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
         except OSError:
             pass
 
+        # define chunk and priority
+        chunk_size = instance.data.get("deadlineChunkSize")
+        if chunk_size == 0:
+            chunk_size = self.deadline_chunk_size
+
+        priority = instance.data.get("deadlinePriority")
+        if priority == 50:
+            priority = self.deadline_priority
+
         payload = {
             "JobInfo": {
                 # Top-level group name
@@ -111,10 +124,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
 
                 # Arbitrary username, for visualisation in Monitor
                 "UserName": self._deadline_user,
 
-                "Priority": instance.data["deadlinePriority"],
+                "Priority": priority,
+                "ChunkSize": chunk_size,
 
-                "Pool": "2d",
-                "SecondaryPool": "2d",
+                "Pool": self.deadline_pool,
+                "SecondaryPool": self.deadline_pool_secondary,
 
                 "Plugin": "Nuke",
                 "Frames": "{start}-{end}".format(

From e00ad0f3eb28ab89feec23af918fbde16e50d749 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 3 Mar 2020 18:39:31 +0100
Subject: [PATCH 373/434] fix(nk): removing already removed `OutputDirectory0`

---
 pype/plugins/nuke/publish/submit_nuke_deadline.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index e7aae0199b..0c0663c147 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -89,7 +89,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
         jobname = "%s - %s" % (script_name, instance.name)
 
         output_filename_0 = self.preview_fname(render_path)
-        output_directory_0 = render_dir.replace("\\", "/")
 
         if not responce_data:
             responce_data = {}
@@ -125,7 +124,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
 
             # Optional, enable double-click to preview rendered
             # frames from Deadline Monitor
-            "OutputDirectory0": output_directory_0,
             "OutputFilename0": output_filename_0.replace("\\", "/")
         },

From df80aa7088367a36fa487b91af551155a4fc8bad Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 3 Mar 2020 18:50:04 +0100
Subject: [PATCH 374/434] fix(global): integrate frame to representation was not correct

---
 pype/plugins/global/publish/integrate_new.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 2a9b813231..0d1606c67c 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -441,7 +441,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
 
         if sequence_repre and repre.get("frameStart"):
             representation['context']['frame'] = (
-                src_padding_exp % int(repre.get("frameStart"))
+                dst_padding_exp % int(repre.get("frameStart"))
             )
 
         self.log.debug("__ representation: {}".format(representation))

From 525e987427bc633a412d9477d12c1d6742e708ad Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Tue, 3 Mar 2020 18:50:39 +0100
Subject: [PATCH 375/434] fix(global): outputName on representation was only single file

---
 pype/plugins/global/publish/integrate_new.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 0d1606c67c..1d061af173 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ 
b/pype/plugins/global/publish/integrate_new.py @@ -278,6 +278,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): stagingdir = repre['stagingDir'] if repre.get('anatomy_template'): template_name = repre['anatomy_template'] + if repre.get("outputName"): + template_data["output"] = repre['outputName'] template = os.path.normpath( anatomy.templates[template_name]["path"]) @@ -389,9 +391,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_data["representation"] = repre['ext'] - if repre.get("outputName"): - template_data["output"] = repre['outputName'] - src = os.path.join(stagingdir, fname) anatomy_filled = anatomy.format(template_data) template_filled = anatomy_filled[template_name]["path"] From a22ca2665502fadd5f776002a7cd0126cf8a2bff Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 4 Mar 2020 15:09:02 +0100 Subject: [PATCH 376/434] feat(glob,nk): old files/dirs remove --- .../_publish_unused/collect_deadline_user.py | 60 ------- .../global/_publish_unused/collect_json.py | 127 --------------- .../_publish_unused/collect_textures.py | 88 ---------- .../global/_publish_unused/extract_json.py | 51 ------ .../_publish_unused/extract_quicktime.py | 86 ---------- .../global/_publish_unused/transcode.py | 153 ------------------ pype/plugins/nuke/_load_unused/load_alembic | 0 .../plugins/nuke/_load_unused/load_camera_abc | 0 pype/plugins/nuke/_load_unused/load_camera_nk | 1 - pype/plugins/nuke/_load_unused/load_still | 1 - .../_publish_unused/collect_render_target.py | 46 ------ .../nuke/_publish_unused/submit_deadline.py | 147 ----------------- .../nuke/_publish_unused/test_instances.py | 24 --- .../_publish_unused/validate_nuke_settings.py | 68 -------- .../_publish_unused/validate_proxy_mode.py | 33 ---- setup/nuke/nuke_path/atom_server.py | 54 ------- setup/nuke/nuke_path/menu.py | 1 - 17 files changed, 940 deletions(-) delete mode 100644 pype/plugins/global/_publish_unused/collect_deadline_user.py delete mode 100644 pype/plugins/global/_publish_unused/collect_json.py delete mode 100644 pype/plugins/global/_publish_unused/collect_textures.py delete mode 100644 pype/plugins/global/_publish_unused/extract_json.py delete mode 100644 pype/plugins/global/_publish_unused/extract_quicktime.py delete mode 100644 pype/plugins/global/_publish_unused/transcode.py delete mode 100644 pype/plugins/nuke/_load_unused/load_alembic delete mode 100644 pype/plugins/nuke/_load_unused/load_camera_abc delete mode 100644 pype/plugins/nuke/_load_unused/load_camera_nk delete mode 100644 pype/plugins/nuke/_load_unused/load_still delete mode 100644 pype/plugins/nuke/_publish_unused/collect_render_target.py delete mode 100644 pype/plugins/nuke/_publish_unused/submit_deadline.py delete mode 100644 pype/plugins/nuke/_publish_unused/test_instances.py delete mode 100644 pype/plugins/nuke/_publish_unused/validate_nuke_settings.py delete mode 100644 pype/plugins/nuke/_publish_unused/validate_proxy_mode.py delete mode 100644 setup/nuke/nuke_path/atom_server.py diff --git a/pype/plugins/global/_publish_unused/collect_deadline_user.py b/pype/plugins/global/_publish_unused/collect_deadline_user.py deleted file mode 100644 index f4d13a0545..0000000000 --- a/pype/plugins/global/_publish_unused/collect_deadline_user.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import subprocess - -import pyblish.api - -CREATE_NO_WINDOW = 0x08000000 - - -def deadline_command(cmd): - # Find Deadline - path = os.environ.get("DEADLINE_PATH", None) - assert path is not None, "Variable 'DEADLINE_PATH' must be set" - - executable = 
os.path.join(path, "deadlinecommand") - if os.name == "nt": - executable += ".exe" - assert os.path.exists( - executable), "Deadline executable not found at %s" % executable - assert cmd, "Must have a command" - - query = (executable, cmd) - - process = subprocess.Popen(query, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - creationflags=CREATE_NO_WINDOW) - out, err = process.communicate() - - return out - - -class CollectDeadlineUser(pyblish.api.ContextPlugin): - """Retrieve the local active Deadline user""" - - order = pyblish.api.CollectorOrder + 0.499 - label = "Deadline User" - - hosts = ['maya', 'fusion', 'nuke'] - families = [ - "renderlayer", - "saver.deadline", - "imagesequence" - ] - - - def process(self, context): - """Inject the current working file""" - user = None - try: - user = deadline_command("GetCurrentUserName").strip() - except: - self.log.warning("Deadline command seems not to be working") - - if not user: - self.log.warning("No Deadline user found. " - "Do you have Deadline installed?") - return - - self.log.info("Found Deadline user: {}".format(user)) - context.data['deadlineUser'] = user diff --git a/pype/plugins/global/_publish_unused/collect_json.py b/pype/plugins/global/_publish_unused/collect_json.py deleted file mode 100644 index dc5bfb9c81..0000000000 --- a/pype/plugins/global/_publish_unused/collect_json.py +++ /dev/null @@ -1,127 +0,0 @@ -import os -import json -import re - -import pyblish.api -import clique - - -class CollectJSON(pyblish.api.ContextPlugin): - """ Collecting the json files in current directory. """ - - label = "JSON" - order = pyblish.api.CollectorOrder - hosts = ['maya'] - - def version_get(self, string, prefix): - """ Extract version information from filenames. Code from Foundry"s - nukescripts.version_get() - """ - - regex = r"[/_.]{}\d+".format(prefix) - matches = re.findall(regex, string, re.IGNORECASE) - - if not len(matches): - msg = "No '_{}#' found in '{}'".format(prefix, string) - raise ValueError(msg) - return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group() - - def process(self, context): - current_file = context.data.get("currentFile", '') - # Skip if current file is not a directory - if not os.path.isdir(current_file): - return - - # Traverse directory and collect collections from json files. - instances = [] - for root, dirs, files in os.walk(current_file): - for f in files: - if f.endswith(".json"): - with open(os.path.join(root, f)) as json_data: - for data in json.load(json_data): - instances.append(data) - - # Validate instance based on supported families. - valid_families = ["img", "cache", "scene", "mov"] - valid_data = [] - for data in instances: - families = data.get("families", []) + [data["family"]] - family_type = list(set(families) & set(valid_families)) - if family_type: - valid_data.append(data) - - # Create existing output instance. 
- scanned_dirs = [] - files = [] - collections = [] - for data in valid_data: - if "collection" not in data.keys(): - continue - if data["collection"] is None: - continue - - instance_collection = clique.parse(data["collection"]) - - try: - version = self.version_get( - os.path.basename(instance_collection.format()), "v" - )[1] - except KeyError: - # Ignore any output that is not versioned - continue - - # Getting collections of all previous versions and current version - for count in range(1, int(version) + 1): - - # Generate collection - version_string = "v" + str(count).zfill(len(version)) - head = instance_collection.head.replace( - "v" + version, version_string - ) - collection = clique.Collection( - head=head.replace("\\", "/"), - padding=instance_collection.padding, - tail=instance_collection.tail - ) - collection.version = count - - # Scan collection directory - scan_dir = os.path.dirname(collection.head) - if scan_dir not in scanned_dirs and os.path.exists(scan_dir): - for f in os.listdir(scan_dir): - file_path = os.path.join(scan_dir, f) - files.append(file_path.replace("\\", "/")) - scanned_dirs.append(scan_dir) - - # Match files to collection and add - for f in files: - if collection.match(f): - collection.add(f) - - # Skip if no files were found in the collection - if not list(collection): - continue - - # Skip existing collections - if collection in collections: - continue - - instance = context.create_instance(name=data["name"]) - version = self.version_get( - os.path.basename(collection.format()), "v" - )[1] - - basename = os.path.basename(collection.format()) - instance.data["label"] = "{0} - {1}".format( - data["name"], basename - ) - - families = data["families"] + [data["family"]] - family = list(set(valid_families) & set(families))[0] - instance.data["family"] = family - instance.data["families"] = ["output"] - instance.data["collection"] = collection - instance.data["version"] = int(version) - instance.data["publish"] = False - - collections.append(collection) diff --git a/pype/plugins/global/_publish_unused/collect_textures.py b/pype/plugins/global/_publish_unused/collect_textures.py deleted file mode 100644 index c38e911033..0000000000 --- a/pype/plugins/global/_publish_unused/collect_textures.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -import re -import copy -from avalon import io -from pprint import pprint - -import pyblish.api -from avalon import api - - -texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga', - '.psd', '.dpx', '.hdr', '.hdri', '.exr', '.sxr', '.psb'] - - -class CollectTextures(pyblish.api.ContextPlugin): - """ - Gather all texture files in working directory, traversing whole structure. 
- """ - - order = pyblish.api.CollectorOrder - targets = ["texture"] - label = "Textures" - hosts = ["shell"] - - def process(self, context): - - if os.environ.get("PYPE_PUBLISH_PATHS"): - paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) - else: - cwd = context.get("workspaceDir", os.getcwd()) - paths = [cwd] - - textures = [] - for path in paths: - for dir, subdir, files in os.walk(path): - textures.extend( - os.path.join(dir, x) for x in files - if os.path.splitext(x)[1].lower() in texture_extensions) - - self.log.info("Got {} texture files.".format(len(textures))) - if len(textures) < 1: - raise RuntimeError("no textures found.") - - asset_name = os.environ.get("AVALON_ASSET") - family = 'texture' - subset = 'Main' - - project = io.find_one({'type': 'project'}) - asset = io.find_one({ - 'type': 'asset', - 'name': asset_name - }) - - context.data['project'] = project - context.data['asset'] = asset - - for tex in textures: - self.log.info("Processing: {}".format(tex)) - name, ext = os.path.splitext(tex) - simple_name = os.path.splitext(os.path.basename(tex))[0] - instance = context.create_instance(simple_name) - - instance.data.update({ - "subset": subset, - "asset": asset_name, - "label": simple_name, - "name": simple_name, - "family": family, - "families": [family, 'ftrack'], - }) - instance.data['destination_list'] = list() - instance.data['representations'] = list() - instance.data['source'] = 'pype command' - - texture_data = {} - texture_data['anatomy_template'] = 'texture' - texture_data["ext"] = ext - texture_data["label"] = simple_name - texture_data["name"] = "texture" - texture_data["stagingDir"] = os.path.dirname(tex) - texture_data["files"] = os.path.basename(tex) - texture_data["thumbnail"] = False - texture_data["preview"] = False - - instance.data["representations"].append(texture_data) - self.log.info("collected instance: {}".format(instance.data)) - - self.log.info("All collected.") diff --git a/pype/plugins/global/_publish_unused/extract_json.py b/pype/plugins/global/_publish_unused/extract_json.py deleted file mode 100644 index 8aff324574..0000000000 --- a/pype/plugins/global/_publish_unused/extract_json.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import json -import datetime -import time - -import pyblish.api -import clique - - -class ExtractJSON(pyblish.api.ContextPlugin): - """ Extract all instances to a serialized json file. """ - - order = pyblish.api.IntegratorOrder - label = "JSON" - hosts = ['maya'] - - def process(self, context): - - workspace = os.path.join( - os.path.dirname(context.data["currentFile"]), "workspace", - "instances") - - if not os.path.exists(workspace): - os.makedirs(workspace) - - output_data = [] - for instance in context: - self.log.debug(instance['data']) - - data = {} - for key, value in instance.data.iteritems(): - if isinstance(value, clique.Collection): - value = value.format() - - try: - json.dumps(value) - data[key] = value - except KeyError: - msg = "\"{0}\"".format(value) - msg += " in instance.data[\"{0}\"]".format(key) - msg += " could not be serialized." 
- self.log.debug(msg) - - output_data.append(data) - - timestamp = datetime.datetime.fromtimestamp( - time.time()).strftime("%Y%m%d-%H%M%S") - filename = timestamp + "_instances.json" - - with open(os.path.join(workspace, filename), "w") as outfile: - outfile.write(json.dumps(output_data, indent=4, sort_keys=True)) diff --git a/pype/plugins/global/_publish_unused/extract_quicktime.py b/pype/plugins/global/_publish_unused/extract_quicktime.py deleted file mode 100644 index 76a920b798..0000000000 --- a/pype/plugins/global/_publish_unused/extract_quicktime.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import pyblish.api -import subprocess -import clique - - -class ExtractQuicktimeEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ - - label = "Extract Quicktime" - order = pyblish.api.ExtractorOrder - families = ["imagesequence", "render", "write", "source"] - hosts = ["shell"] - - def process(self, instance): - # fps = instance.data.get("fps") - # start = instance.data.get("startFrame") - # stagingdir = os.path.normpath(instance.data.get("stagingDir")) - # - # collected_frames = os.listdir(stagingdir) - # collections, remainder = clique.assemble(collected_frames) - # - # full_input_path = os.path.join( - # stagingdir, collections[0].format('{head}{padding}{tail}') - # ) - # self.log.info("input {}".format(full_input_path)) - # - # filename = collections[0].format('{head}') - # if not filename.endswith('.'): - # filename += "." 
- # movFile = filename + "mov" - # full_output_path = os.path.join(stagingdir, movFile) - # - # self.log.info("output {}".format(full_output_path)) - # - # config_data = instance.context.data['output_repre_config'] - # - # proj_name = os.environ.get('AVALON_PROJECT', '__default__') - # profile = config_data.get(proj_name, config_data['__default__']) - # - # input_args = [] - # # overrides output file - # input_args.append("-y") - # # preset's input data - # input_args.extend(profile.get('input', [])) - # # necessary input data - # input_args.append("-start_number {}".format(start)) - # input_args.append("-i {}".format(full_input_path)) - # input_args.append("-framerate {}".format(fps)) - # - # output_args = [] - # # preset's output data - # output_args.extend(profile.get('output', [])) - # # output filename - # output_args.append(full_output_path) - # mov_args = [ - # "ffmpeg", - # " ".join(input_args), - # " ".join(output_args) - # ] - # subprocess_mov = " ".join(mov_args) - # sub_proc = subprocess.Popen(subprocess_mov) - # sub_proc.wait() - # - # if not os.path.isfile(full_output_path): - # raise("Quicktime wasn't created succesfully") - # - # if "representations" not in instance.data: - # instance.data["representations"] = [] - # - # representation = { - # 'name': 'mov', - # 'ext': 'mov', - # 'files': movFile, - # "stagingDir": stagingdir, - # "preview": True - # } - # instance.data["representations"].append(representation) diff --git a/pype/plugins/global/_publish_unused/transcode.py b/pype/plugins/global/_publish_unused/transcode.py deleted file mode 100644 index 6da65e3cc7..0000000000 --- a/pype/plugins/global/_publish_unused/transcode.py +++ /dev/null @@ -1,153 +0,0 @@ -import os -import subprocess - -import pyblish.api -import filelink - - -class ExtractTranscode(pyblish.api.InstancePlugin): - """Extracts review movie from image sequence. - - Offset to get images to transcode from. - """ - - order = pyblish.api.ExtractorOrder + 0.1 - label = "Transcode" - optional = True - families = ["review"] - - def find_previous_index(self, index, indexes): - """Finds the closest previous value in a list from a value.""" - - data = [] - for i in indexes: - if i >= index: - continue - data.append(index - i) - - return indexes[data.index(min(data))] - - def process(self, instance): - - if "collection" in instance.data.keys(): - self.process_image(instance) - - if "output_path" in instance.data.keys(): - self.process_movie(instance) - - def process_image(self, instance): - - collection = instance.data.get("collection", []) - - if not list(collection): - msg = "Skipping \"{0}\" because no frames was found." - self.log.warning(msg.format(instance.data["name"])) - return - - # Temporary fill the missing frames. - missing = collection.holes() - if not collection.is_contiguous(): - pattern = collection.format("{head}{padding}{tail}") - for index in missing.indexes: - dst = pattern % index - src_index = self.find_previous_index( - index, list(collection.indexes) - ) - src = pattern % src_index - - filelink.create(src, dst) - - # Generate args. - # Has to be yuv420p for compatibility with older players and smooth - # playback. This does come with a sacrifice of more visible banding - # issues. - # -crf 18 is visually lossless. 
- args = [ - "ffmpeg", "-y", - "-start_number", str(min(collection.indexes)), - "-framerate", str(instance.context.data["framerate"]), - "-i", collection.format("{head}{padding}{tail}"), - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - "-vframes", - str(max(collection.indexes) - min(collection.indexes) + 1), - "-vf", - "scale=trunc(iw/2)*2:trunc(ih/2)*2", - ] - - if instance.data.get("baked_colorspace_movie"): - args = [ - "ffmpeg", "-y", - "-i", instance.data["baked_colorspace_movie"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - args.append(collection.format("{head}.mov")) - - self.log.debug("Executing args: {0}".format(args)) - - # Can't use subprocess.check_output, cause Houdini doesn't like that. - p = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, - cwd=os.path.dirname(args[-1]) - ) - - output = p.communicate()[0] - - # Remove temporary frame fillers - for f in missing: - os.remove(f) - - if p.returncode != 0: - raise ValueError(output) - - self.log.debug(output) - - def process_movie(self, instance): - # Generate args. - # Has to be yuv420p for compatibility with older players and smooth - # playback. This does come with a sacrifice of more visible banding - # issues. - args = [ - "ffmpeg", "-y", - "-i", instance.data["output_path"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - if instance.data.get("baked_colorspace_movie"): - args = [ - "ffmpeg", "-y", - "-i", instance.data["baked_colorspace_movie"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - split = os.path.splitext(instance.data["output_path"]) - args.append(split[0] + "_review.mov") - - self.log.debug("Executing args: {0}".format(args)) - - # Can't use subprocess.check_output, cause Houdini doesn't like that. 
- p = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, - cwd=os.path.dirname(args[-1]) - ) - - output = p.communicate()[0] - - if p.returncode != 0: - raise ValueError(output) - - self.log.debug(output) diff --git a/pype/plugins/nuke/_load_unused/load_alembic b/pype/plugins/nuke/_load_unused/load_alembic deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_load_unused/load_camera_abc b/pype/plugins/nuke/_load_unused/load_camera_abc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_load_unused/load_camera_nk b/pype/plugins/nuke/_load_unused/load_camera_nk deleted file mode 100644 index 8b13789179..0000000000 --- a/pype/plugins/nuke/_load_unused/load_camera_nk +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pype/plugins/nuke/_load_unused/load_still b/pype/plugins/nuke/_load_unused/load_still deleted file mode 100644 index c2aa061c5a..0000000000 --- a/pype/plugins/nuke/_load_unused/load_still +++ /dev/null @@ -1 +0,0 @@ -# usually used for mattepainting diff --git a/pype/plugins/nuke/_publish_unused/collect_render_target.py b/pype/plugins/nuke/_publish_unused/collect_render_target.py deleted file mode 100644 index 6c04414f69..0000000000 --- a/pype/plugins/nuke/_publish_unused/collect_render_target.py +++ /dev/null @@ -1,46 +0,0 @@ -import pyblish.api - - -@pyblish.api.log -class CollectRenderTarget(pyblish.api.InstancePlugin): - """Collect families for all instances""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Render Target" - hosts = ["nuke", "nukeassist"] - families = ['write'] - - def process(self, instance): - - node = instance[0] - - self.log.info('processing {}'.format(node)) - - families = [] - if instance.data.get('families'): - families += instance.data['families'] - - # set for ftrack to accept - # instance.data["families"] = ["ftrack"] - - if node["render"].value(): - # dealing with local/farm rendering - if node["render_farm"].value(): - families.append("render.farm") - else: - families.append("render.local") - else: - families.append("render.frames") - # to ignore staging dir op in integrate - instance.data['transfer'] = False - - families.append('ftrack') - - instance.data["families"] = families - - # Sort/grouped by family (preserving local index) - instance.context[:] = sorted(instance.context, key=self.sort_by_family) - - def sort_by_family(self, instance): - """Sort by family""" - return instance.data.get("families", instance.data.get("family")) diff --git a/pype/plugins/nuke/_publish_unused/submit_deadline.py b/pype/plugins/nuke/_publish_unused/submit_deadline.py deleted file mode 100644 index 8b86189425..0000000000 --- a/pype/plugins/nuke/_publish_unused/submit_deadline.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -import json -import getpass - -from avalon import api -from avalon.vendor import requests - -import pyblish.api - - -class NukeSubmitDeadline(pyblish.api.InstancePlugin): - # TODO: rewrite docstring to nuke - """Submit current Comp to Deadline - - Renders are submitted to a Deadline Web Service as - supplied via the environment variable DEADLINE_REST_URL - - """ - - label = "Submit to Deadline" - order = pyblish.api.IntegratorOrder - hosts = ["nuke"] - families = ["write", "render.deadline"] - - def process(self, instance): - - context = instance.context - - key = "__hasRun{}".format(self.__class__.__name__) - if context.data.get(key, False): - return - else: - context.data[key] = True - - DEADLINE_REST_URL = 
api.Session.get("DEADLINE_REST_URL", - "http://localhost:8082") - assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" - - # Collect all saver instances in context that are to be rendered - write_instances = [] - for instance in context[:]: - if not self.families[0] in instance.data.get("families"): - # Allow only saver family instances - continue - - if not instance.data.get("publish", True): - # Skip inactive instances - continue - self.log.debug(instance.data["name"]) - write_instances.append(instance) - - if not write_instances: - raise RuntimeError("No instances found for Deadline submittion") - - hostVersion = int(context.data["hostVersion"]) - filepath = context.data["currentFile"] - filename = os.path.basename(filepath) - comment = context.data.get("comment", "") - deadline_user = context.data.get("deadlineUser", getpass.getuser()) - - # Documentation for keys available at: - # https://docs.thinkboxsoftware.com - # /products/deadline/8.0/1_User%20Manual/manual - # /manual-submission.html#job-info-file-options - payload = { - "JobInfo": { - # Top-level group name - "BatchName": filename, - - # Job name, as seen in Monitor - "Name": filename, - - # User, as seen in Monitor - "UserName": deadline_user, - - # Use a default submission pool for Nuke - "Pool": "nuke", - - "Plugin": "Nuke", - "Frames": "{start}-{end}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]) - ), - - "Comment": comment, - }, - "PluginInfo": { - # Input - "FlowFile": filepath, - - # Mandatory for Deadline - "Version": str(hostVersion), - - # Render in high quality - "HighQuality": True, - - # Whether saver output should be checked after rendering - # is complete - "CheckOutput": True, - - # Proxy: higher numbers smaller images for faster test renders - # 1 = no proxy quality - "Proxy": 1, - }, - - # Mandatory for Deadline, may be empty - "AuxFiles": [] - } - - # Enable going to rendered frames from Deadline Monitor - for index, instance in enumerate(write_instances): - path = instance.data["path"] - folder, filename = os.path.split(path) - payload["JobInfo"]["OutputDirectory%d" % index] = folder - payload["JobInfo"]["OutputFilename%d" % index] = filename - - # Include critical variables with submission - keys = [ - # TODO: This won't work if the slaves don't have accesss to - # these paths, such as if slaves are running Linux and the - # submitter is on Windows. - "PYTHONPATH", - "NUKE_PATH" - # "OFX_PLUGIN_PATH", - ] - environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) - - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) - - self.log.info("Submitting..") - self.log.info(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. 
http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(DEADLINE_REST_URL) - response = requests.post(url, json=payload) - if not response.ok: - raise Exception(response.text) - - # Store the response for dependent job submission plug-ins - for instance in write_instances: - instance.data["deadlineSubmissionJob"] = response.json() diff --git a/pype/plugins/nuke/_publish_unused/test_instances.py b/pype/plugins/nuke/_publish_unused/test_instances.py deleted file mode 100644 index e3fcc4b8f1..0000000000 --- a/pype/plugins/nuke/_publish_unused/test_instances.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api - - -class IncrementTestPlugin(pyblish.api.ContextPlugin): - """Increment current script version.""" - - order = pyblish.api.CollectorOrder + 0.5 - label = "Test Plugin" - hosts = ['nuke'] - - def process(self, context): - instances = context[:] - - prerender_check = list() - families_check = list() - for instance in instances: - if ("prerender" in str(instance)): - prerender_check.append(instance) - if instance.data.get("families", None): - families_check.append(True) - - if len(prerender_check) != len(families_check): - self.log.info(prerender_check) - self.log.info(families_check) diff --git a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py deleted file mode 100644 index 441658297d..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py +++ /dev/null @@ -1,68 +0,0 @@ -import nuke -import os -import pyblish.api -from avalon import io -# TODO: add repair function - - -@pyblish.api.log -class ValidateSettingsNuke(pyblish.api.Validator): - """ Validates settings """ - - families = ['scene'] - hosts = ['nuke'] - optional = True - label = 'Settings' - - def process(self, instance): - - asset = io.find_one({"name": os.environ['AVALON_ASSET']}) - try: - avalon_resolution = asset["data"].get("resolution", '') - avalon_pixel_aspect = asset["data"].get("pixelAspect", '') - avalon_fps = asset["data"].get("fps", '') - avalon_first = asset["data"].get("frameStart", '') - avalon_last = asset["data"].get("frameEnd", '') - avalon_crop = asset["data"].get("crop", '') - except KeyError: - print( - "No resolution information found for \"{0}\".".format( - asset["name"] - ) - ) - return - - # validating first frame - local_first = nuke.root()['first_frame'].value() - msg = 'First frame is incorrect.' - msg += '\n\nLocal first: %s' % local_first - msg += '\n\nOnline first: %s' % avalon_first - assert local_first == avalon_first, msg - - # validating last frame - local_last = nuke.root()['last_frame'].value() - msg = 'Last frame is incorrect.' - msg += '\n\nLocal last: %s' % local_last - msg += '\n\nOnline last: %s' % avalon_last - assert local_last == avalon_last, msg - - # validating fps - local_fps = nuke.root()['fps'].value() - msg = 'FPS is incorrect.' - msg += '\n\nLocal fps: %s' % local_fps - msg += '\n\nOnline fps: %s' % avalon_fps - assert local_fps == avalon_fps, msg - - # validating resolution width - local_width = nuke.root().format().width() - msg = 'Width is incorrect.' - msg += '\n\nLocal width: %s' % local_width - msg += '\n\nOnline width: %s' % avalon_resolution[0] - assert local_width == avalon_resolution[0], msg - - # validating resolution width - local_height = nuke.root().format().height() - msg = 'Height is incorrect.' 
-        msg += '\n\nLocal height: %s' % local_height
-        msg += '\n\nOnline height: %s' % avalon_resolution[1]
-        assert local_height == avalon_resolution[1], msg
diff --git a/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py b/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py
deleted file mode 100644
index a82fb16f31..0000000000
--- a/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import nuke
-
-import pyblish.api
-
-
-class RepairNukeProxyModeAction(pyblish.api.Action):
-
-    label = "Repair"
-    icon = "wrench"
-    on = "failed"
-
-    def process(self, context, plugin):
-
-        nuke.root()["proxy"].setValue(0)
-
-
-class ValidateNukeProxyMode(pyblish.api.ContextPlugin):
-    """Validates against having proxy mode on."""
-
-    order = pyblish.api.ValidatorOrder
-    optional = True
-    label = "Proxy Mode"
-    actions = [RepairNukeProxyModeAction]
-    hosts = ["nuke", "nukeassist"]
-    # targets = ["default", "process"]
-
-    def process(self, context):
-
-        msg = (
-            "Proxy mode is not supported. Please disable Proxy Mode in the "
-            "Project settings."
-        )
-        assert not nuke.root()["proxy"].getValue(), msg
diff --git a/setup/nuke/nuke_path/atom_server.py b/setup/nuke/nuke_path/atom_server.py
deleted file mode 100644
index 1742c290c1..0000000000
--- a/setup/nuke/nuke_path/atom_server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-'''
-    Simple socket server using threads
-'''
-
-import socket
-import sys
-import threading
-import StringIO
-import contextlib
-
-import nuke
-
-HOST = ''
-PORT = 8888
-
-
-@contextlib.contextmanager
-def stdoutIO(stdout=None):
-    old = sys.stdout
-    if stdout is None:
-        stdout = StringIO.StringIO()
-    sys.stdout = stdout
-    yield stdout
-    sys.stdout = old
-
-
-def _exec(data):
-    with stdoutIO() as s:
-        exec(data)
-    return s.getvalue()
-
-
-def server_start():
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.bind((HOST, PORT))
-    s.listen(5)
-
-    while 1:
-        client, address = s.accept()
-        try:
-            data = client.recv(4096)
-            if data:
-                result = nuke.executeInMainThreadWithResult(_exec, args=(data))
-                client.send(str(result))
-        except SystemExit:
-            result = self.encode('SERVER: Shutting down...')
-            client.send(str(result))
-            raise
-        finally:
-            client.close()
-
-t = threading.Thread(None, server_start)
-t.setDaemon(True)
-t.start()
diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py
index 7f5de6013d..15702fa364 100644
--- a/setup/nuke/nuke_path/menu.py
+++ b/setup/nuke/nuke_path/menu.py
@@ -1,6 +1,5 @@
 import os
 import sys
-import atom_server
 import KnobScripter
 
 from pype.nuke.lib import (

From aa40c8030a1ddda1efdf77b63fecda91e5050e02 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 5 Mar 2020 12:28:44 +0100
Subject: [PATCH 377/434] fix(nks): collection of resolution data

---
 .../publish/collect_hierarchy_context.py      | 14 ++++++------
 pype/plugins/nukestudio/publish/collect_plates.py |  4 ++--
 2 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
index 5085b9719e..ac7a58fe7e 100644
--- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
+++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
@@ -161,8 +161,8 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
             "asset": asset,
             "hierarchy": hierarchy,
             "parents": parents,
-            "width": width,
-            "height": height,
+            "resolutionWidth": width,
+            "resolutionHeight": height,
             "pixelAspect": pixel_aspect,
             "tasks": 
instance.data["tasks"] }) @@ -223,8 +223,10 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): instance.data["parents"] = s_asset_data["parents"] instance.data["hierarchy"] = s_asset_data["hierarchy"] instance.data["tasks"] = s_asset_data["tasks"] - instance.data["width"] = s_asset_data["width"] - instance.data["height"] = s_asset_data["height"] + instance.data["resolutionWidth"] = s_asset_data[ + "resolutionWidth"] + instance.data["resolutionHeight"] = s_asset_data[ + "resolutionHeight"] instance.data["pixelAspect"] = s_asset_data["pixelAspect"] # adding frame start if any on instance @@ -275,8 +277,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # adding SourceResolution if Tag was present if instance.data.get("main"): in_info['custom_attributes'].update({ - "resolutionWidth": instance.data["width"], - "resolutionHeight": instance.data["height"], + "resolutionWidth": instance.data["resolutionWidth"], + "resolutionHeight": instance.data["resolutionHeight"], "pixelAspect": instance.data["pixelAspect"] }) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index acdc5193ae..b624cf0edc 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -83,7 +83,7 @@ class CollectPlates(api.InstancePlugin): class CollectPlatesData(api.InstancePlugin): """Collect plates""" - order = api.CollectorOrder + 0.495 + order = api.CollectorOrder + 0.48 label = "Collect Plates Data" hosts = ["nukestudio"] families = ["plate"] @@ -126,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin): transfer_data = [ "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", - "clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect" + "clipInH", "clipOutH", "asset", "track", "version", "resolutionWidth", "resolutionHeight", "pixelAspect" ] # pass data to version From 6d0fd510bf56c2e169cf75ea359f81d7273cfcda Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 5 Mar 2020 14:16:23 +0100 Subject: [PATCH 378/434] fix(global): adding collector for fps and pixel_aspect --- .../plugins/global/publish/collect_instance_anatomy_data.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py index 825c48dcf4..4afcac118c 100644 --- a/pype/plugins/global/publish/collect_instance_anatomy_data.py +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -108,9 +108,13 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): if resolution_height: anatomy_data["resolution_height"] = resolution_height + pixel_aspect = instance.data.get("pixelAspect") + if pixel_aspect: + anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect)) + fps = instance.data.get("fps") if resolution_height: - anatomy_data["fps"] = fps + anatomy_data["fps"] = float("{:0.2f}".format(fps)) instance.data["projectEntity"] = project_entity instance.data["assetEntity"] = asset_entity From eb261734cdc233bce29284219760cd962a02d0f9 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 5 Mar 2020 14:16:56 +0100 Subject: [PATCH 379/434] fix(nks): passing fps and pixel_aspect to instance data for anatomy --- pype/plugins/nukestudio/publish/collect_hierarchy_context.py | 3 +++ pype/plugins/nukestudio/publish/collect_plates.py | 2 +- 2 files changed, 4 insertions(+), 
1 deletion(-)

diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
index ac7a58fe7e..5bc9bea7dd 100644
--- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
+++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
@@ -42,6 +42,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
         width = int(sequence.format().width())
         height = int(sequence.format().height())
         pixel_aspect = sequence.format().pixelAspect()
+        fps = context.data["fps"]

         # build data for inner nukestudio project property
         data = {
@@ -164,6 +165,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
             "resolutionWidth": width,
             "resolutionHeight": height,
             "pixelAspect": pixel_aspect,
+            "fps": fps,
             "tasks": instance.data["tasks"]
         })

@@ -228,6 +230,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
             instance.data["resolutionHeight"] = s_asset_data[
                 "resolutionHeight"]
             instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
+            instance.data["fps"] = s_asset_data["fps"]

             # adding frame start if any on instance
             start_frame = s_asset_data.get("startingFrame")
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index b624cf0edc..d08f69d4bb 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -126,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
         transfer_data = [
             "handleStart", "handleEnd", "sourceIn", "sourceOut",
             "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
-            "clipInH", "clipOutH", "asset", "track", "version", "resolutionWidth", "resolutionHeight", "pixelAspect"
+            "clipInH", "clipOutH", "asset", "track", "version", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps"
         ]

         # pass data to version
From f4caf8e6f46a2b17883fa75d6c03a5e7e9084bd0 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Thu, 5 Mar 2020 15:43:07 +0100
Subject: [PATCH 380/434] fix(nks): didn't format anatomy with hierarchy and pixel_aspect
---
 pype/plugins/nukestudio/publish/extract_effects.py | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py
index a8db5826b8..5c9ee97f2b 100644
--- a/pype/plugins/nukestudio/publish/extract_effects.py
+++ b/pype/plugins/nukestudio/publish/extract_effects.py
@@ -196,7 +196,8 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
             "asset": asset_name,
             "family": instance.data["family"],
             "subset": subset_name,
-            "version": version_number
+            "version": version_number,
+            "hierarchy": instance.data["hierarchy"]
         })

         resolution_width = instance.data.get("resolutionWidth")
@@ -207,9 +208,13 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
         if resolution_height:
             anatomy_data["resolution_height"] = resolution_height

+        pixel_aspect = instance.data.get("pixelAspect")
+        if pixel_aspect:
+            anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect))
+
         fps = instance.data.get("fps")
         if resolution_height:
-            anatomy_data["fps"] = fps
+            anatomy_data["fps"] = float("{:0.2f}".format(fps))

         instance.data["projectEntity"] = project_entity
         instance.data["assetEntity"] = asset_entity
From efd9305438e6de93afe8fb25d28af419dc1cd6e3 Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 9 Mar 2020 17:02:37 +0100
Subject: [PATCH 381/434] status factory is initialized before checker thread starts
---
 pype/ftrack/ftrack_server/sub_event_status.py | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/pype/ftrack/ftrack_server/sub_event_status.py b/pype/ftrack/ftrack_server/sub_event_status.py
index 1a15a1f28d..d3e6a3d647 100644
--- a/pype/ftrack/ftrack_server/sub_event_status.py
+++ b/pype/ftrack/ftrack_server/sub_event_status.py
@@ -369,13 +369,6 @@ def main(args):
     # store socket connection object
     ObjectFactory.sock = sock

-    statuse_names = {
-        "main": "Main process",
-        "storer": "Event Storer",
-        "processor": "Event Processor"
-    }
-
-    ObjectFactory.status_factory = StatusFactory(statuse_names)
     ObjectFactory.status_factory["main"].update(server_info)
     _returncode = 0
     try:
@@ -429,6 +422,13 @@ if __name__ == "__main__":
     signal.signal(signal.SIGINT, signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)

+    statuse_names = {
+        "main": "Main process",
+        "storer": "Event Storer",
+        "processor": "Event Processor"
+    }
+    ObjectFactory.status_factory = StatusFactory(statuse_names)
+
     checker_thread = OutputChecker()
     ObjectFactory.checker_thread = checker_thread
     checker_thread.start()
From 3b83a1f480e865dea2c4483aa785ca20597c3087 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Mon, 9 Mar 2020 17:41:20 +0100
Subject: [PATCH 382/434] fix(nks): loading clips with no slates
---
 pype/nukestudio/lib.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py
index e7b7232938..774a9d45bf 100644
--- a/pype/nukestudio/lib.py
+++ b/pype/nukestudio/lib.py
@@ -595,10 +595,11 @@ class ClipLoader:
             (f for f in self.context["version"]["data"]["families"]
              if "slate" in f), None) or bool(((
-                clip_in - clip_out + 1) + handle_start + handle_end
+                clip_out - clip_in + 1) + handle_start + handle_end
             ) - media_duration)
         log.debug("__ slate_on: `{}`".format(slate_on))

+        # calculate slate differences
         if slate_on:
             media_duration -= 1
@@ -638,7 +639,7 @@ class ClipLoader:
         track_item.setSourceOut(media_duration - handle_end)

         track_item.setTimelineOut(clip_out)
-
+        track_item.setPlaybackSpeed(1)
         self.active_track.addTrackItem(track_item)

         log.info("Loading clips: `{}`".format(self.data["clip_name"]))
From ef51f1ed513bd8d740533710eb5509a684647088 Mon Sep 17 00:00:00 2001
From: Jakub Jezek
Date: Mon, 9 Mar 2020 17:43:31 +0100
Subject: [PATCH 383/434] feat(nks): print repre _id
---
 .../nukestudio/load/load_sequences_to_timeline_asset_origin.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py
index 09d9b1a4bb..2ee2409b86 100644
--- a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py
+++ b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py
@@ -27,6 +27,8 @@ class LoadSequencesToTimelineAssetOrigin(api.Loader):
         })

         self.log.debug("_ context: `{}`".format(context))
+        self.log.debug("_ representation._id: `{}`".format(
+            context["representation"]["_id"]))

         clip_loader = lib.ClipLoader(self, context, **data)
         clip_loader.load()
From a206b8fe7ad2c9ce121de8ca635cce6ca54f150d Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Wed, 11 Mar 2020 17:37:57 +0100
Subject: [PATCH 384/434] hotfix: add textures family to integrator
---
 pype/plugins/global/publish/integrate_new.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index 1d061af173..aa214f36cb
100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -80,7 +80,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "matchmove", "image" "source", - "assembly" + "assembly", + "textures" ] exclude_families = ["clip"] db_representation_context_keys = [ From 02014e20cfec96ba2f188c2d21c2522dca2ebf0a Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Thu, 12 Mar 2020 11:22:53 +0100 Subject: [PATCH 385/434] fix(global): maya is collecting fps as string --- .../global/publish/collect_instance_anatomy_data.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py index 4afcac118c..06a25b7c8a 100644 --- a/pype/plugins/global/publish/collect_instance_anatomy_data.py +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -110,11 +110,13 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): pixel_aspect = instance.data.get("pixelAspect") if pixel_aspect: - anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect)) + anatomy_data["pixel_aspect"] = float("{:0.2f}".format( + float(pixel_aspect))) fps = instance.data.get("fps") - if resolution_height: - anatomy_data["fps"] = float("{:0.2f}".format(fps)) + if fps: + anatomy_data["fps"] = float("{:0.2f}".format( + float(fps))) instance.data["projectEntity"] = project_entity instance.data["assetEntity"] = asset_entity From 6caa339d8254a65583e032cff9edfa90df82164b Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 12:04:46 +0100 Subject: [PATCH 386/434] feat(global): adding no handles to extract burnin --- pype/plugins/global/publish/extract_burnin.py | 33 +++++++++++++++---- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 008bebb271..3d5de28153 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -4,7 +4,6 @@ import copy import pype.api import pyblish -from pypeapp import config class ExtractBurnin(pype.api.Extractor): @@ -30,6 +29,8 @@ class ExtractBurnin(pype.api.Extractor): 'version', instance.context.data.get('version')) frame_start = int(instance.data.get("frameStart") or 0) frame_end = int(instance.data.get("frameEnd") or 1) + handle_start = instance.data.get("handleStart") + handle_end = instance.data.get("handleEnd") duration = frame_end - frame_start + 1 prep_data = copy.deepcopy(instance.data["anatomyData"]) @@ -59,6 +60,9 @@ class ExtractBurnin(pype.api.Extractor): is_sequence = "sequence" in repre.get("tags", []) + # no handles switch from profile tags + no_handles = "no-handles" in repre.get("tags", []) + stagingdir = repre["stagingDir"] filename = "{0}".format(repre["files"]) @@ -90,17 +94,32 @@ class ExtractBurnin(pype.api.Extractor): filled_anatomy = anatomy.format_all(_prep_data) _prep_data["anatomy"] = filled_anatomy.get_solved() + # copy frame range variables + frame_start_cp = frame_start + frame_end_cp = frame_end + duration_cp = duration + + if no_handles: + frame_start_cp = frame_start + handle_start + frame_end_cp = frame_end - handle_end + duration_cp = frame_end_cp - frame_start_cp + 1 + _prep_data.update({ + "frame_start": frame_start_cp, + "frame_end": frame_end_cp, + "duration": duration_cp, + }) + # dealing with slates - slate_frame_start = frame_start - slate_frame_end = frame_end - slate_duration = 
duration + slate_frame_start = frame_start_cp + slate_frame_end = frame_end_cp + slate_duration = duration_cp # exception for slate workflow if ("slate" in instance.data["families"]): if "slate-frame" in repre.get("tags", []): - slate_frame_start = frame_start - 1 - slate_frame_end = frame_end - slate_duration = duration + 1 + slate_frame_start = frame_start_cp - 1 + slate_frame_end = frame_end_cp + slate_duration = duration_cp + 1 self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start)) From 0123c8c5776fc97887a7202082c4d27f84253004 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 12:11:35 +0100 Subject: [PATCH 387/434] feat(global): extract review no handles tag --- pype/plugins/global/publish/extract_review.py | 45 +++++++++++++++---- 1 file changed, 36 insertions(+), 9 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index f5dba108c5..c7f286c3e2 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -31,12 +31,17 @@ class ExtractReview(pyblish.api.InstancePlugin): output_profiles = self.outputs or {} inst_data = instance.data - fps = inst_data.get("fps") - start_frame = inst_data.get("frameStart") - resolution_width = inst_data.get("resolutionWidth", to_width) - resolution_height = inst_data.get("resolutionHeight", to_height) + fps = float(inst_data.get("fps")) + frame_start = inst_data.get("frameStart") + frame_end = inst_data.get("frameEnd") + handle_start = inst_data.get("handleStart") + handle_end = inst_data.get("handleEnd") pixel_aspect = inst_data.get("pixelAspect", 1) self.log.debug("Families In: `{}`".format(inst_data["families"])) + self.log.debug("__ frame_start: {}".format(frame_start)) + self.log.debug("__ frame_end: {}".format(frame_end)) + self.log.debug("__ handle_start: {}".format(handle_start)) + self.log.debug("__ handle_end: {}".format(handle_end)) # get representation and loop them representations = inst_data["representations"] @@ -73,6 +78,9 @@ class ExtractReview(pyblish.api.InstancePlugin): is_sequence = ("sequence" in p_tags) and (ext in ( "png", "jpg", "jpeg")) + # no handles switch from profile tags + no_handles = "no-handles" in p_tags + self.log.debug("Profile name: {}".format(name)) if not ext: @@ -142,6 +150,7 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.info("new_tags: `{}`".format(new_tags)) input_args = [] + output_args = [] # overrides output file input_args.append("-y") @@ -152,12 +161,20 @@ class ExtractReview(pyblish.api.InstancePlugin): # necessary input data # adds start arg only if image sequence if isinstance(repre["files"], list): + if frame_start != repre.get("detectedStart", frame_start): + frame_start = repre.get("detectedStart") + + # exclude handle if no handles defined + if no_handles: + frame_start += handle_start - if start_frame != repre.get("detectedStart", start_frame): - start_frame = repre.get("detectedStart") input_args.append( "-start_number {0} -framerate {1}".format( - start_frame, fps)) + frame_start, fps)) + else: + if no_handles: + start_sec = float(handle_start) / fps + input_args.append("-ss {:0.2f}".format(start_sec)) input_args.append("-i {}".format(full_input_path)) @@ -191,7 +208,6 @@ class ExtractReview(pyblish.api.InstancePlugin): ] ) - output_args = [] codec_args = profile.get('codec', []) output_args.extend(codec_args) # preset's output data @@ -238,6 +254,13 @@ class ExtractReview(pyblish.api.InstancePlugin): # In case audio is longer than 
video. output_args.append("-shortest") + if no_handles: + duration_sec = float( + (frame_end - ( + frame_start + handle_start + ) + 1) - handle_end) / fps + output_args.append("-t {:0.2f}".format(duration_sec)) + # output filename output_args.append(full_output_path) @@ -321,6 +344,7 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug( "_ output_args: `{}`".format(output_args)) + if is_sequence: stg_dir = os.path.dirname(full_output_path) @@ -358,7 +382,10 @@ class ExtractReview(pyblish.api.InstancePlugin): "stagingDir": stg_dir, "files": os.listdir(stg_dir) }) - + if no_handles: + repre_new.update({ + "outputName": name + "_noHandles" + }) if repre_new.get('preview'): repre_new.pop("preview") if repre_new.get('thumbnail'): From 5c589dd0232accf72da96ea9b65e31657cb81881 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 12:34:08 +0100 Subject: [PATCH 388/434] feat(global): delivery resolution to preset - also fixes of reformating --- pype/plugins/global/publish/extract_review.py | 82 ++++++++++++------- .../global/publish/extract_review_slate.py | 33 +++++--- pype/plugins/nuke/publish/collect_writes.py | 1 - 3 files changed, 75 insertions(+), 41 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index f5dba108c5..b7b6efafb8 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -23,18 +23,21 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs = {} ext_filter = [] + to_width = 1920 + to_height = 1080 def process(self, instance): - to_width = 1920 - to_height = 1080 output_profiles = self.outputs or {} inst_data = instance.data - fps = inst_data.get("fps") - start_frame = inst_data.get("frameStart") - resolution_width = inst_data.get("resolutionWidth", to_width) - resolution_height = inst_data.get("resolutionHeight", to_height) + fps = float(inst_data.get("fps")) + frame_start = inst_data.get("frameStart") + frame_end = inst_data.get("frameEnd") + handle_start = inst_data.get("handleStart") + handle_end = inst_data.get("handleEnd") + resolution_width = inst_data.get("resolutionWidth", self.to_width) + resolution_height = inst_data.get("resolutionHeight", self.to_height) pixel_aspect = inst_data.get("pixelAspect", 1) self.log.debug("Families In: `{}`".format(inst_data["families"])) @@ -198,30 +201,42 @@ class ExtractReview(pyblish.api.InstancePlugin): output_args.extend(profile.get('output', [])) # defining image ratios - resolution_ratio = float(resolution_width / ( - resolution_height * pixel_aspect)) - delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) + resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height + delivery_ratio = float(self.to_width) / float(self.to_height) + self.log.debug( + "__ resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug( + "__ delivery_ratio: `{}`".format(delivery_ratio)) # get scale factor - scale_factor = to_height / ( + scale_factor = float(self.to_height) / ( resolution_height * pixel_aspect) - self.log.debug(scale_factor) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float( + "{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float( + "{:0.2f}".format(delivery_ratio)) + + if resolution_ratio_test < delivery_ratio_test: + scale_factor = float(self.to_width) / ( + resolution_width * pixel_aspect) + + self.log.debug("__ scale_factor: 
`{}`".format(scale_factor)) # letter_box lb = profile.get('letter_box', 0) if lb != 0: - ffmpet_width = to_width - ffmpet_height = to_height + ffmpeg_width = self.to_width + ffmpeg_height = self.to_height if "reformat" not in p_tags: lb /= pixel_aspect - if resolution_ratio != delivery_ratio: - ffmpet_width = resolution_width - ffmpet_height = int( + if resolution_ratio_test != delivery_ratio_test: + ffmpeg_width = resolution_width + ffmpeg_height = int( resolution_height * pixel_aspect) else: - if resolution_ratio != delivery_ratio: + if resolution_ratio_test != delivery_ratio_test: lb /= scale_factor else: lb /= pixel_aspect @@ -233,7 +248,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "c=black,drawbox=0:ih-round((ih-(iw*(" "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" "/2):t=fill:c=black").format( - ffmpet_width, ffmpet_height, lb)) + ffmpeg_width, ffmpeg_height, lb)) # In case audio is longer than video. output_args.append("-shortest") @@ -252,24 +267,26 @@ class ExtractReview(pyblish.api.InstancePlugin): # scaling none square pixels and 1920 width if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: + if resolution_ratio_test < delivery_ratio_test: self.log.debug("lower then delivery") - width_scale = int(to_width * scale_factor) + width_scale = int(self.to_width * scale_factor) width_half_pad = int(( - to_width - width_scale)/2) - height_scale = to_height + self.to_width - width_scale)/2) + height_scale = self.to_height height_half_pad = 0 else: self.log.debug("heigher then delivery") - width_scale = to_width + width_scale = self.to_width width_half_pad = 0 - scale_factor = float(to_width) / float( - resolution_width) - self.log.debug(scale_factor) + scale_factor = float(self.to_width) / (float( + resolution_width) * pixel_aspect) + self.log.debug( + "__ scale_factor: `{}`".format( + scale_factor)) height_scale = int( resolution_height * scale_factor) height_half_pad = int( - (to_height - height_scale)/2) + (self.to_height - height_scale)/2) self.log.debug( "__ width_scale: `{}`".format(width_scale)) @@ -287,7 +304,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "scale={0}x{1}:flags=lanczos," "pad={2}:{3}:{4}:{5}:black,setsar=1" ).format(width_scale, height_scale, - to_width, to_height, + self.to_width, self.to_height, width_half_pad, height_half_pad ) @@ -372,6 +389,11 @@ class ExtractReview(pyblish.api.InstancePlugin): if "delete" in repre.get("tags", []): representations_new.remove(repre) + instance.data.update({ + "reviewToWidth": self.to_width, + "reviewToHeight": self.to_height + }) + self.log.debug( "new representations: {}".format(representations_new)) instance.data["representations"] = representations_new diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 699ed4a5eb..8c33a0d853 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -24,24 +24,36 @@ class ExtractReviewSlate(pype.api.Extractor): slate_path = inst_data.get("slateFrame") ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") - to_width = 1920 - to_height = 1080 + # values are set in ExtractReview + to_width = inst_data["reviewToWidth"] + to_height = inst_data["reviewToHeight"] + resolution_width = inst_data.get("resolutionWidth", to_width) resolution_height = inst_data.get("resolutionHeight", to_height) pixel_aspect = inst_data.get("pixelAspect", 1) fps = inst_data.get("fps") # defining image ratios - resolution_ratio = float(resolution_width / ( - 
resolution_height * pixel_aspect)) + resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) + self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio)) # get scale factor - scale_factor = to_height / ( + scale_factor = float(to_height) / ( resolution_height * pixel_aspect) - self.log.debug(scale_factor) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float( + "{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float( + "{:0.2f}".format(delivery_ratio)) + + if resolution_ratio_test < delivery_ratio_test: + scale_factor = float(to_width) / ( + resolution_width * pixel_aspect) + + self.log.debug("__ scale_factor: `{}`".format(scale_factor)) for i, repre in enumerate(inst_data["representations"]): _remove_at_end = [] @@ -95,7 +107,7 @@ class ExtractReviewSlate(pype.api.Extractor): # scaling none square pixels and 1920 width if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: + if resolution_ratio_test < delivery_ratio_test: self.log.debug("lower then delivery") width_scale = int(to_width * scale_factor) width_half_pad = int(( @@ -106,7 +118,8 @@ class ExtractReviewSlate(pype.api.Extractor): self.log.debug("heigher then delivery") width_scale = to_width width_half_pad = 0 - scale_factor = float(to_width) / float(resolution_width) + scale_factor = float(to_width) / (float( + resolution_width) * pixel_aspect) self.log.debug(scale_factor) height_scale = int( resolution_height * scale_factor) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 993b8574f5..a547fd70bd 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -46,7 +46,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): ) if node["use_limit"].getValue(): - handles = 0 first_frame = int(node["first"].getValue()) last_frame = int(node["last"].getValue()) From 787e9d1295dfa1a7a115d22f14fee8e52dfe8c93 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 14:31:01 +0100 Subject: [PATCH 389/434] feat(nks): optional version sync with project workfile --- .../nukestudio/publish/collect_clips.py | 4 +--- .../publish/collect_instance_version.py | 20 +++++++++++++++++++ .../nukestudio/publish/collect_plates.py | 9 ++++++++- 3 files changed, 29 insertions(+), 4 deletions(-) create mode 100644 pype/plugins/nukestudio/publish/collect_instance_version.py diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index b8654b0784..6a1dad9a6d 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -18,7 +18,6 @@ class CollectClips(api.ContextPlugin): context.data["assetsShared"] = dict() projectdata = context.data["projectEntity"]["data"] - version = context.data.get("version", "001") sequence = context.data.get("activeSequence") selection = context.data.get("selection") @@ -108,8 +107,7 @@ class CollectClips(api.ContextPlugin): "family": "clip", "families": [], "handleStart": projectdata.get("handleStart", 0), - "handleEnd": projectdata.get("handleEnd", 0), - "version": int(version)}) + "handleEnd": projectdata.get("handleEnd", 0)}) instance = context.create_instance(**data) diff --git 
a/pype/plugins/nukestudio/publish/collect_instance_version.py b/pype/plugins/nukestudio/publish/collect_instance_version.py new file mode 100644 index 0000000000..3e2eb8e8f8 --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_instance_version.py @@ -0,0 +1,20 @@ +from pyblish import api + +class CollectInstanceVersion(api.InstancePlugin): + """ Collecting versions of Hiero project into instances + + If activated then any subset version is created in + version of the actual project. + """ + + order = api.CollectorOrder + 0.015 + label = "Collect Instance Version" + + optional = True + active = True + + def process(self, instance): + version = instance.context.data.get("version", "001") + instance.data.update({ + "version": int(version) + }) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index d08f69d4bb..4ed281f0ee 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -126,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin): transfer_data = [ "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", - "clipInH", "clipOutH", "asset", "track", "version", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps" + "clipInH", "clipOutH", "asset", "track", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps" ] # pass data to version @@ -141,6 +141,13 @@ class CollectPlatesData(api.InstancePlugin): "fps": instance.context.data["fps"] }) + version = instance.data.get("version") + if version: + version_data.update({ + "version": version + }) + + try: basename, ext = os.path.splitext(source_file) head, padding = os.path.splitext(basename) From fa4c19083d5e0d48887b2a8bf3df506ccb792205 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 14:35:03 +0100 Subject: [PATCH 390/434] fix(nks): no need to have class activation switcher - it will be controlled by presets --- pype/plugins/nukestudio/publish/collect_instance_version.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pype/plugins/nukestudio/publish/collect_instance_version.py b/pype/plugins/nukestudio/publish/collect_instance_version.py index 3e2eb8e8f8..82cbf201d8 100644 --- a/pype/plugins/nukestudio/publish/collect_instance_version.py +++ b/pype/plugins/nukestudio/publish/collect_instance_version.py @@ -1,5 +1,6 @@ from pyblish import api + class CollectInstanceVersion(api.InstancePlugin): """ Collecting versions of Hiero project into instances @@ -10,9 +11,6 @@ class CollectInstanceVersion(api.InstancePlugin): order = api.CollectorOrder + 0.015 label = "Collect Instance Version" - optional = True - active = True - def process(self, instance): version = instance.context.data.get("version", "001") instance.data.update({ From 569818a25db263b3dbdd59c1b7d952a939744e92 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 14:42:36 +0100 Subject: [PATCH 391/434] fix(nks): moving order just behind Collect Clips --- pype/plugins/nukestudio/publish/collect_instance_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/publish/collect_instance_version.py b/pype/plugins/nukestudio/publish/collect_instance_version.py index 82cbf201d8..b79ccbdf54 100644 --- a/pype/plugins/nukestudio/publish/collect_instance_version.py +++ b/pype/plugins/nukestudio/publish/collect_instance_version.py @@ -8,7 +8,7 @@ class 
CollectInstanceVersion(api.InstancePlugin): version of the actual project. """ - order = api.CollectorOrder + 0.015 + order = api.CollectorOrder + 0.011 label = "Collect Instance Version" def process(self, instance): From 76cb3b37e27b70840b999ceff69df02551078ef5 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 16:42:25 +0100 Subject: [PATCH 392/434] fix(global): adding back resolution attribtes wrong commitment had erased them --- pype/plugins/global/publish/extract_review.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index c7f286c3e2..81d7e1668a 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -37,6 +37,8 @@ class ExtractReview(pyblish.api.InstancePlugin): handle_start = inst_data.get("handleStart") handle_end = inst_data.get("handleEnd") pixel_aspect = inst_data.get("pixelAspect", 1) + resolution_width = inst_data.get("resolutionWidth", self.to_width) + resolution_height = inst_data.get("resolutionHeight", self.to_height) self.log.debug("Families In: `{}`".format(inst_data["families"])) self.log.debug("__ frame_start: {}".format(frame_start)) self.log.debug("__ frame_end: {}".format(frame_end)) @@ -344,7 +346,6 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug( "_ output_args: `{}`".format(output_args)) - if is_sequence: stg_dir = os.path.dirname(full_output_path) From d484e5108c034edcc6d84de6038a23e10753f481 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 18:02:52 +0100 Subject: [PATCH 393/434] fix(global): self.to_width and height is in another PR --- pype/plugins/global/publish/extract_review.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 81d7e1668a..d03de6ad61 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -37,8 +37,8 @@ class ExtractReview(pyblish.api.InstancePlugin): handle_start = inst_data.get("handleStart") handle_end = inst_data.get("handleEnd") pixel_aspect = inst_data.get("pixelAspect", 1) - resolution_width = inst_data.get("resolutionWidth", self.to_width) - resolution_height = inst_data.get("resolutionHeight", self.to_height) + resolution_width = inst_data.get("resolutionWidth", to_width) + resolution_height = inst_data.get("resolutionHeight", to_height) self.log.debug("Families In: `{}`".format(inst_data["families"])) self.log.debug("__ frame_start: {}".format(frame_start)) self.log.debug("__ frame_end: {}".format(frame_end)) From 7244f78b66533a5e59dd4abc8df34b364fb07171 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 18:04:59 +0100 Subject: [PATCH 394/434] feat(nuke): adding icon for pype favorite folders --- res/icons/folder-favorite.png | Bin 0 -> 7008 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 res/icons/folder-favorite.png diff --git a/res/icons/folder-favorite.png b/res/icons/folder-favorite.png new file mode 100644 index 0000000000000000000000000000000000000000..198b289e9ed39e6f8e13b6debf5aee857039a032 GIT binary patch literal 7008 zcmaJ_XH-*Lus(p1jw1C+vw}!fQKYLhk*Mrn zP>c@d{}ymU+okrNVpIC##aHeHMl#uRDV)Yn5ATQkFKF~hjia{WF1|Zo9P-reO2-FW zs+i=FrlgQeB?)J(<&BF~8m|YHI{d@>K5)3yG}ey&3iv!Ok=n^)xLkiIjC4VA=w(RNOI3UmoSJ-sf+#ZgA!>3L@%^r?No`;GK z2MSNK?RlTX{`i)VEJonR(e{pmg`d-sm9!pmZDt7i6N^7V#_$K7(r8H!=>iw)UWF1n 
zWBmkSx!1wNuBA2AwITi+KdB1GROGJ&>wR%c#echOrvQ6?u|w9}>CpgQsztBHH

XIfm zg%mw%MDf}?omK?pnK_|q(z@JsLHwQLwbw@!Bo+=${+Fk2d*bBe2`l-S*iY7OaLe~kN^YP2dsTg& zs}#YmBaG)$B-g4}bH#jPfAA##$;-s2n@z7i9O<|>6;WTeQ60w4K{$A=ukri~kF-Zd z((QL|Sn<@irtEKhaP-irYv#@(fj`9^H-4_r6gx=u{kv+Hu6(=|WqvH@X6+4r-~Ihw zcIva?lN|WVTq|lT|6UY17V*7yVO4DQO&e=}l4wn@rvJY-@*BjjZ6!`c?VsB%_2Cna z@9RqZ#wJS!4weL((_1l?=&pe5nG=s~E^)a(`0wKdp|ds*KH?rZ9nsz9-1q0fl=%~5 zX_QcsICqh5?Dx%Xx-`?70xuG2J!fq{~17Bq+p?`_tbMf=L8DPUVIepL zXZ&stQH5f@nMrRgF>!PptyBLH;B6&hEk(#dIjC*lPm1h?>{DVn$-n5-FxZ(i`sK;f z`D5b9-C64D@W|b&&_Wxiz=61oQ+&SoK+W?ud!An^u&>eHw)VvTk>$G3qDZd8sXj>6 z-)f`Ya{_K@l!~oOwBSc?^!AlCUR@mUi#Lf9x{-8~s9M@+wKyquEE8jerNoJGEqw9I zz8U2m!Cv>l&*evHcPjsm*SbCHPyzS)b4u3YF$05NPYS})WulywhP3c_yoBES)0#46 z!fZqZ;e|060S8Uq{KvZ}h@8ic5j#0#u3}H$pGJwTDb^q+3og{*G$>ZY*smoMGy5UA zTFjrKiADnPb%{rH!-W4!$l9Z2>1u<4z1Q(hYxZ~+n|Q>wsbzU;wlpi|wa~)&^xiqI z@NLxcem3G&u61eHH!(gw_-aMIXjG~N(NnLz-9C#n6s@K-WO(-L#GZm%WmG;VXr=}; zE+Z*T&FW}MKh-QOJ2X3c^??b6O%Lk5+0tmDV)E5cAL>Q7XqQbKnODY^?PpOoM{Fa@ zm1XF498n>%L-%mH?X6iu7U32=&6nR>0_wpLn~ma;}#JY1j5`}KGBzRW`3_hn?lV)4{@TBK- zBVR)md!_9IJsc+f?AxjWIctWnw~NNW^2tGk$v4!UIJ6MCO-?MAbgNmAJE)GDBapo^ zG`#fvZchcAHSqKF>V0X=sc5(*jVr^hWog8Z$LGp9q2rf;@K0RoYzk((;7)0EN^u%{#+8;* z+|s$cXJ+^@4|nP17J-~a(H=IFJj|WkA(Ir6eBjh6f$VubV+PCjbY6&=N3a;*+QcjB znn3pAbQ=562tp$@(bzt4KzVJt3y2<1+Y5MggL*DCM@0V1FmydzujP_JA6|vfBuq-q zdwH4fB@-do@pv-!?Q51wmlm^Wp2{x{;9xWp+r97R;bThxw@aF^XkPNx1%w};j zA3L=%&5t!^l`73?iZ-I3xp7kFex+Kt`)J7vEvNf)bUxN--*Sf#J(}C&rW@5;pXK+x zz&vx^$o*j->CNP8RBVmtxrS9tvk1zsE&PZl)9xCxov@i1eg}(y1R5$Z6%0*om$KU) zWY{b%(mwrhxY0L+X|*EGQ^>r}RDlQC(wPr%n*hPC7!*Z;T%#!hv z2KVFS@ICk*7oEsJ8j4F8O6%N*K@&RlICB5Wx*iZQt{7py>z#t?@7= zf?^4u1rbs%MO%wlEobiDO^rcBt!Nh1R@YVl%6}0RPI%{yLOH50KGdNCkEaph}8$yW^Pv|N50wBi>rMb`l6M6Xl8N5&t+*8?@A7eTbs-y z{Z_);VK;3%eKVRZ(ve{y*IE1&xZ%TanN??BOZ~o5?KFv_kU&)dFxTr{tl#G5{kJMm}mSSKg+th^H@aMZ1`C;WGbTckxca`!qm za0S5ZC=a9kpz^}{Fgv~zsqtVhf?N6FBHj?Wj2}QoT1y-e7L;!eUN6o;g_m!c(6T&K z%NbgMZnO?+E7e9nF$N(zI0pMC2qU;(xLs^v4Rm71MK*GY<#U=QJa4rgg>SJ zX3L(|?IGjOP$%iqUQ!pR7biiMlI90sL%dGERUQz_-@X@jlT@`I!6l5bzkgOdGral) zLwf{2VZd6}O9^4f*uj3RUo^ygGTDKYo_Ue>CqKL<*f?72*w$r^*Nmsu5od4BkKiWCkIz3W z^O5EPDX3mA9y_@g5Sx;=S`IuwZ>6EMHkGM2LI}-Vj%2V#3qzy-nzF@eFim0esX|VS zD3|@=JSO33MZ5CX%QvbUlem+`!EmopANuyM@-ybTuE$G;8$@`kUyubn?wS|*$TJXB z2N80PF`?ufW@R8+8un`yU`ArPhk-)tD)S+MlwitlN}dE_Q8D^0ti7@Mc!_tQOhgA2 zVava`4j;i>^HyRVnryVmCdU6cG)5XJ90^;mZu2U9w_2SJpeXB*ORt}r9G|GO-pJ9C zL@pkm{P@*&kJm{h{ca9!@l4$C)Q3HUN7PkWaAR9eiQ0Si;CkuUXMDK z*@um{BsMFZZ`z_!En)r1p<8!^KvdO=QY8_uw~9_IV8C{DggT`w;<6wYD5j+OZfZIF z?4`|PG!owqAp5+Bc{kJ>t_UVp>b8ZYfKpTau|Q3AvUv<20SQiAc^)_QMxIP(F8o8V zk3|LLxr9Z>Df&dW>qiWL#QF;LzLU+<%tX+L@&mP0%~SH-r3`W7a)UjIZ%822=BszF z0rCkTqhhRxCu}fs+s`@X+x$7e`9|6?&D17 z6}9SmJV6X)`;`6BjJ!{Uz2Xq=V;ZsaL23Q(ZW*Ccqk%w7+0v zBY2%amla7L5@ezuocl@rHOwchVIwH7+&aFJjx4mUI&fb?9{rHSabtSR9H!tMh2FWO z;}duDG0}YZh`Oz8k30Tel_dSBm{{-6IBF#<;PNh%c-er40O)UMm-x&r>pEC1YKAR* z@{|54furP@xwjOzCfw__GrxE|x- zPw=e{Is*v)@XD=5rr_$TGAP=A?%FW-9Uh-~00w4$n0bX6Q@z2K$qz4>;BjKKZfGL2 z=C_P!X3UiWoP2^F)RjHE83C20v%LVCJ|^vW@g0G6ZC zVb{9tS?f4IW6-UY;IBK5mWJLxpP}A~i4OTW7q?vPVxb%NPZ^^e*xg6grBXzpsdfI`&=5&Nbwxfd|p>q)Do4}6X_Hv&^N@Wb0%;S4p z>w1%=25IG<+KuF^F)^{Szc108PBnMWTrvCqPePDZkWK)NaV%D9SK--ax^UEWI2;ed zbTKWQ;@FS!0`=EhS1T^w2F2z0)&U!gH38?o+stK;#D+f*~ zPmfY>;sf!YaXMSar$fVi&TKanl7mQ1+S^}2yLywuagoJi=+U6|{xCo6C2T16X$EMt zN~c2V=CIG&1Lqy}`>Nu(;phs|5a3^_bfk4rWzj!9wQIJJIyy$+ARxXJx68Jbs1#~k z*K&2-&PfC1QgZB$XK+mr%{uGGpJB6g$uyApl0B+r+7^qaqiutHf;IXh(hvylUXlcU z!;0O+tpJAOd1cVxoSOM0W;b>wRu#ShXUgy663#H(z~!8p_?@Hvan-Pe)xOLM#?NBN zsM!Q_Fisw7mP|P9Ub~Y^eU!!x?DZ_stO8Y*@e57>t!A|+wovW)1|P3T3us@Hk^H_S 
zP>SQHT)^y0ntwr63_QP)jYHdNg8^b?NM2dkD8CDg;?`9)xP&hl$%4W1;#Kd%7=+-$ zj;;4^Fbt7|%&Fl)JEnq2rUYoDM&D=Pfe~?I2M7O5tLkj<`q{L7z#A+ZsJShpAO%9e zCF0HdC%(Flw7kSb7i+llxkfZO>$b3VclQE!V!x)Tj=<>IDQ^N?{gcxVZY{zWrILB_X`VSfHQxPcpZB@cS!Sq!-y@)B08++<^6%{uv=L(g+6Y`Y|Mpb#n_=bD znC#X=cugrtwCkA9Es}-9>1egewYw1JnCi1;9+U!7=XvuXjSy`}`7tbzF_92UUEt(- zb8nUnIyxN4dgiWc-qwO1Ye>WF9w>9JOW@hfzDroDUR=oYSp;D4gFCVqH(Vuiy4Ori zn7etVW?XM(lY9v~tQTinMDGq{i!%LH@39=;8!&bWJNIvE$d!t^Sq>=k+ddhUp5bdD zndA;SIdD(ZarIJl%%!!g8Vf=hIroD_N{Qrb4Qum}MN0{dybwjg!1X|d$?$d4D zOH$SLG*^g!^5grMBapS2l*}FEO{<8H+nk`@hf8zJAxgc3OT*81@#Kxq)R<)M%7Va< z;`ukOv?=PXeuXISflQp-^L4q*{-zmrF_8sVe6DHmkHv$fAIkG;i$d>U{n}W3AnUZ) zxT?|m(ur(EPrR$@E+vz`c2>uR`EzGS$IM4P{TXXiNPukx1-ppxv_rK(Q6Z~Ip1v)r z>%U#tU0I^^DHhqcbrEPqD-Ot*rsOcMSulTcD7~8#N_!ioMIDi{ug-DYIUT~%{L=om zgH(NEOhqCh>YbXOECNkadB)PryLEqlTS^s4>_nlu_@L6kqvu`&PpV~zDA|yp3Xf~t z>tS%AGY8c4jRI%ZMWY)OiT^~im_6Blc`dF0OA zUh;F+D4`!MuuzW!(X%$E=MqaZ*RSu?@2}|vv8FptB37#DYLMJl-?#Bq{tiP)N?X~~ zZypUhEH=>(i^oXCO(8eVG;M~!+4u@IMegSAl^@jI%r#D71JTK^ap{Ro-dd0i%aUC< zCQzqK6j7HqLD1Mnx!Q_AGhg2EiY)(W70va!@qJ>+e%JrH;$BQ+mJLC2O+Zn;?LVcm zpx!QXsQ3Q%S$oG|BzKR>0t}PkaXVE=n%Ya1hGo*Ho+!yreM4E96P7H)VRcsJ!=1BPGO(6*vBjw&k+B%c#~zvC8f7S#2*Xak|6x`PY23CjUT=J0ay8r zfD)-}m0C5&Ip8``4kgLqHLU8$l7(Rr5K(Wu$mgTqiGU_)zvb0f&e%XKHU#_Ks}zmgAkYu1 zhOmE|RV0v$B(IGYjO0z618&ZFKOyewwNaG0sV}KdleHt}sVH2ySZTHY8nu8|q;lwE xARk7*+GD-?)x`9eFKXQTd{*#M{mx;!6dOr9rsEF%5BPf Date: Fri, 13 Mar 2020 19:24:07 +0100 Subject: [PATCH 395/434] feat(nuke): adding context_favorite --- pype/nuke/utils.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/pype/nuke/utils.py b/pype/nuke/utils.py index 7583221696..c7f98efaea 100644 --- a/pype/nuke/utils.py +++ b/pype/nuke/utils.py @@ -3,6 +3,23 @@ import nuke from avalon.nuke import lib as anlib +def set_context_favorites(favorites={}): + """ Addig favorite folders to nuke's browser + + Argumets: + favorites (dict): couples of {name:path} + """ + dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + icon_path = os.path.join(dir, 'res', 'icons', 'folder-favorite.png') + + for name, path in favorites.items(): + nuke.addFavoriteDir( + name, + path, + nuke.IMAGE | nuke.SCRIPT | nuke.GEO, + icon=icon_path) + + def get_node_outputs(node): ''' Return a dictionary of the nodes and pipes that are connected to node From 13d1e6bf4261e1b9654b01956a37655b07311d00 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 13 Mar 2020 19:24:45 +0100 Subject: [PATCH 396/434] feat(nuke): adding bookmarks --- pype/nuke/__init__.py | 2 +- pype/nuke/lib.py | 28 ++++++++++++++++++++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index f1f87e40c8..e775468996 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -93,11 +93,11 @@ def install(): # Set context settings. 
nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root") + nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") menu.install() - def launch_workfiles_app(): '''Function letting start workfiles after start of host ''' diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index dedc42fa1d..3130717a75 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -15,13 +15,12 @@ import nuke from .presets import ( get_colorspace_preset, get_node_dataflow_preset, - get_node_colorspace_preset -) - -from .presets import ( + get_node_colorspace_preset, get_anatomy ) +from .utils import set_context_favorites + from pypeapp import Logger log = Logger().get_logger(__name__, "nuke") @@ -944,6 +943,27 @@ class WorkfileSettings(object): # add colorspace menu item self.set_colorspace() + def set_favorites(self): + projects_root = os.getenv("AVALON_PROJECTS") + work_dir = os.getenv("AVALON_WORKDIR") + asset = os.getenv("AVALON_ASSET") + project = os.getenv("AVALON_PROJECT") + hierarchy = os.getenv("AVALON_HIERARCHY") + favorite_items = OrderedDict() + + # project + favorite_items.update({"Projects root": projects_root}) + favorite_items.update({"Project dir": os.path.join( + projects_root, project).replace("\\", "/")}) + # shot + favorite_items.update({"Shot dir": os.path.join( + projects_root, project, + hierarchy, asset).replace("\\", "/")}) + # workdir + favorite_items.update({"Work dir": work_dir}) + + set_context_favorites(favorite_items) + def get_hierarchical_attr(entity, attr, default=None): attr_parts = attr.split('.') From ba55689b4339212e545a4c5da88800f5d362bd2a Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 20:02:06 +0100 Subject: [PATCH 397/434] fall back to context handles if not present in instance --- pype/plugins/global/publish/collect_avalon_entities.py | 5 +++++ pype/plugins/global/publish/extract_burnin.py | 8 ++++++-- pype/plugins/global/publish/extract_review.py | 7 +++++-- 3 files changed, 16 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index a429b3fc84..a4f14421f2 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -45,3 +45,8 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): context.data["projectEntity"] = project_entity context.data["assetEntity"] = asset_entity + + data = asset_entity['data'] + context.data['handles'] = int(data.get("handles", 0)) + context.data["handleStart"] = int(data.get( "handleStart", 0)) + context.data["handleEnd"] = int(data.get("handleEnd", 0)) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 3d5de28153..faecbb47a7 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -25,12 +25,16 @@ class ExtractBurnin(pype.api.Extractor): if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") + context_data = instance.context.data + version = instance.data.get( 'version', instance.context.data.get('version')) frame_start = int(instance.data.get("frameStart") or 0) frame_end = int(instance.data.get("frameEnd") or 1) - handle_start = instance.data.get("handleStart") - handle_end = instance.data.get("handleEnd") + handle_start = instance.data.get("handleStart", + context_data.get("handleStart")) + handle_end = instance.data.get("handleEnd", + 
context_data.get("handleEnd")) duration = frame_end - frame_start + 1 prep_data = copy.deepcopy(instance.data["anatomyData"]) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index d03de6ad61..7f88a89004 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -31,11 +31,14 @@ class ExtractReview(pyblish.api.InstancePlugin): output_profiles = self.outputs or {} inst_data = instance.data + context_data = instance.context.data fps = float(inst_data.get("fps")) frame_start = inst_data.get("frameStart") frame_end = inst_data.get("frameEnd") - handle_start = inst_data.get("handleStart") - handle_end = inst_data.get("handleEnd") + handle_start = inst_data.get("handleStart", + context_data.get("handleStart")) + handle_end = inst_data.get("handleEnd", + context_data.get("handleEnd")) pixel_aspect = inst_data.get("pixelAspect", 1) resolution_width = inst_data.get("resolutionWidth", to_width) resolution_height = inst_data.get("resolutionHeight", to_height) From 300ea97f1ecbce5c6f73761cae469b948b862af8 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 20:32:29 +0100 Subject: [PATCH 398/434] move handles collection to global plugins --- .../global/publish/collect_avalon_entities.py | 6 ++--- .../nuke/publish/collect_asset_info.py | 25 ------------------- 2 files changed, 3 insertions(+), 28 deletions(-) delete mode 100644 pype/plugins/nuke/publish/collect_asset_info.py diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index a4f14421f2..20899361c5 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -30,7 +30,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): assert project_entity, ( "Project '{0}' was not found." 
).format(project_name) - self.log.debug("Collected Project entity \"{}\"".format(project_entity)) + self.log.debug("Collected Project \"{}\"".format(project_entity)) asset_entity = io.find_one({ "type": "asset", @@ -41,12 +41,12 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): "No asset found by the name '{0}' in project '{1}'" ).format(asset_name, project_name) - self.log.debug("Collected Asset entity \"{}\"".format(asset_entity)) + self.log.debug("Collected Asset \"{}\"".format(asset_entity)) context.data["projectEntity"] = project_entity context.data["assetEntity"] = asset_entity data = asset_entity['data'] context.data['handles'] = int(data.get("handles", 0)) - context.data["handleStart"] = int(data.get( "handleStart", 0)) + context.data["handleStart"] = int(data.get("handleStart", 0)) context.data["handleEnd"] = int(data.get("handleEnd", 0)) diff --git a/pype/plugins/nuke/publish/collect_asset_info.py b/pype/plugins/nuke/publish/collect_asset_info.py deleted file mode 100644 index 8a8791ec36..0000000000 --- a/pype/plugins/nuke/publish/collect_asset_info.py +++ /dev/null @@ -1,25 +0,0 @@ -from avalon import api, io -import pyblish.api - - -class CollectAssetInfo(pyblish.api.ContextPlugin): - """Collect framerate.""" - - order = pyblish.api.CollectorOrder - label = "Collect Asset Info" - hosts = [ - "nuke", - "nukeassist" - ] - - def process(self, context): - asset_data = io.find_one({ - "type": "asset", - "name": api.Session["AVALON_ASSET"] - }) - self.log.info("asset_data: {}".format(asset_data)) - - context.data['handles'] = int(asset_data["data"].get("handles", 0)) - context.data["handleStart"] = int(asset_data["data"].get( - "handleStart", 0)) - context.data["handleEnd"] = int(asset_data["data"].get("handleEnd", 0)) From 7bd4182c30243ecb0f7e3803eb3157f62859f658 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 20:32:51 +0100 Subject: [PATCH 399/434] remove handles from ftrack frame range --- pype/plugins/global/publish/extract_review.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 7f88a89004..fa29fd2fe0 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -12,7 +12,8 @@ class ExtractReview(pyblish.api.InstancePlugin): otherwise the representation is ignored. All new represetnations are created and encoded by ffmpeg following - presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. To change the file extension + presets found in `pype-config/presets/plugins/global/ + publish.json:ExtractReview:outputs`. 
To change the file extension filter values use preset's attributes `ext_filter` """ @@ -171,7 +172,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # exclude handle if no handles defined if no_handles: - frame_start += handle_start + frame_start_no_handles = frame_start + handle_start + frame_end_no_handles = frame_end - handle_end input_args.append( "-start_number {0} -framerate {1}".format( @@ -180,6 +182,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if no_handles: start_sec = float(handle_start) / fps input_args.append("-ss {:0.2f}".format(start_sec)) + frame_start_no_handles += handle_start + frame_end_no_handles -= handle_end input_args.append("-i {}".format(full_input_path)) @@ -379,7 +383,7 @@ class ExtractReview(pyblish.api.InstancePlugin): "codec": codec_args, "_profile": profile, "resolutionHeight": resolution_height, - "resolutionWidth": resolution_width, + "resolutionWidth": resolution_width }) if is_sequence: repre_new.update({ @@ -388,7 +392,9 @@ class ExtractReview(pyblish.api.InstancePlugin): }) if no_handles: repre_new.update({ - "outputName": name + "_noHandles" + "outputName": name + "_noHandles", + "startFrameReview": frame_start_no_handles, + "endFrameReview": frame_end_no_handles }) if repre_new.get('preview'): repre_new.pop("preview") From 47f04e0fbaf3acf2f8cc1302674573f1aa3c6f6a Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 20:41:28 +0100 Subject: [PATCH 400/434] fix frame range on review --- pype/plugins/global/publish/extract_review.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index fa29fd2fe0..23e582edd2 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -182,8 +182,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if no_handles: start_sec = float(handle_start) / fps input_args.append("-ss {:0.2f}".format(start_sec)) - frame_start_no_handles += handle_start - frame_end_no_handles -= handle_end + frame_start_no_handles = frame_start + handle_start + frame_end_no_handles = frame_end - handle_end input_args.append("-i {}".format(full_input_path)) From 562e59880a118410604b3b4e160d1d9d8999c30f Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 23:49:03 +0100 Subject: [PATCH 401/434] use frameEndHandles for review publish --- pype/plugins/nuke/publish/collect_writes.py | 8 ++++---- pype/plugins/nuke/publish/extract_render_local.py | 4 ++-- pype/plugins/nuke/publish/validate_rendered_frames.py | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 993b8574f5..0dc7c81fae 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -36,7 +36,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_type = "mov" # Get frame range - handles = instance.context.data['handles'] handle_start = instance.context.data["handleStart"] handle_end = instance.context.data["handleEnd"] first_frame = int(nuke.root()["first_frame"].getValue()) @@ -46,7 +45,6 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): ) if node["use_limit"].getValue(): - handles = 0 first_frame = int(node["first"].getValue()) last_frame = int(node["last"].getValue()) @@ -134,8 +132,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "label": label, "handleStart": handle_start, "handleEnd": handle_end, - 
"frameStart": first_frame, - "frameEnd": last_frame, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, "outputType": output_type, "family": "write", "families": families, diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index 9b8baa468b..5467d239c2 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -27,13 +27,13 @@ class NukeRenderLocal(pype.api.Extractor): self.log.debug("instance collected: {}".format(instance.data)) - first_frame = instance.data.get("frameStart", None) + first_frame = instance.data.get("frameStartHandle", None) # exception for slate workflow if "slate" in instance.data["families"]: first_frame -= 1 - last_frame = instance.data.get("frameEnd", None) + last_frame = instance.data.get("frameEndHandle", None) node_subset_name = instance.data.get("name", None) self.log.info("Starting render") diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index 8a8bf3cc5e..6e9b91dd72 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -51,7 +51,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): collection = collections[0] frame_length = int( - instance.data["frameEnd"] - instance.data["frameStart"] + 1 + instance.data["frameEndHandle"] - instance.data["frameStartHandle"] + 1 ) if frame_length != 1: From 9265f20cc7cd440746543e7d6bc8e299045a4725 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 23:50:45 +0100 Subject: [PATCH 402/434] use frame range with handles in global review plugins --- pype/plugins/global/publish/extract_burnin.py | 19 ++++++----- pype/plugins/global/publish/extract_review.py | 33 ++++++++++--------- 2 files changed, 29 insertions(+), 23 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index faecbb47a7..1251e5c02f 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -35,17 +35,20 @@ class ExtractBurnin(pype.api.Extractor): context_data.get("handleStart")) handle_end = instance.data.get("handleEnd", context_data.get("handleEnd")) - duration = frame_end - frame_start + 1 + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + duration = frame_end_handle - frame_start_handle + 1 prep_data = copy.deepcopy(instance.data["anatomyData"]) if "slate.farm" in instance.data["families"]: - frame_start += 1 + frame_start_handle += 1 duration -= 1 prep_data.update({ - "frame_start": frame_start, - "frame_end": frame_end, + "frame_start": frame_start_handle, + "frame_end": frame_end_handle, "duration": duration, "version": int(version), "comment": instance.context.data.get("comment", ""), @@ -99,13 +102,13 @@ class ExtractBurnin(pype.api.Extractor): _prep_data["anatomy"] = filled_anatomy.get_solved() # copy frame range variables - frame_start_cp = frame_start - frame_end_cp = frame_end + frame_start_cp = frame_start_handle + frame_end_cp = frame_end_handle duration_cp = duration if no_handles: - frame_start_cp = frame_start + handle_start - frame_end_cp = frame_end - handle_end + frame_start_cp = frame_start + frame_end_cp = frame_end duration_cp = frame_end_cp - frame_start_cp + 1 _prep_data.update({ 
"frame_start": frame_start_cp, diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 23e582edd2..abe3d36758 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -166,30 +166,33 @@ class ExtractReview(pyblish.api.InstancePlugin): # necessary input data # adds start arg only if image sequence + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end if isinstance(repre["files"], list): - if frame_start != repre.get("detectedStart", frame_start): - frame_start = repre.get("detectedStart") + if frame_start_handle != repre.get("detectedStart", frame_start_handle): + frame_start_handle = repre.get("detectedStart") # exclude handle if no handles defined if no_handles: - frame_start_no_handles = frame_start + handle_start - frame_end_no_handles = frame_end - handle_end + frame_start_handle = frame_start + frame_end_handle = frame_end input_args.append( "-start_number {0} -framerate {1}".format( - frame_start, fps)) + frame_start_handle, fps)) else: if no_handles: start_sec = float(handle_start) / fps input_args.append("-ss {:0.2f}".format(start_sec)) - frame_start_no_handles = frame_start + handle_start - frame_end_no_handles = frame_end - handle_end + frame_start_handle = frame_start + frame_end_handle = frame_end input_args.append("-i {}".format(full_input_path)) for audio in instance.data.get("audio", []): offset_frames = ( - instance.data.get("startFrameReview") - + instance.data.get("frameStartFtrack") - audio["offset"] ) offset_seconds = offset_frames / fps @@ -264,10 +267,8 @@ class ExtractReview(pyblish.api.InstancePlugin): output_args.append("-shortest") if no_handles: - duration_sec = float( - (frame_end - ( - frame_start + handle_start - ) + 1) - handle_end) / fps + duration_sec = float(frame_end_handle - frame_start_handle + 1) / fps + output_args.append("-t {:0.2f}".format(duration_sec)) # output filename @@ -383,7 +384,9 @@ class ExtractReview(pyblish.api.InstancePlugin): "codec": codec_args, "_profile": profile, "resolutionHeight": resolution_height, - "resolutionWidth": resolution_width + "resolutionWidth": resolution_width, + "frameStartFtrack": frame_start_handle, + "frameEndFtrack": frame_end_handle }) if is_sequence: repre_new.update({ @@ -393,8 +396,8 @@ class ExtractReview(pyblish.api.InstancePlugin): if no_handles: repre_new.update({ "outputName": name + "_noHandles", - "startFrameReview": frame_start_no_handles, - "endFrameReview": frame_end_no_handles + "frameStartFtrack": frame_start, + "frameEndFtrack": frame_end }) if repre_new.get('preview'): repre_new.pop("preview") From 0c296ae14db22f7b961b7c8784cafee2ac2d254e Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 23:51:44 +0100 Subject: [PATCH 403/434] rename startFrameReview to frameStartFtrack --- pype/plugins/ftrack/publish/integrate_ftrack_instances.py | 4 ++-- pype/plugins/maya/publish/extract_quicktime.py | 8 ++------ .../plugins/standalonepublisher/publish/extract_review.py | 4 ++-- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index 78583b0a2f..ec57f46d61 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -73,9 +73,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ''' start_frame = 0 end_frame = 1 - if 
'endFrameReview' in comp and 'startFrameReview' in comp: + if 'frameEndFtrack' in comp and 'frameStartFtrack' in comp: end_frame += ( - comp['endFrameReview'] - comp['startFrameReview'] + comp['frameEndFtrack'] - comp['frameStartFtrack'] ) else: end_frame += ( diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py index 94b5a716a2..29d6b78051 100644 --- a/pype/plugins/maya/publish/extract_quicktime.py +++ b/pype/plugins/maya/publish/extract_quicktime.py @@ -33,17 +33,13 @@ class ExtractQuicktime(pype.api.Extractor): # if start and end frames cannot be determined, get them # from Maya timeline - start = instance.data.get("startFrameReview") - end = instance.data.get("endFrameReview") + start = instance.data.get("frameStartFtrack") + end = instance.data.get("frameEndFtrack") if start is None: start = cmds.playbackOptions(query=True, animationStartTime=True) if end is None: end = cmds.playbackOptions(query=True, animationEndTime=True) self.log.info("start: {}, end: {}".format(start, end)) - handles = instance.data.get("handles", 0) - if handles: - start -= handles - end += handles # get cameras camera = instance.data['review_camera'] diff --git a/pype/plugins/standalonepublisher/publish/extract_review.py b/pype/plugins/standalonepublisher/publish/extract_review.py index 66cdcdf4df..36793d4c62 100644 --- a/pype/plugins/standalonepublisher/publish/extract_review.py +++ b/pype/plugins/standalonepublisher/publish/extract_review.py @@ -170,8 +170,8 @@ class ExtractReviewSP(pyblish.api.InstancePlugin): "stagingDir": out_stagigng_dir, "tags": new_tags, "outputName": name, - "startFrameReview": 1, - "endFrameReview": video_len + "frameStartFtrack": 1, + "frameEndFtrack": video_len }) # cleanup thumbnail from new repre if repre_new.get("thumbnail"): From fb915f2366dd00a10443411af64122ae03bb3042 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 23:52:17 +0100 Subject: [PATCH 404/434] use frame range with handles in maya --- pype/plugins/maya/publish/collect_instances.py | 13 +++++++++---- pype/plugins/maya/publish/collect_render.py | 14 ++++++++------ pype/plugins/maya/publish/collect_review.py | 10 ++++++---- pype/plugins/maya/publish/submit_maya_deadline.py | 6 +++--- 4 files changed, 26 insertions(+), 17 deletions(-) diff --git a/pype/plugins/maya/publish/collect_instances.py b/pype/plugins/maya/publish/collect_instances.py index 39d7bcd86d..5af717ba4d 100644 --- a/pype/plugins/maya/publish/collect_instances.py +++ b/pype/plugins/maya/publish/collect_instances.py @@ -103,16 +103,22 @@ class CollectInstances(pyblish.api.ContextPlugin): # Store the exact members of the object set instance.data["setMembers"] = members - # Define nice label name = cmds.ls(objset, long=False)[0] # use short name label = "{0} ({1})".format(name, data["asset"]) + if "handles" in data: + data["handleStart"] = data["handles"] + data["handleEnd"] = data["handles"] + # Append start frame and end frame to label if present if "frameStart" and "frameEnd" in data: - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) + data["frameStartHandle"] = data["frameStart"] - data["handleStart"] + data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] + + label += " [{0}-{1}]".format(int(data["frameStartHandle"]), + int(data["frameEndHandle"])) instance.data["label"] = label @@ -122,7 +128,6 @@ class CollectInstances(pyblish.api.ContextPlugin): # user interface interested in visualising it. 
self.log.info("Found: \"%s\" " % instance.data["name"]) self.log.debug("DATA: \"%s\" " % instance.data) - def sort_by_family(instance): """Sort by family""" diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py index f31198448b..f3ea1ccee5 100644 --- a/pype/plugins/maya/publish/collect_render.py +++ b/pype/plugins/maya/publish/collect_render.py @@ -211,17 +211,19 @@ class CollectMayaRender(pyblish.api.ContextPlugin): "attachTo": attachTo, "setMembers": layer_name, "publish": True, - "frameStart": int(self.get_render_attribute("startFrame", + "frameStart": int(context.data["assetEntity"]['data']['frameStart']), + "frameEnd": int(context.data["assetEntity"]['data']['frameEnd']), + "frameStartHandle": int(self.get_render_attribute("startFrame", layer=layer_name)), - "frameEnd": int(self.get_render_attribute("endFrame", + "frameEndHandle": int(self.get_render_attribute("endFrame", layer=layer_name)), "byFrameStep": int( self.get_render_attribute("byFrameStep", layer=layer_name)), "renderer": self.get_render_attribute("currentRenderer", layer=layer_name), - "handleStart": context.data["assetEntity"]['data']['handleStart'], - "handleEnd": context.data["assetEntity"]['data']['handleEnd'], + "handleStart": int(context.data["assetEntity"]['data']['handleStart']), + "handleEnd": int(context.data["assetEntity"]['data']['handleEnd']), # instance subset "family": "renderlayer", @@ -259,8 +261,8 @@ class CollectMayaRender(pyblish.api.ContextPlugin): # Define nice label label = "{0} ({1})".format(expected_layer_name, data["asset"]) - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) + label += " [{0}-{1}]".format(int(data["frameStartHandle"]), + int(data["frameEndHandle"])) instance = context.create_instance(expected_layer_name) instance.data["label"] = label diff --git a/pype/plugins/maya/publish/collect_review.py b/pype/plugins/maya/publish/collect_review.py index 18eee78a9c..9b6027b98d 100644 --- a/pype/plugins/maya/publish/collect_review.py +++ b/pype/plugins/maya/publish/collect_review.py @@ -54,8 +54,10 @@ class CollectReview(pyblish.api.InstancePlugin): self.log.debug('adding review family to {}'.format(reviewable_subset)) data['review_camera'] = camera # data["publish"] = False - data['startFrameReview'] = instance.data["frameStart"] - data['endFrameReview'] = instance.data["frameEnd"] + data['frameStartFtrack'] = instance.data["frameStartHandle"] + data['frameEndFtrack'] = instance.data["frameEndHandle"] + data['frameStartHandle'] = instance.data["frameStartHandle"] + data['frameEndHandle'] = instance.data["frameEndHandle"] data["frameStart"] = instance.data["frameStart"] data["frameEnd"] = instance.data["frameEnd"] data['handles'] = instance.data['handles'] @@ -69,8 +71,8 @@ class CollectReview(pyblish.api.InstancePlugin): else: instance.data['subset'] = task + 'Review' instance.data['review_camera'] = camera - instance.data['startFrameReview'] = instance.data["frameStart"] - instance.data['endFrameReview'] = instance.data["frameEnd"] + instance.data['frameStartFtrack'] = instance.data["frameStartHandle"] + instance.data['frameEndFtrack'] = instance.data["frameEndHandle"] # make ftrack publishable instance.data["families"] = ['ftrack'] diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index bd8497152e..7547f34ba1 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -234,8 +234,8 @@ 
class MayaSubmitDeadline(pyblish.api.InstancePlugin): "Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"), "Frames": "{start}-{end}x{step}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]), + start=int(instance.data["frameStartHandle"]), + end=int(instance.data["frameEndHandle"]), step=int(instance.data["byFrameStep"]), ), @@ -340,7 +340,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): def preflight_check(self, instance): """Ensure the startFrame, endFrame and byFrameStep are integers""" - for key in ("frameStart", "frameEnd", "byFrameStep"): + for key in ("frameStartHandle", "frameEndHandle", "byFrameStep"): value = instance.data[key] if int(value) == value: From 2017d2315a54a477b324d25e6a0d85e895471967 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 23:52:35 +0100 Subject: [PATCH 405/434] change workfile subset naming in nuke to match other hosts --- pype/plugins/nuke/publish/collect_workfile.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/collect_workfile.py b/pype/plugins/nuke/publish/collect_workfile.py index 9c01a3ec97..b95edf0a93 100644 --- a/pype/plugins/nuke/publish/collect_workfile.py +++ b/pype/plugins/nuke/publish/collect_workfile.py @@ -23,11 +23,12 @@ class CollectWorkfile(pyblish.api.ContextPlugin): add_publish_knob(root) family = "workfile" + task = os.getenv("AVALON_TASK", None) # creating instances per write node file_path = context.data["currentFile"] staging_dir = os.path.dirname(file_path) base_name = os.path.basename(file_path) - subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family) + subset = family + task.capitalize() # Get frame range first_frame = int(root["first_frame"].getValue()) From bbf03cc11939c9f1fdc013b3be5a247b5d9e1786 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Fri, 13 Mar 2020 23:52:55 +0100 Subject: [PATCH 406/434] use handles correctly in deadline job submitter --- pype/plugins/global/publish/submit_publish_job.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 3ad7805fe7..47c0272254 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -355,8 +355,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): aov) staging = os.path.dirname(list(cols[0])[0]) - start = int(instance_data.get("frameStart")) - end = int(instance_data.get("frameEnd")) self.log.info("Creating data for: {}".format(subset_name)) @@ -377,8 +375,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(cols[0])], - "frameStart": start, - "frameEnd": end, + "frameStart": int(instance_data.get("frameStartHandle")), + "frameEnd": int(instance_data.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": staging, "anatomy_template": "render", @@ -413,8 +411,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ representations = [] - start = int(instance.get("frameStart")) - end = int(instance.get("frameEnd")) cols, rem = clique.assemble(exp_files) bake_render_path = instance.get("bakeRenderPath") @@ -442,8 +438,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "name": ext, "ext": ext, "files": [os.path.basename(f) for f in list(c)], - "frameStart": start, - "frameEnd": end, + "frameStart": 
int(instance.get("frameStartHandle")), + "frameEnd": int(instance.get("frameEndHandle")), # If expectedFile are absolute, we need only filenames "stagingDir": os.path.dirname(list(c)[0]), "anatomy_template": "render", @@ -577,6 +573,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "frameEnd": end, "handleStart": handle_start, "handleEnd": handle_end, + "frameStartHandle": start - handle_start, + "frameEndHandle": end + handle_end, "fps": fps, "source": source, "extendFrames": data.get("extendFrames"), From 394f341fb8d904c98a902d0df1da2c298ba28579 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sat, 14 Mar 2020 00:09:22 +0100 Subject: [PATCH 407/434] use frameStartHandle in nuke rendering --- pype/nuke/lib.py | 4 ++-- pype/plugins/nuke/publish/submit_nuke_deadline.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index dedc42fa1d..e7720c747c 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -1350,8 +1350,8 @@ class ExporterReview: else: self.fname = os.path.basename(self.path_in) self.fhead = os.path.splitext(self.fname)[0] + "." - self.first_frame = self.instance.data.get("frameStart", None) - self.last_frame = self.instance.data.get("frameEnd", None) + self.first_frame = self.instance.data.get("frameStartHandle", None) + self.last_frame = self.instance.data.get("frameEndHandle", None) if "#" in self.fhead: self.fhead = self.fhead.replace("#", "")[:-1] diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index ee7432e241..0a9ef33398 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -41,8 +41,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion")) self._deadline_user = context.data.get( "deadlineUser", getpass.getuser()) - self._frame_start = int(instance.data["frameStart"]) - self._frame_end = int(instance.data["frameEnd"]) + self._frame_start = int(instance.data["frameStartHandle"]) + self._frame_end = int(instance.data["frameEndHandle"]) # get output path render_path = instance.data['path'] From 90f55bcb3d9b30b7cf163d3ed345b4daeb9d0ef4 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sat, 14 Mar 2020 20:24:56 +0100 Subject: [PATCH 408/434] use frameStartFtrack in hiero --- pype/plugins/nukestudio/publish/collect_reviews.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index af8fd4a0e7..7cf8d77de4 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -78,6 +78,8 @@ class CollectReviews(api.InstancePlugin): file_dir = os.path.dirname(file_path) file = os.path.basename(file_path) ext = os.path.splitext(file)[-1][1:] + handleStart = rev_inst.data.get("handleStart") + handleEnd = rev_inst.data.get("handleEnd") # change label instance.data["label"] = "{0} - {1} - ({2}) - review".format( @@ -86,13 +88,14 @@ class CollectReviews(api.InstancePlugin): self.log.debug("Instance review: {}".format(rev_inst.data["name"])) - # adding representation for review mov representation = { "files": file, "stagingDir": file_dir, "frameStart": rev_inst.data.get("sourceIn"), "frameEnd": rev_inst.data.get("sourceOut"), + "frameStartFtrack": rev_inst.data.get("sourceIn") - handleStart, + "frameEndFtrack": rev_inst.data.get("sourceOut") + 
handleEnd, "step": 1, "fps": rev_inst.data.get("fps"), "preview": True, From e86f1725bfbcdddad71965ca9a30233238c7886a Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sat, 14 Mar 2020 21:56:12 +0100 Subject: [PATCH 409/434] add new icon and remove projects folder --- pype/nuke/lib.py | 1 - pype/nuke/utils.py | 2 +- res/icons/folder-favorite2.png | Bin 0 -> 22430 bytes res/icons/folder-favorite3.png | Bin 0 -> 7957 bytes 4 files changed, 1 insertion(+), 2 deletions(-) create mode 100644 res/icons/folder-favorite2.png create mode 100644 res/icons/folder-favorite3.png diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 3130717a75..3bbd277ae6 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -952,7 +952,6 @@ class WorkfileSettings(object): favorite_items = OrderedDict() # project - favorite_items.update({"Projects root": projects_root}) favorite_items.update({"Project dir": os.path.join( projects_root, project).replace("\\", "/")}) # shot diff --git a/pype/nuke/utils.py b/pype/nuke/utils.py index c7f98efaea..aa5bc1077e 100644 --- a/pype/nuke/utils.py +++ b/pype/nuke/utils.py @@ -10,7 +10,7 @@ def set_context_favorites(favorites={}): favorites (dict): couples of {name:path} """ dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) - icon_path = os.path.join(dir, 'res', 'icons', 'folder-favorite.png') + icon_path = os.path.join(dir, 'res', 'icons', 'folder-favorite3.png') for name, path in favorites.items(): nuke.addFavoriteDir( diff --git a/res/icons/folder-favorite2.png b/res/icons/folder-favorite2.png new file mode 100644 index 0000000000000000000000000000000000000000..91bc3f0fbedc10687402390cf90e285f3394e2c0 GIT binary patch literal 22430 zcmbTd1z23cvLK8kxI4k!ZEz3n5Q0OHnPG5umju@U!7V@tE`xg@xCe*e9^Bn_a^HRT z$-ez}zn_mebEZ#qRdrQ$b#--}$am@rm}qa%;Najel@w*)!@_lN- zs7{Iou5hod$o_ud%hr`x;ox4MTWjmP>8q)VfF12Qf#!}N2&b356AT&-PE69v2?(}@ zxKV>3mevm9v?r}UXsNBu#cB2U)wtE1WFS`7iast7EgyAlu#YWR*ql~Uf?CW=1ctyK z;s&JlvbS?^74Z_M{RdtV*!|yTE?Vk;fVkO;(@Om%NUg8-j#|di1wzfwDZl~d;S-`3 z6z1d+;N#{MV5bIf^9XZs3vu!Aa&YsAa0`g=3sV2{j}}JF#oR*Vy{!B{X~BMp(^|Q? 
[GIT binary patch payloads for res/icons/folder-favorite2.png and res/icons/folder-favorite3.png omitted: base85-encoded image data with no readable content]

Date: Sat, 14 Mar 2020 22:41:29 +0100
Subject: [PATCH 410/434] call to_height and to_width vars from self.
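The change below matters because `to_width` and `to_height` are attributes
on the plugin class: Python does not resolve bare names against class
attributes inside a method, so the unqualified lookups would raise a
NameError at publish time. A minimal, hypothetical sketch of the failure
mode (the class name mirrors the plugin being patched; the default values
are made up for illustration):

    class ExtractReview(object):
        # preset defaults are injected onto the plugin class
        to_width = 1920
        to_height = 1080

        def process_broken(self):
            # NameError: a bare name is not local, enclosing or global
            return to_width

        def process_fixed(self):
            # class attributes must be reached through the instance
            return self.to_width

    print(ExtractReview().process_fixed())  # -> 1920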
--- pype/plugins/global/publish/extract_review.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index f67aa0ae94..7b5aba818c 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -41,8 +41,8 @@ class ExtractReview(pyblish.api.InstancePlugin): handle_end = inst_data.get("handleEnd", context_data.get("handleEnd")) pixel_aspect = inst_data.get("pixelAspect", 1) - resolution_width = inst_data.get("resolutionWidth", to_width) - resolution_height = inst_data.get("resolutionHeight", to_height) + resolution_width = inst_data.get("resolutionWidth", self.to_width) + resolution_height = inst_data.get("resolutionHeight", self.to_height) self.log.debug("Families In: `{}`".format(inst_data["families"])) self.log.debug("__ frame_start: {}".format(frame_start)) self.log.debug("__ frame_end: {}".format(frame_end)) @@ -223,7 +223,7 @@ class ExtractReview(pyblish.api.InstancePlugin): output_args.extend(profile.get('output', [])) # defining image ratios - resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height + resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height delivery_ratio = float(self.to_width) / float(self.to_height) self.log.debug( "__ resolution_ratio: `{}`".format(resolution_ratio)) From a78773dc81ddad8f92c886d65969aaec78f6ff98 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sun, 15 Mar 2020 00:22:58 +0100 Subject: [PATCH 411/434] make sure we fallback to old handles attribute --- pype/plugins/global/publish/collect_avalon_entities.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index 20899361c5..299e2f03be 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -47,6 +47,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): context.data["assetEntity"] = asset_entity data = asset_entity['data'] - context.data['handles'] = int(data.get("handles", 0)) - context.data["handleStart"] = int(data.get("handleStart", 0)) - context.data["handleEnd"] = int(data.get("handleEnd", 0)) + handles = int(data.get("handles", 0)) + context.data["handles"] = handles + context.data["handleStart"] = int(data.get("handleStart", handles)) + context.data["handleEnd"] = int(data.get("handleEnd", handles)) From 37778f57db87e39d7e61901db8cd915f066155e3 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sun, 15 Mar 2020 00:30:10 +0100 Subject: [PATCH 412/434] rely on ffmpeg start second calculation --- pype/plugins/global/publish/extract_review.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index abe3d36758..5c40227494 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -183,8 +183,12 @@ class ExtractReview(pyblish.api.InstancePlugin): frame_start_handle, fps)) else: if no_handles: - start_sec = float(handle_start) / fps - input_args.append("-ss {:0.2f}".format(start_sec)) + # start_sec = float(handle_start) / fps + input_args.append( + "-start_number {0} -framerate {1}".format( + handle_start, fps + ) + ) frame_start_handle = frame_start frame_end_handle = frame_end From 
317c25a7c52f5a8be517a444737344053431ae85 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sun, 15 Mar 2020 16:27:55 +0100 Subject: [PATCH 413/434] get frameStartHandle -1 for slate --- pype/plugins/nuke/publish/extract_slate_frame.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 488f9bd31d..0d8bfe9dc5 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -77,7 +77,7 @@ class ExtractSlateFrame(pype.api.Extractor): else: fname = os.path.basename(instance.data.get("path", None)) fhead = os.path.splitext(fname)[0] + "." - first_frame = instance.data.get("frameStart", None) - 1 + first_frame = instance.data.get("frameStartHandle", None) - 1 last_frame = first_frame if "#" in fhead: From 58b563ad046cdb09619eda08559462e62d27e3ad Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Sun, 15 Mar 2020 16:28:10 +0100 Subject: [PATCH 414/434] revert ffmpeg frame calculation --- pype/plugins/global/publish/extract_review.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 5c40227494..abe3d36758 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -183,12 +183,8 @@ class ExtractReview(pyblish.api.InstancePlugin): frame_start_handle, fps)) else: if no_handles: - # start_sec = float(handle_start) / fps - input_args.append( - "-start_number {0} -framerate {1}".format( - handle_start, fps - ) - ) + start_sec = float(handle_start) / fps + input_args.append("-ss {:0.2f}".format(start_sec)) frame_start_handle = frame_start frame_end_handle = frame_end From 86a611574b54dd6d53d937a3306fb2dcce6af134 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Mon, 16 Mar 2020 19:02:45 +0100 Subject: [PATCH 415/434] Update README.md --- README.md | 30 +++++------------------------- 1 file changed, 5 insertions(+), 25 deletions(-) diff --git a/README.md b/README.md index e254b0ad87..8110887cbd 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,11 @@ Pype ==== -The base studio _config_ for [Avalon](https://getavalon.github.io/) +Welcome to PYPE _config_ for [Avalon](https://getavalon.github.io/) -Currently this config is dependent on our customised avalon instalation so it won't work with vanilla avalon core. We're working on open sourcing all of the necessary code though. You can still get inspiration or take our individual validators and scripts which should work just fine in other pipelines. +To get all the key information about the project, go to [PYPE.club](http://pype.club) + + +Currently this config is dependent on our customised avalon instalation so it won't work with vanilla avalon core. To install it you'll need to download [pype-setup](github.com/pypeclub/pype-setup), which is able to deploy everything for you if you follow the documentation. _This configuration acts as a starting point for all pype club clients wth avalon deployment._ - -Code convention ---------------- - -Below are some of the standard practices applied to this repositories. - -- **Etiquette: PEP8** - - All code is written in PEP8. It is recommended you use a linter as you work, flake8 and pylinter are both good options. -- **Etiquette: Napoleon docstrings** - - Any docstrings are made in Google Napoleon format. 
See [Napoleon](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) for details. - -- **Etiquette: Semantic Versioning** - - This project follows [semantic versioning](http://semver.org). -- **Etiquette: Underscore means private** - - Anything prefixed with an underscore means that it is internal to wherever it is used. For example, a variable name is only ever used in the parent function or class. A module is not for use by the end-user. In contrast, anything without an underscore is public, but not necessarily part of the API. Members of the API resides in `api.py`. - -- **API: Idempotence** - - A public function must be able to be called twice and produce the exact same result. This means no changing of state without restoring previous state when finishing. For example, if a function requires changing the current selection in Autodesk Maya, it must restore the previous selection prior to completing. From 12557526c32e9d38d77d8ba73d3ff8a45975890b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 16 Mar 2020 19:47:29 +0100 Subject: [PATCH 416/434] integrate ftrack note use label from ftrack's custom attribute configuration --- .../ftrack/publish/integrate_ftrack_note.py | 71 +++++++++++++++++-- 1 file changed, 64 insertions(+), 7 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py index 2621ca96ab..9d040585d5 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -1,4 +1,5 @@ import sys +import json import pyblish.api import six @@ -18,6 +19,47 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): # - note label must exist in Ftrack note_labels = [] + def get_intent_label(self, session, intent_value): + if not intent_value: + return + + intent_configurations = session.query( + "CustomAttributeConfiguration where key is intent" + ).all() + if not intent_configurations: + return + + intent_configuration = intent_configurations[0] + if len(intent_configuration) > 1: + self.log.warning(( + "Found more than one `intent` custom attribute." + " Using first found." 
+ )) + + config = intent_configuration.get("config") + if not config: + return + + items = config.get("data") + if not items: + return + + if sys.version_info[0] < 3: + string_type = basestring + else: + string_type = str + + if isinstance(items, string_type): + items = json.loads(items) + + intent_label = None + for item in items: + if item["value"] == intent_value: + intent_label = item["menu"] + break + + return intent_label + def process(self, instance): comment = (instance.context.data.get("comment") or "").strip() if not comment: @@ -26,17 +68,33 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): self.log.debug("Comment is set to `{}`".format(comment)) - intent = instance.context.data.get("intent") - if intent: - msg = "Intent is set to `{}` and was added to comment.".format( - intent - ) + session = instance.context.data["ftrackSession"] + + intent_val = instance.context.data.get("intent", {}).get("value") + intent_label = None + if intent_val: + intent_label = self.get_intent_label(session, intent_val) + if intent_label is None: + intent_label = intent_val + + # if intent label is set then format comment + # - it is possible that intent_label is equal to "" (empty string) + if intent_label: + msg = "Intent label is to `{}`.".format(intent_label) comment = self.note_with_intent_template.format(**{ - "intent": intent, + "intent": intent_val, "comment": comment }) + + elif intent_val: + msg = ( + "Intent is set to `{}` and was not added" + " to comment because label is set to `{}`." + ).format(intent_val, intent_label) + else: msg = "Intent is not set." + self.log.debug(msg) asset_versions_key = "ftrackIntegratedAssetVersions" @@ -45,7 +103,6 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): self.log.info("There are any integrated AssetVersions") return - session = instance.context.data["ftrackSession"] user = session.query( "User where username is \"{}\"".format(session.api_user) ).first() From d2f9336b4541ce1ace974b6e162da525b2dad8c3 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 16 Mar 2020 19:51:20 +0100 Subject: [PATCH 417/434] updated intent getting --- pype/plugins/ftrack/publish/integrate_ftrack_instances.py | 2 +- pype/plugins/global/publish/extract_burnin.py | 2 +- pype/plugins/nuke/publish/extract_slate_frame.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index ec57f46d61..591dcf0dc2 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -127,7 +127,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add custom attributes for AssetVersion assetversion_cust_attrs = {} - intent_val = instance.context.data.get("intent") + intent_val = instance.context.data.get("intent", {}).get("value") if intent_val: assetversion_cust_attrs["intent"] = intent_val diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 1251e5c02f..be287fbb14 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -52,7 +52,7 @@ class ExtractBurnin(pype.api.Extractor): "duration": duration, "version": int(version), "comment": instance.context.data.get("comment", ""), - "intent": instance.context.data.get("intent", "") + "intent": instance.context.data.get("intent", {}).get("label", "") }) # get anatomy project diff --git 
a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 0d8bfe9dc5..eff51d95d4 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -157,7 +157,7 @@ class ExtractSlateFrame(pype.api.Extractor): return comment = instance.context.data.get("comment") - intent = instance.context.data.get("intent") + intent = instance.context.data.get("intent", {}).get("value") try: node["f_submission_note"].setValue(comment) From 354c8d7a50092e7f998552abc5c6433e2e2b54ff Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 16 Mar 2020 19:51:52 +0100 Subject: [PATCH 418/434] set intent only if set in extract burnin --- pype/plugins/global/publish/extract_burnin.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index be287fbb14..086a1fdfb2 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -51,10 +51,13 @@ class ExtractBurnin(pype.api.Extractor): "frame_end": frame_end_handle, "duration": duration, "version": int(version), - "comment": instance.context.data.get("comment", ""), - "intent": instance.context.data.get("intent", {}).get("label", "") + "comment": instance.context.data.get("comment", "") }) + intent = instance.context.data.get("intent", {}).get("label") + if intent: + prep_data["intent"] = intent + # get anatomy project anatomy = instance.context.data['anatomy'] From 349beaccfc5ca213771325c84e4ef8dec4ecdc0b Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 16 Mar 2020 19:52:04 +0100 Subject: [PATCH 419/434] collect anatomy docs cleanup --- pype/plugins/global/publish/collect_anatomy.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py index ae83e39513..73ae3bb024 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -6,10 +6,6 @@ Requires: username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001) datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder) -Optional: - comment -> collect_comment *(pyblish.api.CollectorOrder) - intent -> collected in pyblish-lite - Provides: context -> anatomy (pypeapp.Anatomy) context -> anatomyData From 8072f1dcc9736eeacd3387fb8ddc276725c39d7a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 16 Mar 2020 19:52:59 +0100 Subject: [PATCH 420/434] removed intent from required keys in collect rendered files --- pype/plugins/global/publish/collect_rendered_files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py index 010cf44c15..552fd49f6d 100644 --- a/pype/plugins/global/publish/collect_rendered_files.py +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -35,7 +35,7 @@ class CollectRenderedFiles(pyblish.api.ContextPlugin): def _process_path(self, data): # validate basic necessary data data_err = "invalid json file - missing data" - required = ["asset", "user", "intent", "comment", + required = ["asset", "user", "comment", "job", "instances", "session", "version"] assert all(elem in data.keys() for elem in required), data_err From d8662f56e6b5f25eba23295f51349e015b0343ca Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 16 Mar 2020 19:57:42 +0100 Subject: 
[PATCH 421/434] fix(nuke): mixed slashes issue on ocio config path

---
 pype/nuke/lib.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index e7720c747c..68a73dffac 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -620,7 +620,8 @@ class WorkfileSettings(object):
         # third set ocio custom path
         if root_dict.get("customOCIOConfigPath"):
             self._root_node["customOCIOConfigPath"].setValue(
-                str(root_dict["customOCIOConfigPath"]).format(**os.environ)
+                str(root_dict["customOCIOConfigPath"]).format(
+                    **os.environ).replace("\\", "/")
             )
             log.debug("nuke.root()['{}'] changed to: {}".format(
                 "customOCIOConfigPath", root_dict["customOCIOConfigPath"]))

From f37ea5d82ae82b9c8c04ba06d86f982a120035ad Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 16 Mar 2020 20:09:14 +0100
Subject: [PATCH 422/434] intent is not required in extract slate frame

---
 pype/plugins/nuke/publish/extract_slate_frame.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py
index eff51d95d4..369cbe0496 100644
--- a/pype/plugins/nuke/publish/extract_slate_frame.py
+++ b/pype/plugins/nuke/publish/extract_slate_frame.py
@@ -157,7 +157,7 @@ class ExtractSlateFrame(pype.api.Extractor):
             return
 
         comment = instance.context.data.get("comment")
-        intent = instance.context.data.get("intent", {}).get("value")
+        intent = instance.context.data.get("intent", {}).get("value", "")
 
         try:
             node["f_submission_note"].setValue(comment)

From 0d20e27cb3d7912fc5f7a7716473c55df495c24c Mon Sep 17 00:00:00 2001
From: iLLiCiTiT
Date: Mon, 16 Mar 2020 20:13:51 +0100
Subject: [PATCH 423/434] use default intent label in ftrack if ftrack's is
 not found

---
 .../ftrack/publish/integrate_ftrack_note.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py
index 9d040585d5..a0e7719779 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py
@@ -71,18 +71,19 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
         session = instance.context.data["ftrackSession"]
 
         intent_val = instance.context.data.get("intent", {}).get("value")
-        intent_label = None
+        intent_label = instance.context.data.get("intent", {}).get("label")
+        final_label = None
         if intent_val:
-            intent_label = self.get_intent_label(session, intent_val)
-            if intent_label is None:
-                intent_label = intent_val
+            final_label = self.get_intent_label(session, intent_val)
+            if final_label is None:
+                final_label = intent_label
 
         # if intent label is set then format comment
         # - it is possible that intent_label is equal to "" (empty string)
-        if intent_label:
-            msg = "Intent label is to `{}`.".format(intent_label)
+        if final_label:
+            msg = "Intent label is set to `{}`.".format(final_label)
             comment = self.note_with_intent_template.format(**{
-                "intent": intent_val,
+                "intent": final_label,
                 "comment": comment
             })
 
@@ -90,7 +91,7 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin):
             msg = (
                 "Intent is set to `{}` and was not added"
                 " to comment because label is set to `{}`."
-            ).format(intent_val, intent_label)
+            ).format(intent_val, final_label)
 
         else:
             msg = "Intent is not set."
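
Taken together, the intent-related patches above assume `context.data["intent"]`
is now a dictionary with "value" and "label" keys instead of a plain string.
A minimal runnable sketch of the lookup order the note integrator ends up
with; the dictionary contents and the stub function are hypothetical, not
part of the patches:

    # Shape pyblish-lite is expected to collect (example values made up).
    context_data = {"intent": {"value": "wip", "label": "Work in Progress"}}

    # Defensive access used across the patched plugins; the empty-dict
    # default keeps the .get() chains safe when no intent was collected.
    intent_val = context_data.get("intent", {}).get("value")
    intent_label = context_data.get("intent", {}).get("label")

    # Stand-in for IntegrateFtrackNote.get_intent_label, which queries
    # ftrack's intent configuration; pretend no configuration matched.
    def get_intent_label(session, intent_value):
        return None

    # Prefer ftrack's label, fall back to the collected label (patch 423).
    final_label = get_intent_label(None, intent_val)
    if final_label is None:
        final_label = intent_label

    print(final_label)  # -> "Work in Progress"
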
From c91b49510045afc5903db2eff5813bc8a09bab39 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 16 Mar 2020 20:22:02 +0100 Subject: [PATCH 424/434] fix ftrack json parse --- pype/plugins/ftrack/publish/integrate_ftrack_note.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py index a0e7719779..679010ca58 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_note.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -40,7 +40,8 @@ class IntegrateFtrackNote(pyblish.api.InstancePlugin): if not config: return - items = config.get("data") + configuration = json.loads(config) + items = configuration.get("data") if not items: return From 64e6dedde0bd0abebab175612016f8c4df99f9c2 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 17 Mar 2020 11:48:26 +0100 Subject: [PATCH 425/434] handles were returning none which can't be cast to integer --- pype/plugins/global/publish/collect_avalon_entities.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py index 299e2f03be..103f5abd1a 100644 --- a/pype/plugins/global/publish/collect_avalon_entities.py +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -47,7 +47,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin): context.data["assetEntity"] = asset_entity data = asset_entity['data'] - handles = int(data.get("handles", 0)) + handles = int(data.get("handles") or 0) context.data["handles"] = handles context.data["handleStart"] = int(data.get("handleStart", handles)) context.data["handleEnd"] = int(data.get("handleEnd", handles)) From 59f97ef904057093d5b74082aadf7e35501740f3 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Tue, 17 Mar 2020 15:56:46 +0100 Subject: [PATCH 426/434] Update LICENSE --- LICENSE | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/LICENSE b/LICENSE index dfcd71eb3f..63249bb52b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018 orbi tools s.r.o +Copyright (c) 2020 Orbi Tools s.r.o. Permission is hereby granted, free of charge, to any person obtaining a copy From a549634c0ad925a038574bc01610f8abf93089a5 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Tue, 17 Mar 2020 16:12:29 +0100 Subject: [PATCH 427/434] added options to maya reference loader --- pype/maya/plugin.py | 71 ++++++++++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 24 deletions(-) diff --git a/pype/maya/plugin.py b/pype/maya/plugin.py index 327cf47cbd..85de5adec5 100644 --- a/pype/maya/plugin.py +++ b/pype/maya/plugin.py @@ -1,4 +1,5 @@ from avalon import api +from avalon.vendor import qargparse def get_reference_node_parents(ref): @@ -33,11 +34,25 @@ class ReferenceLoader(api.Loader): `update` logic. """ - def load(self, - context, - name=None, - namespace=None, - data=None): + + options = [ + qargparse.Integer( + "count", + label="Count", + default=1, + min=1, + help="How many times to load?" + ) + ] + + def load( + self, + context, + name=None, + namespace=None, + options=None, + data=None + ): import os from avalon.maya import lib @@ -46,29 +61,37 @@ class ReferenceLoader(api.Loader): assert os.path.exists(self.fname), "%s does not exist." 
% self.fname asset = context['asset'] + loaded_containers = [] - namespace = namespace or lib.unique_namespace( - asset["name"] + "_", - prefix="_" if asset["name"][0].isdigit() else "", - suffix="_", - ) + count = options.get("count") or 1 + while count > 0: + count -= 1 + namespace = namespace or lib.unique_namespace( + asset["name"] + "_", + prefix="_" if asset["name"][0].isdigit() else "", + suffix="_", + ) - self.process_reference(context=context, - name=name, - namespace=namespace, - data=data) + self.process_reference( + context=context, + name=name, + namespace=namespace, + data=data + ) - # Only containerize if any nodes were loaded by the Loader - nodes = self[:] - if not nodes: - return + # Only containerize if any nodes were loaded by the Loader + nodes = self[:] + if not nodes: + return - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) + loaded_containers.append(containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__ + )) + return loaded_containers def process_reference(self, context, name, namespace, data): """To be implemented by subclass""" From e10343de2be9a795a42b98bee2d5bd8c55343a44 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 18 Mar 2020 10:24:25 +0100 Subject: [PATCH 428/434] add offset when loading multiple subsets --- pype/maya/plugin.py | 23 ++++++++++++++++++----- pype/plugins/maya/load/load_reference.py | 12 ++++++++---- 2 files changed, 26 insertions(+), 9 deletions(-) diff --git a/pype/maya/plugin.py b/pype/maya/plugin.py index 85de5adec5..ed244d56df 100644 --- a/pype/maya/plugin.py +++ b/pype/maya/plugin.py @@ -42,6 +42,11 @@ class ReferenceLoader(api.Loader): default=1, min=1, help="How many times to load?" + ), + qargparse.Double3( + "offset", + label="Position Offset", + help="Offset loaded models for easier selection." 
) ] @@ -50,8 +55,7 @@ class ReferenceLoader(api.Loader): context, name=None, namespace=None, - options=None, - data=None + options=None ): import os @@ -64,19 +68,25 @@ class ReferenceLoader(api.Loader): loaded_containers = [] count = options.get("count") or 1 - while count > 0: - count -= 1 + for c in range(0, count): namespace = namespace or lib.unique_namespace( asset["name"] + "_", prefix="_" if asset["name"][0].isdigit() else "", suffix="_", ) + # Offset loaded subset + if "offset" in options: + offset = [i * c for i in options["offset"]] + options["translate"] = offset + + self.log.info(options) + self.process_reference( context=context, name=name, namespace=namespace, - data=data + options=options ) # Only containerize if any nodes were loaded by the Loader @@ -91,6 +101,9 @@ class ReferenceLoader(api.Loader): context=context, loader=self.__class__.__name__ )) + + c += 1 + namespace = None return loaded_containers def process_reference(self, context, name, namespace, data): diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index cbd1da7cbd..b1192d9c9e 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -1,4 +1,5 @@ import pype.maya.plugin +reload(pype.maya.plugin) from avalon import api, maya from maya import cmds import os @@ -24,7 +25,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): icon = "code-fork" color = "orange" - def process_reference(self, context, name, namespace, data): + def process_reference(self, context, name, namespace, options): import maya.cmds as cmds from avalon import maya import pymel.core as pm @@ -101,16 +102,19 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) - if data.get("post_process", True): + if "translate" in options: + cmds.setAttr(groupName + ".t", *options["translate"]) + + if options.get("post_process", True): if family == "rig": - self._post_process_rig(name, namespace, context, data) + self._post_process_rig(name, namespace, context, options) return newNodes def switch(self, container, representation): self.update(container, representation) - def _post_process_rig(self, name, namespace, context, data): + def _post_process_rig(self, name, namespace, context, options): output = next((node for node in self if node.endswith("out_SET")), None) From 6c2b056dd479be7232e95ee7ac6adc7d09eeba24 Mon Sep 17 00:00:00 2001 From: Milan Kolar Date: Wed, 18 Mar 2020 10:51:31 +0100 Subject: [PATCH 429/434] make sure not to offset rigs --- pype/plugins/maya/load/load_reference.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index b1192d9c9e..797933300c 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -1,5 +1,4 @@ import pype.maya.plugin -reload(pype.maya.plugin) from avalon import api, maya from maya import cmds import os @@ -102,12 +101,11 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) - if "translate" in options: - cmds.setAttr(groupName + ".t", *options["translate"]) - - if options.get("post_process", True): - if family == "rig": - self._post_process_rig(name, namespace, context, options) + if family == "rig": + self._post_process_rig(name, namespace, context, options) + 
else:
+            if "translate" in options:
+                cmds.setAttr(groupName + ".t", *options["translate"])
 
         return newNodes
 

From 47468403515e33d8b845042a474caad12ac81864 Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Wed, 18 Mar 2020 12:13:48 +0100
Subject: [PATCH 430/434] add houndci configuration and change flake8 config

---
 .flake8    | 2 ++
 .hound.yml | 0
 2 files changed, 2 insertions(+)
 create mode 100644 .hound.yml

diff --git a/.flake8 b/.flake8
index 9de8d23bb2..67ed2d77a3 100644
--- a/.flake8
+++ b/.flake8
@@ -1,5 +1,7 @@
 [flake8]
 # ignore = D203
+ignore = BLK100
+max-line-length = 79
 exclude =
     .git,
     __pycache__,
diff --git a/.hound.yml b/.hound.yml
new file mode 100644
index 0000000000..e69de29bb2

From 321c27234a23c169f2ae5cea94a3223f6b7b82fc Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Wed, 18 Mar 2020 12:22:38 +0100
Subject: [PATCH 431/434] wrong attribute used for pixelAspect

---
 pype/plugins/maya/publish/collect_render.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py
index f3ea1ccee5..be3878e6bd 100644
--- a/pype/plugins/maya/publish/collect_render.py
+++ b/pype/plugins/maya/publish/collect_render.py
@@ -238,7 +238,7 @@ class CollectMayaRender(pyblish.api.ContextPlugin):
             "expectedFiles": full_exp_files,
             "resolutionWidth": cmds.getAttr("defaultResolution.width"),
             "resolutionHeight": cmds.getAttr("defaultResolution.height"),
-            "pixelAspect": cmds.getAttr("defaultResolution.height")
+            "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect")
         }
 
         # Apply each user defined attribute as data

From 1f0383268722a5b85b40b740404c1c221b60dbe8 Mon Sep 17 00:00:00 2001
From: Milan Kolar
Date: Wed, 18 Mar 2020 12:50:03 +0100
Subject: [PATCH 432/434] pool was being overwritten in submit publish job

---
 pype/plugins/global/publish/submit_publish_job.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index 47c0272254..dcf19ae32c 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -238,8 +238,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             )
             i += 1
 
-        # Avoid copied pools and remove secondary pool
-        payload["JobInfo"]["Pool"] = "none"
+        # remove secondary pool
         payload["JobInfo"].pop("SecondaryPool", None)
 
         self.log.info("Submitting Deadline job ...")

From fb9cd34cb5b5e153906728a2b89fd48a1863e10f Mon Sep 17 00:00:00 2001
From: Ondrej Samohel
Date: Wed, 18 Mar 2020 18:16:44 +0100
Subject: [PATCH 433/434] allow exports of non-baked cameras

---
 .../maya/publish/extract_camera_mayaAscii.py | 37 +++++++++----------
 1 file changed, 18 insertions(+), 19 deletions(-)

diff --git a/pype/plugins/maya/publish/extract_camera_mayaAscii.py b/pype/plugins/maya/publish/extract_camera_mayaAscii.py
index 30f686f6f5..ef80ed4ad4 100644
--- a/pype/plugins/maya/publish/extract_camera_mayaAscii.py
+++ b/pype/plugins/maya/publish/extract_camera_mayaAscii.py
@@ -94,11 +94,6 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
         step = instance.data.get("step", 1.0)
         bake_to_worldspace = instance.data("bakeToWorldSpace", True)
 
-        # TODO: Implement a bake to non-world space
-        # Currently it will always bake the resulting camera to world-space
-        # and it does not allow to include the parent hierarchy, even though
-        # with `bakeToWorldSpace` set to False it should include its
-        # hierarchy to be correct with the family 
implementation. if not bake_to_worldspace: self.log.warning("Camera (Maya Ascii) export only supports world" "space baked camera extractions. The disabled " @@ -113,7 +108,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor): framerange[1] + handles] # validate required settings - assert len(cameras) == 1, "Not a single camera found in extraction" + assert len(cameras) == 1, "Single camera must be found in extraction" assert isinstance(step, float), "Step must be a float value" camera = cameras[0] transform = cmds.listRelatives(camera, parent=True, fullPath=True) @@ -124,21 +119,24 @@ class ExtractCameraMayaAscii(pype.api.Extractor): path = os.path.join(dir_path, filename) # Perform extraction - self.log.info("Performing camera bakes for: {0}".format(transform)) with avalon.maya.maintained_selection(): with lib.evaluation("off"): with avalon.maya.suspended_refresh(): - baked = lib.bake_to_world_space( - transform, - frame_range=range_with_handles, - step=step - ) - baked_shapes = cmds.ls(baked, - type="camera", - dag=True, - shapes=True, - long=True) - + if bake_to_worldspace: + self.log.info( + "Performing camera bakes: {}".format(transform)) + baked = lib.bake_to_world_space( + transform, + frame_range=range_with_handles, + step=step + ) + baked_shapes = cmds.ls(baked, + type="camera", + dag=True, + shapes=True, + long=True) + else: + baked_shapes = cameras # Fix PLN-178: Don't allow background color to be non-black for cam in baked_shapes: attrs = {"backgroundColorR": 0.0, @@ -164,7 +162,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor): expressions=False) # Delete the baked hierarchy - cmds.delete(baked) + if bake_to_worldspace: + cmds.delete(baked) massage_ma_file(path) From c06d4f6ecda1cc2fae0355765f13b98170debeb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= <33513211+antirotor@users.noreply.github.com> Date: Thu, 19 Mar 2020 10:46:52 +0100 Subject: [PATCH 434/434] hound config should point to flake8 config --- .hound.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.hound.yml b/.hound.yml index e69de29bb2..409cc4416a 100644 --- a/.hound.yml +++ b/.hound.yml @@ -0,0 +1,4 @@ +flake8: + enabled: true + config_file: .flake8 +
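
For reference, the "count" and "offset" loader options added in patches
427-428 combine so that every additional copy is shifted one step further
along the offset vector (rigs are excluded from the offset by patch 429).
A minimal runnable sketch of that interplay, with hypothetical option
values and the Maya calls left out:

    # Mirrors the loop in the patched ReferenceLoader.load().
    options = {"count": 3, "offset": [2.0, 0.0, 0.0]}

    count = options.get("count") or 1
    for c in range(0, count):
        if "offset" in options:
            # Copy c is translated by c times the offset, so copies land
            # at [0.0, 0.0, 0.0], [2.0, 0.0, 0.0] and [4.0, 0.0, 0.0].
            translate = [i * c for i in options["offset"]]
            print(translate)
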