From b63453d38f1e11a92ed94be26d3d07cb97875bb8 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Fri, 20 Sep 2019 09:56:33 +0200 Subject: [PATCH 01/28] mapping provided and required attributes in plugins --- .../plugins/global/publish/collect_anatomy.py | 20 +++++ .../plugins/global/publish/collect_comment.py | 7 ++ .../plugins/global/publish/collect_context.py | 15 ++++ .../global/publish/collect_context_label.py | 7 ++ .../publish/collect_current_shell_file.py | 8 ++ .../global/publish/collect_deadline_user.py | 9 +- .../global/publish/collect_filesequences.py | 10 +++ .../global/publish/collect_machine_name.py | 8 ++ .../publish/collect_output_repre_config.py | 10 ++- .../plugins/global/publish/collect_presets.py | 9 ++ .../global/publish/collect_project_data.py | 9 +- .../global/publish/collect_templates.py | 88 +++++++++++++++++-- .../publish/integrate_assumed_destination.py | 9 +- 13 files changed, 193 insertions(+), 16 deletions(-) create mode 100644 pype/plugins/global/publish/collect_anatomy.py diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py new file mode 100644 index 0000000000..b053a3a0d1 --- /dev/null +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -0,0 +1,20 @@ +""" +Requires: + None +Provides: + context -> anatomy (pypeapp.Anatomy) +""" + +from pypeapp import Anatomy +import pyblish.api + + +class CollectTemplates(pyblish.api.ContextPlugin): + """Collect Anatomy into Context""" + + order = pyblish.api.CollectorOrder + label = "Collect Templates" + + def process(self, context): + context.data['anatomy'] = Anatomy() + self.log.info("Anatomy templates collected...") diff --git a/pype/plugins/global/publish/collect_comment.py b/pype/plugins/global/publish/collect_comment.py index 5bbd1da2a1..22970665a1 100644 --- a/pype/plugins/global/publish/collect_comment.py +++ b/pype/plugins/global/publish/collect_comment.py @@ -1,3 +1,10 @@ +""" +Requires: + None +Provides: + context -> comment 
(str) +""" + import pyblish.api diff --git a/pype/plugins/global/publish/collect_context.py b/pype/plugins/global/publish/collect_context.py index 31ab95259c..b718f18fa8 100644 --- a/pype/plugins/global/publish/collect_context.py +++ b/pype/plugins/global/publish/collect_context.py @@ -1,3 +1,18 @@ +""" +Requires: + environment -> SAPUBLISH_INPATH + environment -> SAPUBLISH_OUTPATH + +Provides: + context -> returnJsonPath (str) + context -> project + context -> asset + instance -> destination_list (list) + instance -> representations (list) + instance -> source (list) + instance -> representations +""" + import os import pyblish.api from avalon import io diff --git a/pype/plugins/global/publish/collect_context_label.py b/pype/plugins/global/publish/collect_context_label.py index ec8e0f7cdc..9c07d7de5b 100644 --- a/pype/plugins/global/publish/collect_context_label.py +++ b/pype/plugins/global/publish/collect_context_label.py @@ -1,3 +1,10 @@ +""" +Requires: + context -> currentFile (str) +Provides: + context -> label (str) +""" + import os import pyblish.api diff --git a/pype/plugins/global/publish/collect_current_shell_file.py b/pype/plugins/global/publish/collect_current_shell_file.py index a467459bc8..961cad86a1 100644 --- a/pype/plugins/global/publish/collect_current_shell_file.py +++ b/pype/plugins/global/publish/collect_current_shell_file.py @@ -1,3 +1,11 @@ +""" +Requires: + None + +Provides: + context -> currentFile (str) +""" + import os import pyblish.api diff --git a/pype/plugins/global/publish/collect_deadline_user.py b/pype/plugins/global/publish/collect_deadline_user.py index 624e455251..125f9d0d26 100644 --- a/pype/plugins/global/publish/collect_deadline_user.py +++ b/pype/plugins/global/publish/collect_deadline_user.py @@ -1,3 +1,11 @@ +""" +Requires: + environment -> DEADLINE_PATH + +Provides: + context -> deadlineUser (str) +""" + import os import subprocess @@ -54,4 +62,3 @@ class CollectDeadlineUser(pyblish.api.ContextPlugin): 
self.log.info("Found Deadline user: {}".format(user)) context.data['deadlineUser'] = user - diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 33531549cb..73f3a459c8 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -1,3 +1,13 @@ +""" +Requires: + environment -> PYPE_PUBLISH_PATHS + context -> workspaceDir + +Provides: + context -> user (str) + instance -> new instance +""" + import os import re import copy diff --git a/pype/plugins/global/publish/collect_machine_name.py b/pype/plugins/global/publish/collect_machine_name.py index 02360cff04..72ef68f8ed 100644 --- a/pype/plugins/global/publish/collect_machine_name.py +++ b/pype/plugins/global/publish/collect_machine_name.py @@ -1,3 +1,11 @@ +""" +Requires: + none + +Provides: + context -> machine (str) +""" + import pyblish.api diff --git a/pype/plugins/global/publish/collect_output_repre_config.py b/pype/plugins/global/publish/collect_output_repre_config.py index 5595e29cab..f02199e778 100644 --- a/pype/plugins/global/publish/collect_output_repre_config.py +++ b/pype/plugins/global/publish/collect_output_repre_config.py @@ -1,5 +1,11 @@ -import os -import json +""" +Requires: + config_data -> ftrack.output_representation + +Provides: + context -> output_repre_config (str) +""" + import pyblish.api from pypeapp import config diff --git a/pype/plugins/global/publish/collect_presets.py b/pype/plugins/global/publish/collect_presets.py index 7e0d3e2f4b..abf85a6f01 100644 --- a/pype/plugins/global/publish/collect_presets.py +++ b/pype/plugins/global/publish/collect_presets.py @@ -1,3 +1,12 @@ +""" +Requires: + config_data -> colorspace.default + config_data -> dataflow.default + +Provides: + context -> presets +""" + from pyblish import api from pypeapp import config diff --git a/pype/plugins/global/publish/collect_project_data.py 
b/pype/plugins/global/publish/collect_project_data.py index de51ad880c..acdbc2c41f 100644 --- a/pype/plugins/global/publish/collect_project_data.py +++ b/pype/plugins/global/publish/collect_project_data.py @@ -1,8 +1,15 @@ +""" +Requires: + None + +Provides: + context -> projectData +""" + import pyblish.api import pype.api as pype - class CollectProjectData(pyblish.api.ContextPlugin): """Collecting project data from avalon db""" diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index fe48e97c03..8113f1d763 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -1,16 +1,86 @@ +""" +Requires: + session -> AVALON_PROJECT + context -> anatomy (pypeapp.Anatomy) + instance -> subset + instance -> asset + instance -> family -import pype.api as pype -from pypeapp import Anatomy +Provides: + instance -> template + instance -> assumedTemplateData + instance -> assumedDestination +""" +import os + +from avalon import io, api import pyblish.api -class CollectTemplates(pyblish.api.ContextPlugin): - """Inject the current working file into context""" +class CollectTemplates(pyblish.api.InstancePlugin): + """Fill templates with data needed for publish""" - order = pyblish.api.CollectorOrder - label = "Collect Templates" + order = pyblish.api.CollectorOrder + 0.1 + label = "Collect and fill Templates" - def process(self, context): - context.data['anatomy'] = Anatomy() - self.log.info("Anatomy templates collected...") + def process(self, instance): + # get all the stuff from the database + subset_name = instance.data["subset"] + asset_name = instance.data["asset"] + project_name = api.Session["AVALON_PROJECT"] + + project = io.find_one({"type": "project", + "name": project_name}, + projection={"config": True, "data": True}) + + template = project["config"]["template"]["publish"] + anatomy = instance.context.data['anatomy'] + + asset = io.find_one({"type": 
"asset", + "name": asset_name, + "parent": project["_id"]}) + + assert asset, ("No asset found by the name '{}' " + "in project '{}'".format(asset_name, project_name)) + silo = asset['silo'] + + subset = io.find_one({"type": "subset", + "name": subset_name, + "parent": asset["_id"]}) + + # assume there is no version yet, we start at `1` + version = None + version_number = 1 + if subset is not None: + version = io.find_one({"type": "version", + "parent": subset["_id"]}, + sort=[("name", -1)]) + + # if there is a subset there ought to be version + if version is not None: + version_number += int(version["name"]) + + hierarchy = asset['data']['parents'] + if hierarchy: + # hierarchy = os.path.sep.join(hierarchy) + hierarchy = os.path.join(*hierarchy) + + template_data = {"root": api.Session["AVALON_PROJECTS"], + "project": {"name": project_name, + "code": project['data']['code']}, + "silo": silo, + "family": instance.data['family'], + "asset": asset_name, + "subset": subset_name, + "version": version_number, + "hierarchy": hierarchy, + "representation": "TEMP"} + + instance.data["template"] = template + instance.data["assumedTemplateData"] = template_data + + # We take the parent folder of representation 'filepath' + instance.data["assumedDestination"] = os.path.dirname( + (anatomy.format(template_data)).publish.path + ) diff --git a/pype/plugins/global/publish/integrate_assumed_destination.py b/pype/plugins/global/publish/integrate_assumed_destination.py index 6999ce6ab8..3bbd4cf33b 100644 --- a/pype/plugins/global/publish/integrate_assumed_destination.py +++ b/pype/plugins/global/publish/integrate_assumed_destination.py @@ -30,7 +30,8 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin): "resources") # Clean the path - mock_destination = os.path.abspath(os.path.normpath(mock_destination)).replace("\\", "/") + mock_destination = os.path.abspath( + os.path.normpath(mock_destination)).replace("\\", "/") # Define resource destination and transfers resources = 
instance.data.get("resources", list()) @@ -38,7 +39,8 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin): for resource in resources: # Add destination to the resource - source_filename = os.path.basename(resource["source"]).replace("\\", "/") + source_filename = os.path.basename( + resource["source"]).replace("\\", "/") destination = os.path.join(mock_destination, source_filename) # Force forward slashes to fix issue with software unable @@ -53,7 +55,8 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin): files = resource['files'] for fsrc in files: fname = os.path.basename(fsrc) - fdest = os.path.join(mock_destination, fname).replace("\\", "/") + fdest = os.path.join( + mock_destination, fname).replace("\\", "/") transfers.append([fsrc, fdest]) instance.data["resources"] = resources From 603b0fd3fdaad96ec27e4fc7446856e26ddf2e90 Mon Sep 17 00:00:00 2001 From: Ondrej Samohel Date: Mon, 23 Sep 2019 11:08:52 +0200 Subject: [PATCH 02/28] fixed variable name --- pype/plugins/global/publish/submit_publish_job.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 8d352b8872..b812fb16e6 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -1,7 +1,6 @@ import os import json import re -from pprint import pprint import logging from avalon import api, io @@ -147,7 +146,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "PYPE_ROOT" ] - def _submit_deadline_post_job(self, instance, job): """ Deadline specific code separated from :meth:`process` for sake of @@ -192,7 +190,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # Transfer the environment from the original job to this dependent # job so they use the same environment - environment = job["Props"].get("Env", {}) i = 0 for index, key in enumerate(environment): @@ -295,7 +292,7 
@@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # Optional metadata (for debugging) "metadata": { "instance": data, - "job": job, + "job": render_job, "session": api.Session.copy() } } From b35a06ed7b1d1014477f8783743b1401927d2869 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 26 Sep 2019 10:42:15 +0200 Subject: [PATCH 03/28] remove deprecated actions --- pype/ftrack/actions/action_asset_delete.py | 129 ------------------ .../actions/action_delete_unpublished.py | 93 ------------- pype/ftrack/actions/action_set_version.py | 122 ----------------- 3 files changed, 344 deletions(-) delete mode 100644 pype/ftrack/actions/action_asset_delete.py delete mode 100644 pype/ftrack/actions/action_delete_unpublished.py delete mode 100644 pype/ftrack/actions/action_set_version.py diff --git a/pype/ftrack/actions/action_asset_delete.py b/pype/ftrack/actions/action_asset_delete.py deleted file mode 100644 index 654c78049b..0000000000 --- a/pype/ftrack/actions/action_asset_delete.py +++ /dev/null @@ -1,129 +0,0 @@ -import sys -import argparse -import logging -from pype.vendor import ftrack_api -from pype.ftrack import BaseAction - - -class AssetDelete(BaseAction): - '''Custom action.''' - - #: Action identifier. - identifier = 'asset.delete' - #: Action label. 
- label = 'Asset Delete' - - def discover(self, session, entities, event): - ''' Validation ''' - - if ( - len(entities) != 1 or - entities[0].entity_type not in ['Shot', 'Asset Build'] - ): - return False - - return True - - def interface(self, session, entities, event): - - if not event['data'].get('values', {}): - entity = entities[0] - - items = [] - for asset in entity['assets']: - # get asset name for label - label = 'None' - if asset['name']: - label = asset['name'] - - items.append({ - 'label': label, - 'name': label, - 'value': False, - 'type': 'boolean' - }) - - if len(items) < 1: - return { - 'success': False, - 'message': 'There are no assets to delete' - } - - return items - - def launch(self, session, entities, event): - - entity = entities[0] - # if values were set remove those items - if 'values' in event['data']: - values = event['data']['values'] - # get list of assets to delete from form - to_delete = [] - for key in values: - if values[key]: - to_delete.append(key) - # delete them by name - for asset in entity['assets']: - if asset['name'] in to_delete: - session.delete(asset) - try: - session.commit() - except Exception: - session.rollback() - raise - - return { - 'success': True, - 'message': 'Asset deleted.' - } - - -def register(session, plugins_presets={}): - '''Register action. Called when used as an event plugin.''' - - # Validate that session is an instance of ftrack_api.Session. If not, - # assume that register is being called from an old or incompatible API and - # return without doing anything. - if not isinstance(session, ftrack_api.session.Session): - return - - AssetDelete(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. 
- loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' - ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) diff --git a/pype/ftrack/actions/action_delete_unpublished.py b/pype/ftrack/actions/action_delete_unpublished.py deleted file mode 100644 index 5e7f783ba7..0000000000 --- a/pype/ftrack/actions/action_delete_unpublished.py +++ /dev/null @@ -1,93 +0,0 @@ -import sys -import argparse -import logging -from pype.vendor import ftrack_api -from pype.ftrack import BaseAction - - -class VersionsCleanup(BaseAction): - '''Custom action.''' - - # Action identifier - identifier = 'versions.cleanup' - # Action label - label = 'Versions cleanup' - - def discover(self, session, entities, event): - ''' Validation ''' - - # Only 1 AssetVersion is allowed - if len(entities) != 1 or entities[0].entity_type != 'AssetVersion': - return False - - return True - - def launch(self, session, entities, event): - - entity = entities[0] - - # Go through all versions in asset - for version in entity['asset']['versions']: - if not version['is_published']: - session.delete(version) - try: - session.commit() - except Exception: - session.rollback() - raise - - return { - 'success': True, - 'message': 'Hidden versions were removed' - } - - -def register(session, plugins_presets={}): - '''Register action. 
Called when used as an event plugin.''' - - # Validate that session is an instance of ftrack_api.Session. If not, - # assume that register is being called from an old or incompatible API and - # return without doing anything. - if not isinstance(session, ftrack_api.session.Session): - return - - VersionsCleanup(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. - loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' - ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) diff --git a/pype/ftrack/actions/action_set_version.py b/pype/ftrack/actions/action_set_version.py deleted file mode 100644 index 5bf965e3ef..0000000000 --- a/pype/ftrack/actions/action_set_version.py +++ /dev/null @@ -1,122 +0,0 @@ -import sys -import argparse -import logging -from pype.vendor import ftrack_api -from pype.ftrack import BaseAction - - -class SetVersion(BaseAction): - '''Custom action.''' - - #: Action identifier. - identifier = 'version.set' - #: Action label. 
- label = 'Version Set' - - def discover(self, session, entities, event): - ''' Validation ''' - - # Only 1 AssetVersion is allowed - if len(entities) != 1 or entities[0].entity_type != 'AssetVersion': - return False - - return True - - def interface(self, session, entities, event): - - if not event['data'].get('values', {}): - entity = entities[0] - - # Get actual version of asset - act_ver = entity['version'] - # Set form - items = [{ - 'label': 'Version number', - 'type': 'number', - 'name': 'version_number', - 'value': act_ver - }] - - return items - - def launch(self, session, entities, event): - - entity = entities[0] - - # Do something with the values or return a new form. - values = event['data'].get('values', {}) - # Default is action True - scs = False - - if not values['version_number']: - msg = 'You didn\'t enter any version.' - elif int(values['version_number']) <= 0: - msg = 'Negative or zero version is not valid.' - else: - try: - entity['version'] = values['version_number'] - session.commit() - msg = 'Version was changed to v{0}'.format( - values['version_number'] - ) - scs = True - except Exception as e: - msg = 'Unexpected error occurs during version set ({})'.format( - str(e) - ) - - return { - 'success': scs, - 'message': msg - } - - -def register(session, plugins_presets={}): - '''Register action. Called when used as an event plugin.''' - - # Validate that session is an instance of ftrack_api.Session. If not, - # assume that register is being called from an old or incompatible API and - # return without doing anything. - if not isinstance(session, ftrack_api.session.Session): - return - - SetVersion(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. 
- loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' - ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) From 11a2bc2378893f529dc0c59ac0407d396972f5ae Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 26 Sep 2019 10:42:31 +0200 Subject: [PATCH 04/28] ignore doctors by default --- pype/ftrack/actions/action_attributes_remapper.py | 1 + pype/ftrack/actions/action_cust_attr_doctor.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/pype/ftrack/actions/action_attributes_remapper.py b/pype/ftrack/actions/action_attributes_remapper.py index 2c4899410d..759b5765e5 100644 --- a/pype/ftrack/actions/action_attributes_remapper.py +++ b/pype/ftrack/actions/action_attributes_remapper.py @@ -8,6 +8,7 @@ from pype.ftrack.lib.io_nonsingleton import DbConnector class AttributesRemapper(BaseAction): '''Edit meta data action.''' + ignore_me = True #: Action identifier. identifier = 'attributes.remapper' #: Action label. diff --git a/pype/ftrack/actions/action_cust_attr_doctor.py b/pype/ftrack/actions/action_cust_attr_doctor.py index b875f52ab8..b51eb9a48b 100644 --- a/pype/ftrack/actions/action_cust_attr_doctor.py +++ b/pype/ftrack/actions/action_cust_attr_doctor.py @@ -9,6 +9,8 @@ from pype.ftrack import BaseAction class CustomAttributeDoctor(BaseAction): + + ignore_me = True #: Action identifier. 
identifier = 'custom.attributes.doctor' #: Action label. From b1ef62e66fc266070b4856ba8f4fe65d48ee4317 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 26 Sep 2019 10:43:08 +0200 Subject: [PATCH 05/28] added alternatives for fstart and fend in custom attribute doctor --- pype/ftrack/actions/action_cust_attr_doctor.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_cust_attr_doctor.py b/pype/ftrack/actions/action_cust_attr_doctor.py index b51eb9a48b..af5fe2dc4a 100644 --- a/pype/ftrack/actions/action_cust_attr_doctor.py +++ b/pype/ftrack/actions/action_cust_attr_doctor.py @@ -28,7 +28,9 @@ class CustomAttributeDoctor(BaseAction): hierarchical_ca = ['handleStart', 'handleEnd', 'frameStart', 'frameEnd'] hierarchical_alternatives = { 'handleStart': 'handles', - 'handleEnd': 'handles' + 'handleEnd': 'handles', + "frameStart": "fstart", + "frameEnd": "fend" } # Roles for new custom attributes From dd66e188739abadf8a880af74bcff92fb779714a Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 26 Sep 2019 10:43:54 +0200 Subject: [PATCH 06/28] typo fix --- pype/ftrack/events/action_sync_hier_attrs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/events/action_sync_hier_attrs.py b/pype/ftrack/events/action_sync_hier_attrs.py index f8ecb9e3cd..c9d968ee5d 100644 --- a/pype/ftrack/events/action_sync_hier_attrs.py +++ b/pype/ftrack/events/action_sync_hier_attrs.py @@ -21,7 +21,7 @@ class SyncHierarchicalAttrs(BaseAction): identifier = 'sync.hierarchical.attrs' #: Action label. label = "Pype Admin" - variant = '- Sync Hier Attrs (server)' + variant = '- Sync Hier Attrs (Server)' #: Action description. 
description = 'Synchronize hierarchical attributes' #: Icon From 19a5747aa175b11e11b16d2a9de8d04632d811cf Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Thu, 26 Sep 2019 10:44:55 +0200 Subject: [PATCH 07/28] fix logging in job killer --- pype/ftrack/actions/action_job_killer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/ftrack/actions/action_job_killer.py b/pype/ftrack/actions/action_job_killer.py index 8584b26aa4..717f87e879 100644 --- a/pype/ftrack/actions/action_job_killer.py +++ b/pype/ftrack/actions/action_job_killer.py @@ -107,7 +107,7 @@ class JobKiller(BaseAction): 'Changing Job ({}) status: {} -> failed' ).format(job['id'], job['status'])) except Exception: - self.warning.debug(( + self.log.warning.debug(( 'Changing Job ({}) has failed' ).format(job['id'])) From 56d42251bfb06d025cc8333e9853695d86eb4329 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Sep 2019 16:47:56 +0200 Subject: [PATCH 08/28] created module for logging gui --- pype/logging/gui/__init__.py | 0 pype/logging/tray/__init__.py | 5 ++++ pype/logging/tray/logging_module.py | 36 +++++++++++++++++++++++++++++ 3 files changed, 41 insertions(+) create mode 100644 pype/logging/gui/__init__.py create mode 100644 pype/logging/tray/__init__.py create mode 100644 pype/logging/tray/logging_module.py diff --git a/pype/logging/gui/__init__.py b/pype/logging/gui/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/pype/logging/tray/__init__.py b/pype/logging/tray/__init__.py new file mode 100644 index 0000000000..a2586155e7 --- /dev/null +++ b/pype/logging/tray/__init__.py @@ -0,0 +1,5 @@ +from .logging_module import LoggingModule + + +def tray_init(tray_widget, main_widget): + return LoggingModule(main_widget, tray_widget) diff --git a/pype/logging/tray/logging_module.py b/pype/logging/tray/logging_module.py new file mode 100644 index 0000000000..30f55cd680 --- /dev/null +++ b/pype/logging/tray/logging_module.py @@ -0,0 +1,36 @@ +import os +from Qt 
import QtWidgets + +from pypeapp import Logger + +from ..gui.app import LogsWindow + +log = Logger().get_logger("LoggingModule", "logging") + + +class LoggingModule: + def __init__(self, main_parent=None, parent=None): + self.parent = parent + + self.window = LogsWindow() + + # Definition of Tray menu + def tray_menu(self, parent_menu): + # Menu for Tray App + menu = QtWidgets.QMenu('Logging', parent_menu) + # menu.setProperty('submenu', 'on') + + show_action = QtWidgets.QAction("Show Logs", menu) + show_action.triggered.connect(self.on_show_logs) + menu.addAction(show_action) + + parent_menu.addMenu(menu) + + def tray_start(self): + pass + + def process_modules(self, modules): + return + + def on_show_logs(self): + self.window.show() From 2cbee84e3eec3b3ff3e51a7de4f596f51ed7402c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Mon, 30 Sep 2019 16:48:44 +0200 Subject: [PATCH 09/28] added first version of logging gui without filtering and NEAT features --- pype/logging/gui/app.py | 37 ++++ pype/logging/gui/lib.py | 94 ++++++++ pype/logging/gui/models.py | 169 ++++++++++++++ pype/logging/gui/widgets.py | 426 ++++++++++++++++++++++++++++++++++++ 4 files changed, 726 insertions(+) create mode 100644 pype/logging/gui/app.py create mode 100644 pype/logging/gui/lib.py create mode 100644 pype/logging/gui/models.py create mode 100644 pype/logging/gui/widgets.py diff --git a/pype/logging/gui/app.py b/pype/logging/gui/app.py new file mode 100644 index 0000000000..7cee280158 --- /dev/null +++ b/pype/logging/gui/app.py @@ -0,0 +1,37 @@ +from Qt import QtWidgets, QtCore +from .widgets import LogsWidget, LogDetailWidget +from pypeapp import style + + +class LogsWindow(QtWidgets.QWidget): + def __init__(self, parent=None): + super(LogsWindow, self).__init__(parent) + + self.setStyleSheet(style.load_stylesheet()) + self.resize(1200, 800) + logs_widget = LogsWidget(parent=self) + log_detail = LogDetailWidget(parent=self) + + main_layout = QtWidgets.QHBoxLayout() + + log_splitter = 
QtWidgets.QSplitter() + log_splitter.setOrientation(QtCore.Qt.Horizontal) + log_splitter.addWidget(logs_widget) + log_splitter.addWidget(log_detail) + log_splitter.setStretchFactor(0, 65) + log_splitter.setStretchFactor(1, 35) + + main_layout.addWidget(log_splitter) + + self.logs_widget = logs_widget + self.log_detail = log_detail + + self.setLayout(main_layout) + self.setWindowTitle("Logs") + + self.logs_widget.active_changed.connect(self.on_selection_changed) + + def on_selection_changed(self): + index = self.logs_widget.selected_log() + node = index.data(self.logs_widget.model.NodeRole) + self.log_detail.set_detail(node) diff --git a/pype/logging/gui/lib.py b/pype/logging/gui/lib.py new file mode 100644 index 0000000000..85782e071e --- /dev/null +++ b/pype/logging/gui/lib.py @@ -0,0 +1,94 @@ +import contextlib +from Qt import QtCore + + +def _iter_model_rows( + model, column, include_root=False +): + """Iterate over all row indices in a model""" + indices = [QtCore.QModelIndex()] # start iteration at root + + for index in indices: + # Add children to the iterations + child_rows = model.rowCount(index) + for child_row in range(child_rows): + child_index = model.index(child_row, column, index) + indices.append(child_index) + + if not include_root and not index.isValid(): + continue + + yield index + + +@contextlib.contextmanager +def preserve_states( + tree_view, column=0, role=None, + preserve_expanded=True, preserve_selection=True, + expanded_role=QtCore.Qt.DisplayRole, selection_role=QtCore.Qt.DisplayRole + +): + """Preserves row selection in QTreeView by column's data role. + + This function is created to maintain the selection status of + the model items. When refresh is triggered the items which are expanded + will stay expanded and vise versa. 
+ + tree_view (QWidgets.QTreeView): the tree view nested in the application + column (int): the column to retrieve the data from + role (int): the role which dictates what will be returned + + Returns: + None + + """ + # When `role` is set then override both expanded and selection roles + if role: + expanded_role = role + selection_role = role + + model = tree_view.model() + selection_model = tree_view.selectionModel() + flags = selection_model.Select | selection_model.Rows + + expanded = set() + + if preserve_expanded: + for index in _iter_model_rows( + model, column=column, include_root=False + ): + if tree_view.isExpanded(index): + value = index.data(expanded_role) + expanded.add(value) + + selected = None + + if preserve_selection: + selected_rows = selection_model.selectedRows() + if selected_rows: + selected = set(row.data(selection_role) for row in selected_rows) + + try: + yield + finally: + if expanded: + for index in _iter_model_rows( + model, column=0, include_root=False + ): + value = index.data(expanded_role) + is_expanded = value in expanded + # skip if new index was created meanwhile + if is_expanded is None: + continue + tree_view.setExpanded(index, is_expanded) + + if selected: + # Go through all indices, select the ones with similar data + for index in _iter_model_rows( + model, column=column, include_root=False + ): + value = index.data(selection_role) + state = value in selected + if state: + tree_view.scrollTo(index) # Ensure item is visible + selection_model.select(index, flags) diff --git a/pype/logging/gui/models.py b/pype/logging/gui/models.py new file mode 100644 index 0000000000..6722ed0fe0 --- /dev/null +++ b/pype/logging/gui/models.py @@ -0,0 +1,169 @@ +import os +from Qt import QtCore +from pypeapp import Logger +from pypeapp.lib.log import _bootstrap_mongo_log + +log = Logger().get_logger("LogModel", "LoggingModule") + + +class LogModel(QtCore.QAbstractItemModel): + COLUMNS = [ + "user", + "host", + "lineNumber", + "method", + 
"module", + "fileName", + "loggerName", + "message", + "level", + "timestamp", + ] + + colums_mapping = { + "user": "User", + "host": "Host", + "lineNumber": "Line n.", + "method": "Method", + "module": "Module", + "fileName": "File name", + "loggerName": "Logger name", + "message": "Message", + "level": "Level", + "timestamp": "Timestamp", + } + + NodeRole = QtCore.Qt.UserRole + 1 + + def __init__(self, parent=None): + super(LogModel, self).__init__(parent) + self._root_node = Node() + + collection = os.environ.get('PYPE_LOG_MONGO_COL') + database = _bootstrap_mongo_log() + self.dbcon = None + if collection in database.list_collection_names(): + self.dbcon = database[collection] + + def add_log(self, log): + node = Node(log) + self._root_node.add_child(node) + + def refresh(self): + self.clear() + self.beginResetModel() + if self.dbcon: + result = self.dbcon.find({}) + for item in result: + self.add_log(item) + self.endResetModel() + + + def data(self, index, role): + if not index.isValid(): + return None + + if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole: + node = index.internalPointer() + column = index.column() + + key = self.COLUMNS[column] + if key == "timestamp": + return str(node.get(key, None)) + return node.get(key, None) + + if role == self.NodeRole: + return index.internalPointer() + + def index(self, row, column, parent): + """Return index for row/column under parent""" + + if not parent.isValid(): + parent_node = self._root_node + else: + parent_node = parent.internalPointer() + + child_item = parent_node.child(row) + if child_item: + return self.createIndex(row, column, child_item) + else: + return QtCore.QModelIndex() + + def rowCount(self, parent): + node = self._root_node + if parent.isValid(): + node = parent.internalPointer() + return node.childCount() + + def columnCount(self, parent): + return len(self.COLUMNS) + + def parent(self, index): + return QtCore.QModelIndex() + + def headerData(self, section, orientation, role): + if 
role == QtCore.Qt.DisplayRole: + if section < len(self.COLUMNS): + key = self.COLUMNS[section] + return self.colums_mapping.get(key, key) + + super(LogModel, self).headerData(section, orientation, role) + + def flags(self, index): + return (QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) + + def clear(self): + self.beginResetModel() + self._root_node = Node() + self.endResetModel() + + +class Node(dict): + """A node that can be represented in a tree view. + + The node can store data just like a dictionary. + + >>> data = {"name": "John", "score": 10} + >>> node = Node(data) + >>> assert node["name"] == "John" + + """ + + def __init__(self, data=None): + super(Node, self).__init__() + + self._children = list() + self._parent = None + + if data is not None: + assert isinstance(data, dict) + self.update(data) + + def childCount(self): + return len(self._children) + + def child(self, row): + if row >= len(self._children): + log.warning("Invalid row as child: {0}".format(row)) + return + + return self._children[row] + + def children(self): + return self._children + + def parent(self): + return self._parent + + def row(self): + """ + Returns: + int: Index of this node under parent""" + if self._parent is not None: + siblings = self.parent().children() + return siblings.index(self) + + def add_child(self, child): + """Add a child to this node""" + child._parent = self + self._children.append(child) diff --git a/pype/logging/gui/widgets.py b/pype/logging/gui/widgets.py new file mode 100644 index 0000000000..66692c2c65 --- /dev/null +++ b/pype/logging/gui/widgets.py @@ -0,0 +1,426 @@ +import datetime +import inspect +from Qt import QtCore, QtWidgets, QtGui +from PyQt5.QtCore import QVariant +from .models import LogModel + +from .lib import preserve_states + + +class SearchComboBox(QtWidgets.QComboBox): + """Searchable ComboBox with empty placeholder value as first value""" + + def __init__(self, parent=None, placeholder=""): + super(SearchComboBox, 
self).__init__(parent) + + self.setEditable(True) + self.setInsertPolicy(self.NoInsert) + self.lineEdit().setPlaceholderText(placeholder) + + # Apply completer settings + completer = self.completer() + completer.setCompletionMode(completer.PopupCompletion) + completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive) + + # Force style sheet on popup menu + # It won't take the parent stylesheet for some reason + # todo: better fix for completer popup stylesheet + if parent: + popup = completer.popup() + popup.setStyleSheet(parent.styleSheet()) + + self.currentIndexChanged.connect(self.onIndexChange) + + def onIndexChange(self, index): + print(index) + + def populate(self, items): + self.clear() + self.addItems([""]) # ensure first item is placeholder + self.addItems(items) + + def get_valid_value(self): + """Return the current text if it's a valid value else None + + Note: The empty placeholder value is valid and returns as "" + + """ + + text = self.currentText() + lookup = set(self.itemText(i) for i in range(self.count())) + if text not in lookup: + return None + + return text + +class CheckableComboBox2(QtWidgets.QComboBox): + def __init__(self, parent=None): + super(CheckableComboBox, self).__init__(parent) + self.view().pressed.connect(self.handleItemPressed) + self._changed = False + + def handleItemPressed(self, index): + item = self.model().itemFromIndex(index) + if item.checkState() == QtCore.Qt.Checked: + item.setCheckState(QtCore.Qt.Unchecked) + else: + item.setCheckState(QtCore.Qt.Checked) + self._changed = True + + def hidePopup(self): + if not self._changed: + super(CheckableComboBox, self).hidePopup() + self._changed = False + + def itemChecked(self, index): + item = self.model().item(index, self.modelColumn()) + return item.checkState() == QtCore.Qt.Checked + + def setItemChecked(self, index, checked=True): + item = self.model().item(index, self.modelColumn()) + if checked: + item.setCheckState(QtCore.Qt.Checked) + else: + 
item.setCheckState(QtCore.Qt.Unchecked) + + +class SelectableMenu(QtWidgets.QMenu): + + selection_changed = QtCore.Signal() + + def mouseReleaseEvent(self, event): + action = self.activeAction() + if action and action.isEnabled(): + action.trigger() + self.selection_changed.emit() + else: + super(SelectableMenu, self).mouseReleaseEvent(event) + +class CustomCombo(QtWidgets.QWidget): + + selection_changed = QtCore.Signal() + + def __init__(self, title, parent=None): + super(CustomCombo, self).__init__(parent) + toolbutton = QtWidgets.QToolButton(self) + toolbutton.setText(title) + + toolmenu = SelectableMenu(self) + + toolbutton.setMenu(toolmenu) + toolbutton.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup) + + layout = QtWidgets.QHBoxLayout() + layout.setContentsMargins(0, 0, 0, 0) + layout.addWidget(toolbutton) + + self.setLayout(layout) + + # toolmenu.selection_changed.connect(self.on_selection_changed) + toolmenu.selection_changed.connect(self.selection_changed) + + self.toolbutton = toolbutton + self.toolmenu = toolmenu + self.main_layout = layout + + def populate(self, items): + self.toolmenu.clear() + self.addItems(items) + + def addItems(self, items): + for item in items: + action = self.toolmenu.addAction(item) + action.setCheckable(True) + action.setChecked(True) + self.toolmenu.addAction(action) + + def items(self): + for action in self.toolmenu.actions(): + yield action + + +class CheckableComboBox(QtWidgets.QComboBox): + def __init__(self, parent=None): + super(CheckableComboBox, self).__init__(parent) + + view = QtWidgets.QTreeView() + view.header().hide() + view.setRootIsDecorated(False) + + model = QtGui.QStandardItemModel() + + view.pressed.connect(self.handleItemPressed) + self._changed = False + + self.setView(view) + self.setModel(model) + + self.view = view + self.model = model + + def handleItemPressed(self, index): + item = self.model.itemFromIndex(index) + if item.checkState() == QtCore.Qt.Checked: + item.setCheckState(QtCore.Qt.Unchecked) 
+ else: + item.setCheckState(QtCore.Qt.Checked) + self._changed = True + + def hidePopup(self): + if not self._changed: + super(CheckableComboBox, self).hidePopup() + self._changed = False + + def itemChecked(self, index): + item = self.model.item(index, self.modelColumn()) + return item.checkState() == QtCore.Qt.Checked + + def setItemChecked(self, index, checked=True): + item = self.model.item(index, self.modelColumn()) + if checked: + item.setCheckState(QtCore.Qt.Checked) + else: + item.setCheckState(QtCore.Qt.Unchecked) + + def addItems(self, items): + for text, checked in items: + text_item = QtGui.QStandardItem(text) + checked_item = QtGui.QStandardItem() + checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole) + self.model.appendRow([text_item, checked_item]) + + +class LogsWidget(QtWidgets.QWidget): + """A widget that lists the published subsets for an asset""" + + active_changed = QtCore.Signal() + + def __init__(self, parent=None): + super(LogsWidget, self).__init__(parent=parent) + + model = LogModel() + + filter_layout = QtWidgets.QHBoxLayout() + + # user_filter = SearchComboBox(self, "Users") + user_filter = CustomCombo("Users", self) + users = model.dbcon.distinct("user") + user_filter.populate(users) + user_filter.selection_changed.connect(self.user_changed) + + level_filter = CustomCombo("Levels", self) + # levels = [(level, True) for level in model.dbcon.distinct("level")] + levels = model.dbcon.distinct("level") + level_filter.addItems(levels) + + date_from_label = QtWidgets.QLabel("From:") + date_filter_from = QtWidgets.QDateTimeEdit() + + date_from_layout = QtWidgets.QVBoxLayout() + date_from_layout.addWidget(date_from_label) + date_from_layout.addWidget(date_filter_from) + + # now = datetime.datetime.now() + # QtCore.QDateTime(now.year, now.month, now.day, now.hour, now.minute, second = 0, msec = 0, timeSpec = 0) + date_to_label = QtWidgets.QLabel("To:") + date_filter_to = QtWidgets.QDateTimeEdit() + + date_to_layout = 
QtWidgets.QVBoxLayout() + date_to_layout.addWidget(date_to_label) + date_to_layout.addWidget(date_filter_to) + + filter_layout.addWidget(user_filter) + filter_layout.addWidget(level_filter) + + filter_layout.addLayout(date_from_layout) + filter_layout.addLayout(date_to_layout) + + view = QtWidgets.QTreeView(self) + view.setAllColumnsShowFocus(True) + + # # Set view delegates + # time_delegate = PrettyTimeDelegate() + # column = model.COLUMNS.index("time") + # view.setItemDelegateForColumn(column, time_delegate) + + layout = QtWidgets.QVBoxLayout(self) + layout.setContentsMargins(0, 0, 0, 0) + layout.addLayout(filter_layout) + layout.addWidget(view) + + view.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) + view.setSortingEnabled(True) + view.sortByColumn( + model.COLUMNS.index("timestamp"), + QtCore.Qt.AscendingOrder + ) + + view.setModel(model) + + view.customContextMenuRequested.connect(self.on_context_menu) + view.selectionModel().selectionChanged.connect(self.active_changed) + # user_filter.connect() + + # TODO remove if nothing will affect... + # header = self.view.header() + # # Enforce the columns to fit the data (purely cosmetic) + # if Qt.__binding__ in ("PySide2", "PyQt5"): + # header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents) + # else: + # header.setResizeMode(QtWidgets.QHeaderView.ResizeToContents) + + # Set signals + + # prepare + model.refresh() + + # Store to memory + self.model = model + self.view = view + + self.user_filter = user_filter + + def user_changed(self): + for action in self.user_filter.items(): + print(action) + + def on_context_menu(self, point): + # TODO will be any actions? 
it's ready + return + + point_index = self.view.indexAt(point) + if not point_index.isValid(): + return + + # Get selected subsets without groups + selection = self.view.selectionModel() + rows = selection.selectedRows(column=0) + + def selected_log(self): + selection = self.view.selectionModel() + rows = selection.selectedRows(column=0) + if len(rows) == 1: + return rows[0] + + return None + + +class LogDetailWidget(QtWidgets.QWidget): + """A Widget that display information about a specific version""" + data_rows = [ + "user", + "message", + "level", + "logname", + "method", + "module", + "fileName", + "lineNumber", + "host", + "timestamp" + ] + + html_text = u""" +

+<h3>{user} - {timestamp}</h3>
+<p>
+<b>User</b><br>{user}<br>
+<b>Level</b><br>{level}<br>
+<b>Message</b><br>{message}<br>
+<b>Log Name</b><br>{logname}<br>
+<b>Method</b><br>{method}<br>
+<b>File</b><br>{fileName}<br>
+<b>Line</b><br>{lineNumber}<br>
+<b>Host</b><br>{host}<br>
+<b>Timestamp</b><br>{timestamp}<br>
+</p>
+""" + + def __init__(self, parent=None): + super(LogDetailWidget, self).__init__(parent=parent) + + layout = QtWidgets.QVBoxLayout(self) + + label = QtWidgets.QLabel("Detail") + detail_widget = LogDetailTextEdit() + detail_widget.setReadOnly(True) + layout.addWidget(label) + layout.addWidget(detail_widget) + + self.detail_widget = detail_widget + + self.setEnabled(True) + + self.set_detail(None) + + def set_detail(self, detail_data): + if not detail_data: + self.detail_widget.setText("") + return + + data = dict() + for row in self.data_rows: + value = detail_data.get(row) or "< Not set >" + data[row] = value + + + self.detail_widget.setHtml(self.html_text.format(**data)) + + +class LogDetailTextEdit(QtWidgets.QTextEdit): + """QTextEdit that displays version specific information. + + This also overrides the context menu to add actions like copying + source path to clipboard or copying the raw data of the version + to clipboard. + + """ + def __init__(self, parent=None): + super(LogDetailTextEdit, self).__init__(parent=parent) + + # self.data = { + # "source": None, + # "raw": None + # } + # + # def contextMenuEvent(self, event): + # """Context menu with additional actions""" + # menu = self.createStandardContextMenu() + # + # # Add additional actions when any text so we can assume + # # the version is set. 
+ # if self.toPlainText().strip(): + # + # menu.addSeparator() + # action = QtWidgets.QAction("Copy source path to clipboard", + # menu) + # action.triggered.connect(self.on_copy_source) + # menu.addAction(action) + # + # action = QtWidgets.QAction("Copy raw data to clipboard", + # menu) + # action.triggered.connect(self.on_copy_raw) + # menu.addAction(action) + # + # menu.exec_(event.globalPos()) + # del menu + # + # def on_copy_source(self): + # """Copy formatted source path to clipboard""" + # source = self.data.get("source", None) + # if not source: + # return + # + # # path = source.format(root=api.registered_root()) + # # clipboard = QtWidgets.QApplication.clipboard() + # # clipboard.setText(path) + # + # def on_copy_raw(self): + # """Copy raw version data to clipboard + # + # The data is string formatted with `pprint.pformat`. + # + # """ + # raw = self.data.get("raw", None) + # if not raw: + # return + # + # raw_text = pprint.pformat(raw) + # clipboard = QtWidgets.QApplication.clipboard() + # clipboard.setText(raw_text) From 16189b56918acebb51295b93f157f0d892e5497e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 2 Oct 2019 13:45:01 +0200 Subject: [PATCH 10/28] fix: some changes improving publishing and loading luts --- pype/plugins/global/publish/integrate_new.py | 2 + pype/plugins/nuke/load/load_luts.py | 322 +++++++++++++++++ pype/plugins/nuke/load/load_luts_ip.py | 335 ++++++++++++++++++ .../nuke/publish/validate_active_viewer.py | 24 ++ 4 files changed, 683 insertions(+) create mode 100644 pype/plugins/nuke/load/load_luts.py create mode 100644 pype/plugins/nuke/load/load_luts_ip.py create mode 100644 pype/plugins/nuke/publish/validate_active_viewer.py diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index d9e4f3f533..e87ee97087 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -429,6 +429,8 @@ class 
IntegrateAssetNew(pyblish.api.InstancePlugin): Returns: None """ + src = os.path.normpath(src) + dst = os.path.normpath(dst) self.log.debug("Copying file .. {} -> {}".format(src, dst)) dirname = os.path.dirname(dst) diff --git a/pype/plugins/nuke/load/load_luts.py b/pype/plugins/nuke/load/load_luts.py new file mode 100644 index 0000000000..4f7c19a588 --- /dev/null +++ b/pype/plugins/nuke/load/load_luts.py @@ -0,0 +1,322 @@ +from avalon import api, style, io +import nuke +import json +from collections import OrderedDict + + +class LoadLuts(api.Loader): + """Loading colorspace soft effect exported from nukestudio""" + + representations = ["lutJson"] + families = ["lut"] + + label = "Load Luts - nodes" + order = 0 + icon = "cc" + color = style.colors.light + ignore_attr = ["useLifetime"] + + def load(self, context, name, namespace, data): + """ + Loading function to get the soft effects to particular read node + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: containerised nuke node object + """ + # import dependencies + from avalon.nuke import containerise + + # get main variables + version = context['version'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = namespace or context['asset']['name'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} 
+ + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = self.fname.replace("\\", "/") + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + GN = nuke.createNode("Group") + + GN["name"].setValue(object_name) + + # adding content to the group node + with GN: + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 4: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to find parent read node + self.connect_read_node(GN, namespace, json_f["assignTo"]) + + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("Loaded lut setup: `{}`".format(GN["name"].value())) + + return containerise( + node=GN, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. 
These automatic changes are to its + inputs: + + """ + + from avalon.nuke import ( + update_container + ) + # get main variables + # Get version from io + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + # get corresponding node + GN = nuke.toNode(container['objectName']) + + file = api.get_representation_path(representation).replace("\\", "/") + name = container['name'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = container['namespace'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # Update the imprinted representation + update_container( + GN, + data_imprint + ) + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + # adding content to the group node + with GN: + # first remove all nodes + [nuke.delete(n) for n in nuke.allNodes()] + + # create input node + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in 
self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 3: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + # create output node + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to find parent read node + self.connect_read_node(GN, namespace, json_f["assignTo"]) + + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + # change color of node + if version.get("name") not in [max_version]: + GN["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("udated to version: {}".format(version.get("name"))) + + def connect_read_node(self, group_node, asset, subset): + """ + Finds read node and selects it + + Arguments: + asset (str): asset name + + Returns: + nuke node: node is selected + None: if nothing found + """ + search_name = "{0}_{1}".format(asset, subset) + node = [n for n in nuke.allNodes() if search_name in n["name"].value()] + if len(node) > 0: + rn = node[0] + else: + rn = None + + # Parent read node has been found + # solving connections + if rn: + dep_nodes = rn.dependent() + + if len(dep_nodes) > 0: + for dn in dep_nodes: + dn.setInput(0, group_node) + + group_node.setInput(0, rn) + group_node.autoplace() + + def reorder_nodes(self, data): + new_order = OrderedDict() + trackNums = [v["trackIndex"] for k, v in data.items()] + subTrackNums = [v["subTrackIndex"] for k, v in data.items()] + + for trackIndex in range( + min(trackNums), max(trackNums) + 1): + for subTrackIndex in range( + min(subTrackNums), max(subTrackNums) + 1): + item = self.get_item(data, trackIndex, 
subTrackIndex) + if item is not {}: + new_order.update(item) + return new_order + + def get_item(self, data, trackIndex, subTrackIndex): + return {key: val for key, val in data.items() + if subTrackIndex == val["subTrackIndex"] + if trackIndex == val["trackIndex"]} + + def byteify(self, input): + """ + Converts unicode strings to strings + It goes trought all dictionary + + Arguments: + input (dict/str): input + + Returns: + dict: with fixed values and keys + + """ + + if isinstance(input, dict): + return {self.byteify(key): self.byteify(value) + for key, value in input.iteritems()} + elif isinstance(input, list): + return [self.byteify(element) for element in input] + elif isinstance(input, unicode): + return input.encode('utf-8') + else: + return input + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from avalon.nuke import viewer_update_and_undo_stop + node = nuke.toNode(container['objectName']) + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py new file mode 100644 index 0000000000..b30f84cc42 --- /dev/null +++ b/pype/plugins/nuke/load/load_luts_ip.py @@ -0,0 +1,335 @@ +from avalon import api, style, io +import nuke +import json +from collections import OrderedDict +from pype.nuke import lib + +class LoadLutsInputProcess(api.Loader): + """Loading colorspace soft effect exported from nukestudio""" + + representations = ["lutJson"] + families = ["lut"] + + label = "Load Luts - Input Process" + order = 0 + icon = "eye" + color = style.colors.alert + ignore_attr = ["useLifetime"] + + def load(self, context, name, namespace, data): + """ + Loading function to get the soft effects to particular read node + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: 
containerised nuke node object + """ + # import dependencies + from avalon.nuke import containerise + + # get main variables + version = context['version'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = namespace or context['asset']['name'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = self.fname.replace("\\", "/") + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + GN = nuke.createNode("Group") + + GN["name"].setValue(object_name) + + # adding content to the group node + with GN: + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 4: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + 
(workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to place it under Viewer1 + if not self.connect_active_viewer(GN): + nuke.delete(GN) + return + + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("Loaded lut setup: `{}`".format(GN["name"].value())) + + return containerise( + node=GN, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. These automatic changes are to its + inputs: + + """ + + from avalon.nuke import ( + update_container + ) + # get main variables + # Get version from io + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + # get corresponding node + GN = nuke.toNode(container['objectName']) + + file = api.get_representation_path(representation).replace("\\", "/") + name = container['name'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + workfile_first_frame = int(nuke.root()["first_frame"].getValue()) + namespace = container['namespace'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # Update the imprinted representation + 
update_container( + GN, + data_imprint + ) + + # getting data from json file with unicode conversion + with open(file, "r") as f: + json_f = {self.byteify(key): self.byteify(value) + for key, value in json.load(f).iteritems()} + + # get correct order of nodes by positions on track and subtrack + nodes_order = self.reorder_nodes(json_f["effects"]) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + # adding content to the group node + with GN: + # first remove all nodes + [nuke.delete(n) for n in nuke.allNodes()] + + # create input node + pre_node = nuke.createNode("Input") + pre_node["name"].setValue("rgb") + + for ef_name, ef_val in nodes_order.items(): + node = nuke.createNode(ef_val["class"]) + for k, v in ef_val["node"].items(): + if k in self.ignore_attr: + continue + if isinstance(v, list) and len(v) > 3: + node[k].setAnimated() + for i, value in enumerate(v): + if isinstance(value, list): + for ci, cv in enumerate(value): + node[k].setValueAt( + cv, + (workfile_first_frame + i), + ci) + else: + node[k].setValueAt( + value, + (workfile_first_frame + i)) + else: + node[k].setValue(v) + node.setInput(0, pre_node) + pre_node = node + + # create output node + output = nuke.createNode("Output") + output.setInput(0, pre_node) + + # try to place it under Viewer1 + if not self.connect_active_viewer(GN): + nuke.delete(GN) + return + + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + # change color of node + if version.get("name") not in [max_version]: + GN["tile_color"].setValue(int("0xd84f20ff", 16)) + else: + GN["tile_color"].setValue(int("0x3469ffff", 16)) + + self.log.info("udated to version: {}".format(version.get("name"))) + + def connect_active_viewer(self, group_node): + """ + Finds Active viewer and + place the node under it, also adds + name of group into Input Process of the viewer + + 
Arguments: + group_node (nuke node): nuke group node object + + """ + group_node_name = group_node["name"].value() + + viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()] + if len(viewer) > 0: + viewer = viewer[0] + else: + self.log.error("Please create Viewer node before you run this action again") + return None + + # get coordinates of Viewer1 + xpos = viewer["xpos"].value() + ypos = viewer["ypos"].value() + + ypos += 150 + + viewer["ypos"].setValue(ypos) + + # set coordinates to group node + group_node["xpos"].setValue(xpos) + group_node["ypos"].setValue(ypos + 50) + + # add group node name to Viewer Input Process + viewer["input_process_node"].setValue(group_node_name) + + # put backdrop under + lib.create_backdrop(label="Input Process", layer=2, nodes=[viewer, group_node], color="0x7c7faaff") + + return True + + def reorder_nodes(self, data): + new_order = OrderedDict() + trackNums = [v["trackIndex"] for k, v in data.items()] + subTrackNums = [v["subTrackIndex"] for k, v in data.items()] + + for trackIndex in range( + min(trackNums), max(trackNums) + 1): + for subTrackIndex in range( + min(subTrackNums), max(subTrackNums) + 1): + item = self.get_item(data, trackIndex, subTrackIndex) + if item is not {}: + new_order.update(item) + return new_order + + def get_item(self, data, trackIndex, subTrackIndex): + return {key: val for key, val in data.items() + if subTrackIndex == val["subTrackIndex"] + if trackIndex == val["trackIndex"]} + + def byteify(self, input): + """ + Converts unicode strings to strings + It goes trought all dictionary + + Arguments: + input (dict/str): input + + Returns: + dict: with fixed values and keys + + """ + + if isinstance(input, dict): + return {self.byteify(key): self.byteify(value) + for key, value in input.iteritems()} + elif isinstance(input, list): + return [self.byteify(element) for element in input] + elif isinstance(input, unicode): + return input.encode('utf-8') + else: + return input + + def switch(self, 
container, representation): + self.update(container, representation) + + def remove(self, container): + from avalon.nuke import viewer_update_and_undo_stop + node = nuke.toNode(container['objectName']) + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/pype/plugins/nuke/publish/validate_active_viewer.py b/pype/plugins/nuke/publish/validate_active_viewer.py new file mode 100644 index 0000000000..618a7f1502 --- /dev/null +++ b/pype/plugins/nuke/publish/validate_active_viewer.py @@ -0,0 +1,24 @@ +import pyblish.api +import nuke + + +class ValidateActiveViewer(pyblish.api.ContextPlugin): + """Validate presentse of the active viewer from nodes + """ + + order = pyblish.api.ValidatorOrder + label = "Validate Active Viewer" + hosts = ["nuke"] + + def process(self, context): + viewer_process_node = context.data.get("ViewerProcess") + + assert viewer_process_node, ( + "Missing active viewer process! Please click on output write node and push key number 1-9" + ) + active_viewer = context.data["ActiveViewer"] + active_input = active_viewer.activeInput() + + assert active_input is not None, ( + "Missing active viewer input! 
Please click on output write node and push key number 1-9" + ) From b7c827f35d4b8d223ed62838bb15d60abfdb8b48 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:13:51 +0200 Subject: [PATCH 11/28] family widget also stores and collect key of family in presets so can be trackable on pyblish --- pype/standalonepublish/widgets/__init__.py | 1 + pype/standalonepublish/widgets/widget_family.py | 7 +++++-- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/pype/standalonepublish/widgets/__init__.py b/pype/standalonepublish/widgets/__init__.py index 4c6a0e85a5..c6e0dd9a47 100644 --- a/pype/standalonepublish/widgets/__init__.py +++ b/pype/standalonepublish/widgets/__init__.py @@ -6,6 +6,7 @@ HelpRole = QtCore.Qt.UserRole + 2 FamilyRole = QtCore.Qt.UserRole + 3 ExistsRole = QtCore.Qt.UserRole + 4 PluginRole = QtCore.Qt.UserRole + 5 +PluginKeyRole = QtCore.Qt.UserRole + 6 from ..resources import get_resource from .button_from_svgs import SvgResizable, SvgButton diff --git a/pype/standalonepublish/widgets/widget_family.py b/pype/standalonepublish/widgets/widget_family.py index 63776b1df3..26eb8077d9 100644 --- a/pype/standalonepublish/widgets/widget_family.py +++ b/pype/standalonepublish/widgets/widget_family.py @@ -5,7 +5,7 @@ import json from collections import namedtuple from . import QtWidgets, QtCore -from . import HelpRole, FamilyRole, ExistsRole, PluginRole +from . import HelpRole, FamilyRole, ExistsRole, PluginRole, PluginKeyRole from . 
import FamilyDescriptionWidget from pypeapp import config @@ -116,8 +116,10 @@ class FamilyWidget(QtWidgets.QWidget): def collect_data(self): plugin = self.list_families.currentItem().data(PluginRole) + key = self.list_families.currentItem().data(PluginKeyRole) family = plugin.family.rsplit(".", 1)[-1] data = { + 'family_preset_key': key, 'family': family, 'subset': self.input_result.text(), 'version': self.version_spinbox.value() @@ -318,7 +320,7 @@ class FamilyWidget(QtWidgets.QWidget): has_families = False presets = config.get_presets().get('standalone_publish', {}) - for creator in presets.get('families', {}).values(): + for key, creator in presets.get('families', {}).items(): creator = namedtuple("Creator", creator.keys())(*creator.values()) label = creator.label or creator.family @@ -327,6 +329,7 @@ class FamilyWidget(QtWidgets.QWidget): item.setData(HelpRole, creator.help or "") item.setData(FamilyRole, creator.family) item.setData(PluginRole, creator) + item.setData(PluginKeyRole, key) item.setData(ExistsRole, False) self.list_families.addItem(item) From 71ad16e650e8df39fe50bfb63a6792084e34e7bb Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:14:50 +0200 Subject: [PATCH 12/28] drop frame uses full path to ffprobe set in FFMPEG_PATH env --- pype/standalonepublish/widgets/widget_drop_frame.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py index e60db892db..a5a686bae1 100644 --- a/pype/standalonepublish/widgets/widget_drop_frame.py +++ b/pype/standalonepublish/widgets/widget_drop_frame.py @@ -220,15 +220,21 @@ class DropDataFrame(QtWidgets.QFrame): self._process_data(data) def load_data_with_probe(self, filepath): + ffprobe_path = os.getenv("FFMPEG_PATH", "") + if ffprobe_path: + ffprobe_path += '/ffprobe' + else: + ffprobe_path = 'ffprobe' + args = [ - 'ffprobe', + ffprobe_path, '-v', 'quiet', 
'-print_format', 'json', '-show_format', '-show_streams', filepath ] ffprobe_p = subprocess.Popen( - args, + ' '.join(args), stdout=subprocess.PIPE, shell=True ) From c9b8bcc60247dbd4471c6e0280543158703f14de Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:15:14 +0200 Subject: [PATCH 13/28] host of application is set to standalonepublisher instead of shell --- pype/standalonepublish/publish.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pype/standalonepublish/publish.py b/pype/standalonepublish/publish.py index 13b505666c..f199aaf84e 100644 --- a/pype/standalonepublish/publish.py +++ b/pype/standalonepublish/publish.py @@ -103,7 +103,7 @@ def avalon_api_publish(data, gui=True): "-pp", os.pathsep.join(pyblish.api.registered_paths()) ] - os.environ["PYBLISH_HOSTS"] = "shell" + os.environ["PYBLISH_HOSTS"] = "standalonepublisher" os.environ["SAPUBLISH_INPATH"] = json_data_path if gui: @@ -139,7 +139,7 @@ def cli_publish(data, gui=True): if gui: args += ["gui"] - os.environ["PYBLISH_HOSTS"] = "shell" + os.environ["PYBLISH_HOSTS"] = "standalonepublisher" os.environ["SAPUBLISH_INPATH"] = json_data_path os.environ["SAPUBLISH_OUTPATH"] = return_data_path From 6f889eec9e332ba42ed7ea01df5e385e1e4164af Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:15:54 +0200 Subject: [PATCH 14/28] added conversion to int in case frameStart contain string --- pype/plugins/global/publish/integrate_new.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 6c89e22a83..a3efd10b2e 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -307,7 +307,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if repre.get("frameStart"): frame_start_padding = len(str( repre.get("frameEnd"))) - index_frame_start = repre.get("frameStart") + index_frame_start = 
int(repre.get("frameStart")) dst_padding_exp = src_padding_exp for i in src_collection.indexes: From e3ac16427256cb59a87eb86eb939246d7f245e70 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:17:30 +0200 Subject: [PATCH 15/28] collect scene version is skipped in standalone publisher --- pype/plugins/global/publish/collect_scene_version.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pype/plugins/global/publish/collect_scene_version.py b/pype/plugins/global/publish/collect_scene_version.py index 12075e2417..3fac823b5c 100644 --- a/pype/plugins/global/publish/collect_scene_version.py +++ b/pype/plugins/global/publish/collect_scene_version.py @@ -13,6 +13,8 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): label = 'Collect Version' def process(self, context): + if "standalonepublisher" in context.data.get("host"): + return filename = os.path.basename(context.data.get('currentFile')) From 4bd116b096bd529dd3c09945ce9870fd42e38863 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:18:16 +0200 Subject: [PATCH 16/28] collect presets order is lowered so they are collected much earlier (before collect_context plugin) --- pype/plugins/global/publish/collect_presets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_presets.py b/pype/plugins/global/publish/collect_presets.py index 7e0d3e2f4b..4d483ec79b 100644 --- a/pype/plugins/global/publish/collect_presets.py +++ b/pype/plugins/global/publish/collect_presets.py @@ -5,7 +5,7 @@ from pypeapp import config class CollectPresets(api.ContextPlugin): """Collect Presets.""" - order = api.CollectorOrder + order = api.CollectorOrder - 0.491 label = "Collect Presets" def process(self, context): From 3e00bb57e848f0e3074c4d4ff07444f9dd843fc9 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:18:51 +0200 Subject: [PATCH 17/28] added standalonepublisher to hosts of collect output repre config --- 
pype/plugins/global/publish/collect_output_repre_config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_output_repre_config.py b/pype/plugins/global/publish/collect_output_repre_config.py index 5595e29cab..248599f749 100644 --- a/pype/plugins/global/publish/collect_output_repre_config.py +++ b/pype/plugins/global/publish/collect_output_repre_config.py @@ -9,7 +9,7 @@ class CollectOutputRepreConfig(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder label = "Collect Config for representation" - hosts = ["shell"] + hosts = ["shell", "standalonepublisher"] def process(self, context): config_data = config.get_presets()["ftrack"]["output_representation"] From ff0009f8147cbbe4c90bb0b346d8586fcb38be0c Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:21:17 +0200 Subject: [PATCH 18/28] anatomy template is not set if family don't have set anatom_template key --- .../plugins/global/publish/collect_context.py | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_context.py b/pype/plugins/global/publish/collect_context.py index 31ab95259c..61c3bcf4d8 100644 --- a/pype/plugins/global/publish/collect_context.py +++ b/pype/plugins/global/publish/collect_context.py @@ -31,9 +31,25 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): in_data = json.load(f) asset_name = in_data['asset'] + family_preset_key = in_data.get('family_preset_key', '') family = in_data['family'] subset = in_data['subset'] + # Load presets + presets = context.data.get("presets") + if not presets: + from pypeapp import config + presets = config.get_presets() + + # Get from presets anatomy key that will be used for getting template + # - default integrate new is used if not set + anatomy_key = presets.get( + "standalone_publish", {}).get( + "families", {}).get( + family_preset_key, {}).get( + "anatomy_template" + ) + project = io.find_one({'type': 
'project'}) asset = io.find_one({ 'type': 'asset', @@ -63,7 +79,9 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): component['destination'] = component['files'] component['stagingDir'] = component['stagingDir'] - component['anatomy_template'] = 'render' + # Do not set anatomy_template if not specified + if anatomy_key: + component['anatomy_template'] = anatomy_key if isinstance(component['files'], list): collections, remainder = clique.assemble(component['files']) self.log.debug("collecting sequence: {}".format(collections)) From 195f9640bd87c44929db23c542497527333d2f23 Mon Sep 17 00:00:00 2001 From: iLLiCiTiT Date: Fri, 4 Oct 2019 13:22:37 +0200 Subject: [PATCH 19/28] added extractor for thumbnails in standalone publisher should create thumbnail from any image or video input --- .../global/publish/extract_thumbnail_sa.py | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 pype/plugins/global/publish/extract_thumbnail_sa.py diff --git a/pype/plugins/global/publish/extract_thumbnail_sa.py b/pype/plugins/global/publish/extract_thumbnail_sa.py new file mode 100644 index 0000000000..7e31e3c701 --- /dev/null +++ b/pype/plugins/global/publish/extract_thumbnail_sa.py @@ -0,0 +1,126 @@ +import os +import tempfile +import subprocess +import pyblish.api +import pype.api + + +class ExtractThumbnail(pyblish.api.InstancePlugin): + """Extract jpeg thumbnail from component input from standalone publisher + + Uses jpeg file from component if possible (when single or multiple jpegs + are loaded to component selected as thumbnail) otherwise extracts from + input file/s single jpeg to temp. 
+ """ + + label = "Extract Thumbnail" + hosts = ["standalonepublisher"] + order = pyblish.api.ExtractorOrder + + def process(self, instance): + repres = instance.data.get('representations') + if not repres: + return + + thumbnail_repre = None + for repre in repres: + if repre.get("thumbnail"): + thumbnail_repre = repre + break + + if not thumbnail_repre: + return + + files = thumbnail_repre.get("files") + if not files: + return + + if isinstance(files, list): + files_len = len(files) + file = str(files[0]) + else: + files_len = 1 + file = files + + is_jpeg = False + if file.endswith(".jpeg") or file.endswith(".jpg"): + is_jpeg = True + + if is_jpeg and files_len == 1: + # skip if already is single jpeg file + return + + elif is_jpeg: + # use first frame as thumbnail if is sequence of jpegs + full_thumbnail_path = file + self.log.info( + "For thumbnail is used file: {}".format(full_thumbnail_path) + ) + + else: + # Convert to jpeg if not yet + full_input_path = os.path.join(thumbnail_repre["stagingDir"], file) + self.log.info("input {}".format(full_input_path)) + + full_thumbnail_path = tempfile.mkstemp(suffix=".jpg")[1] + self.log.info("output {}".format(full_thumbnail_path)) + + config_data = instance.context.data.get("output_repre_config", {}) + + proj_name = os.environ.get("AVALON_PROJECT", "__default__") + profile = config_data.get( + proj_name, + config_data.get("__default__", {}) + ) + + ffmpeg_path = os.getenv("FFMPEG_PATH", "") + if ffmpeg_path: + ffmpeg_path += "/ffmpeg" + else: + ffmpeg_path = "ffmpeg" + + jpeg_items = [] + jpeg_items.append(ffmpeg_path) + # override file if already exists + jpeg_items.append("-y") + # add input filters from peresets + if profile: + jpeg_items.extend(profile.get('input', [])) + # input file + jpeg_items.append("-i {}".format(full_input_path)) + # extract only single file + jpeg_items.append("-vframes 1") + # output file + jpeg_items.append(full_thumbnail_path) + + subprocess_jpeg = " ".join(jpeg_items) + + # run 
subprocess + self.log.debug("Executing: {}".format(subprocess_jpeg)) + subprocess.Popen( + subprocess_jpeg, + stdout=subprocess.PIPE, + shell=True + ) + + # remove thumbnail key from origin repre + thumbnail_repre.pop("thumbnail") + + filename = os.path.basename(full_thumbnail_path) + staging_dir = os.path.dirname(full_thumbnail_path) + + # create new thumbnail representation + representation = { + 'name': 'jpg', + 'ext': 'jpg', + 'files': filename, + "stagingDir": staging_dir, + "thumbnail": True, + "tags": [] + } + + # add Delete tag when temp file was rendered + if not is_jpeg: + representation["tags"].append("delete") + + instance.data["representations"].append(representation) From 2f06a65198a3f5d71aacbae934d3283cdb2d36ca Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Oct 2019 16:41:36 +0200 Subject: [PATCH 20/28] impoving publishing of baked mov - adding feature to add input process node to baking - removing collec/validate active viewer process (not necessary) - output node added to render write node --- .../collect_active_viewer.py | 1 - .../validate_active_viewer.py | 0 .../nuke/publish/extract_ouput_node.py | 20 ++--- .../nuke/publish/extract_review_data.py | 73 ++++++++++++------- .../nuke/publish/validate_rendered_frames.py | 2 + 5 files changed, 57 insertions(+), 39 deletions(-) rename pype/plugins/nuke/{publish => _publish_unused}/collect_active_viewer.py (83%) rename pype/plugins/nuke/{publish => _publish_unused}/validate_active_viewer.py (100%) diff --git a/pype/plugins/nuke/publish/collect_active_viewer.py b/pype/plugins/nuke/_publish_unused/collect_active_viewer.py similarity index 83% rename from pype/plugins/nuke/publish/collect_active_viewer.py rename to pype/plugins/nuke/_publish_unused/collect_active_viewer.py index 5dc17d8768..5a6cc02b88 100644 --- a/pype/plugins/nuke/publish/collect_active_viewer.py +++ b/pype/plugins/nuke/_publish_unused/collect_active_viewer.py @@ -11,5 +11,4 @@ class 
CollectActiveViewer(pyblish.api.ContextPlugin): hosts = ["nuke"] def process(self, context): - context.data["ViewerProcess"] = nuke.ViewerProcess.node() context.data["ActiveViewer"] = nuke.activeViewer() diff --git a/pype/plugins/nuke/publish/validate_active_viewer.py b/pype/plugins/nuke/_publish_unused/validate_active_viewer.py similarity index 100% rename from pype/plugins/nuke/publish/validate_active_viewer.py rename to pype/plugins/nuke/_publish_unused/validate_active_viewer.py diff --git a/pype/plugins/nuke/publish/extract_ouput_node.py b/pype/plugins/nuke/publish/extract_ouput_node.py index 4d7533f010..a144761e5f 100644 --- a/pype/plugins/nuke/publish/extract_ouput_node.py +++ b/pype/plugins/nuke/publish/extract_ouput_node.py @@ -15,21 +15,17 @@ class CreateOutputNode(pyblish.api.ContextPlugin): def process(self, context): # capture selection state with maintained_selection(): - # deselect all allNodes - self.log.info(context.data["ActiveViewer"]) + active_node = [node for inst in context[:] + for node in inst[:] + if "ak:family" in node.knobs()] - active_viewer = context.data["ActiveViewer"] - active_input = active_viewer.activeInput() - active_node = active_viewer.node() - - - last_viewer_node = active_node.input(active_input) - - name = last_viewer_node.name() - self.log.info("Node name: {}".format(name)) + if active_node: + self.log.info(active_node) + active_node = active_node[0] + self.log.info(active_node) + active_node['selected'].setValue(True) # select only instance render node - last_viewer_node['selected'].setValue(True) output_node = nuke.createNode("Output") # deselect all and select the original selection diff --git a/pype/plugins/nuke/publish/extract_review_data.py b/pype/plugins/nuke/publish/extract_review_data.py index 40c3e37434..885fe99b3d 100644 --- a/pype/plugins/nuke/publish/extract_review_data.py +++ b/pype/plugins/nuke/publish/extract_review_data.py @@ -2,7 +2,7 @@ import os import nuke import pyblish.api import pype - +import copy 
class ExtractReviewData(pype.api.Extractor): """Extracts movie and thumbnail with baked in luts @@ -48,9 +48,9 @@ class ExtractReviewData(pype.api.Extractor): assert instance.data['representations'][0]['files'], "Instance data files should't be empty!" - import nuke temporary_nodes = [] - stagingDir = instance.data['representations'][0]["stagingDir"].replace("\\", "/") + stagingDir = instance.data[ + 'representations'][0]["stagingDir"].replace("\\", "/") self.log.debug("StagingDir `{0}`...".format(stagingDir)) collection = instance.data.get("collection", None) @@ -70,16 +70,24 @@ class ExtractReviewData(pype.api.Extractor): first_frame = instance.data.get("frameStart", None) last_frame = instance.data.get("frameEnd", None) - node = previous_node = nuke.createNode("Read") + rnode = nuke.createNode("Read") - node["file"].setValue( + rnode["file"].setValue( os.path.join(stagingDir, fname).replace("\\", "/")) - node["first"].setValue(first_frame) - node["origfirst"].setValue(first_frame) - node["last"].setValue(last_frame) - node["origlast"].setValue(last_frame) - temporary_nodes.append(node) + rnode["first"].setValue(first_frame) + rnode["origfirst"].setValue(first_frame) + rnode["last"].setValue(last_frame) + rnode["origlast"].setValue(last_frame) + temporary_nodes.append(rnode) + previous_node = rnode + + # get input process and connect it to baking + ipn = self.get_view_process_node() + if ipn is not None: + ipn.setInput(0, previous_node) + previous_node = ipn + temporary_nodes.append(ipn) reformat_node = nuke.createNode("Reformat") @@ -95,22 +103,10 @@ class ExtractReviewData(pype.api.Extractor): previous_node = reformat_node temporary_nodes.append(reformat_node) - viewer_process_node = instance.context.data.get("ViewerProcess") - dag_node = None - if viewer_process_node: - dag_node = nuke.createNode(viewer_process_node.Class()) - dag_node.setInput(0, previous_node) - previous_node = dag_node - temporary_nodes.append(dag_node) - # Copy viewer process values - 
excludedKnobs = ["name", "xpos", "ypos"] - for item in viewer_process_node.knobs().keys(): - if item not in excludedKnobs and item in dag_node.knobs(): - x1 = viewer_process_node[item] - x2 = dag_node[item] - x2.fromScript(x1.toScript(False)) - else: - self.log.warning("No viewer node found.") + dag_node = nuke.createNode("OCIODisplay") + dag_node.setInput(0, previous_node) + previous_node = dag_node + temporary_nodes.append(dag_node) # create write node write_node = nuke.createNode("Write") @@ -164,3 +160,28 @@ class ExtractReviewData(pype.api.Extractor): # Clean up for node in temporary_nodes: nuke.delete(node) + + def get_view_process_node(self): + + # Select only the target node + if nuke.selectedNodes(): + [n.setSelected(False) for n in nuke.selectedNodes()] + + for v in [n for n in nuke.allNodes() + if "Viewer" in n.Class()]: + ip = v['input_process'].getValue() + ipn = v['input_process_node'].getValue() + if "VIEWER_INPUT" not in ipn and ip: + ipn_orig = nuke.toNode(ipn) + ipn_orig.setSelected(True) + + if ipn_orig: + nuke.nodeCopy('%clipboard%') + + [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all + + nuke.nodePaste('%clipboard%') + + ipn = nuke.selectedNode() + + return ipn diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index 85cbe7b2c0..3887b5d5b7 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -81,3 +81,5 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): ).format(__name__) instance.data['collection'] = collection + + return From 185e3c29a7157e3264db06c344fa12c0329c30b1 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Oct 2019 16:45:45 +0200 Subject: [PATCH 21/28] fix: standalone publishing and image sequence had troubles --- pype/plugins/ftrack/publish/integrate_remove_components.py | 3 +++ pype/plugins/global/publish/integrate_new.py | 6 +++--- 2 files changed, 
6 insertions(+), 3 deletions(-) diff --git a/pype/plugins/ftrack/publish/integrate_remove_components.py b/pype/plugins/ftrack/publish/integrate_remove_components.py index a215ee1b97..bad50f7200 100644 --- a/pype/plugins/ftrack/publish/integrate_remove_components.py +++ b/pype/plugins/ftrack/publish/integrate_remove_components.py @@ -17,6 +17,9 @@ class IntegrateCleanComponentData(pyblish.api.InstancePlugin): for comp in instance.data['representations']: self.log.debug('component {}'.format(comp)) + + if "%" in comp['published_path'] or "#" in comp['published_path']: + continue if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])): os.remove(comp['published_path']) diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 6c89e22a83..61881b2a34 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -307,7 +307,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if repre.get("frameStart"): frame_start_padding = len(str( repre.get("frameEnd"))) - index_frame_start = repre.get("frameStart") + index_frame_start = int(repre.get("frameStart")) dst_padding_exp = src_padding_exp for i in src_collection.indexes: @@ -322,7 +322,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): dst_padding = dst_padding_exp % index_frame_start index_frame_start += 1 - dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail) + dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail).replace("..", ".") self.log.debug("destination: `{}`".format(dst)) src = os.path.join(stagingdir, src_file_name) self.log.debug("source: {}".format(src)) @@ -357,7 +357,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): src = os.path.join(stagingdir, fname) anatomy_filled = anatomy.format(template_data) dst = os.path.normpath( - anatomy_filled[template_name]["path"]) + anatomy_filled[template_name]["path"]).replace("..", ".") instance.data["transfers"].append([src, dst]) 
From 776e8922bcff08fe37453173dfaea82126a571aa Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Fri, 4 Oct 2019 16:50:18 +0200 Subject: [PATCH 22/28] fix: unnecessary import of module --- pype/plugins/nuke/publish/extract_review_data.py | 1 - 1 file changed, 1 deletion(-) diff --git a/pype/plugins/nuke/publish/extract_review_data.py b/pype/plugins/nuke/publish/extract_review_data.py index 885fe99b3d..08eba5bb1e 100644 --- a/pype/plugins/nuke/publish/extract_review_data.py +++ b/pype/plugins/nuke/publish/extract_review_data.py @@ -2,7 +2,6 @@ import os import nuke import pyblish.api import pype -import copy class ExtractReviewData(pype.api.Extractor): """Extracts movie and thumbnail with baked in luts From 97673d503ccdd70e5b991ebbbf144a5c836aa80e Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Oct 2019 12:32:35 +0200 Subject: [PATCH 23/28] fix(standalonepublish): getting `frameStart` and `frameEnd` from representaions --- pype/plugins/global/publish/collect_context.py | 2 ++ pype/plugins/global/publish/extract_thumbnail_sa.py | 6 +++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_context.py b/pype/plugins/global/publish/collect_context.py index 61c3bcf4d8..f538509a9b 100644 --- a/pype/plugins/global/publish/collect_context.py +++ b/pype/plugins/global/publish/collect_context.py @@ -66,6 +66,8 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): "label": subset, "name": subset, "family": family, + "frameStart": in_data.get("representations", [None])[0].get("frameStart", None), + "frameEnd": in_data.get("representations", [None])[0].get("frameEnd", None), "families": [family, 'ftrack'], }) self.log.info("collected instance: {}".format(instance.data)) diff --git a/pype/plugins/global/publish/extract_thumbnail_sa.py b/pype/plugins/global/publish/extract_thumbnail_sa.py index 7e31e3c701..f42985b560 100644 --- a/pype/plugins/global/publish/extract_thumbnail_sa.py +++ 
b/pype/plugins/global/publish/extract_thumbnail_sa.py @@ -119,8 +119,8 @@ class ExtractThumbnail(pyblish.api.InstancePlugin): "tags": [] } - # add Delete tag when temp file was rendered - if not is_jpeg: - representation["tags"].append("delete") + # # add Delete tag when temp file was rendered + # if not is_jpeg: + # representation["tags"].append("delete") instance.data["representations"].append(representation) From 0c64aeb84ff3a5ace706f1a472f015ef67c19cdf Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Oct 2019 13:29:20 +0200 Subject: [PATCH 24/28] fix(global): little cleanup - was used old label `template` were `anatomy` should be - it was getting filled templates as object not as dictionary --- pype/plugins/global/publish/collect_anatomy.py | 4 ++-- pype/plugins/global/publish/collect_templates.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py index b053a3a0d1..9412209850 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -9,11 +9,11 @@ from pypeapp import Anatomy import pyblish.api -class CollectTemplates(pyblish.api.ContextPlugin): +class CollectAnatomy(pyblish.api.ContextPlugin): """Collect Anatomy into Context""" order = pyblish.api.CollectorOrder - label = "Collect Templates" + label = "Collect Anatomy" def process(self, context): context.data['anatomy'] = Anatomy() diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index 8113f1d763..b3aecca21a 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -82,5 +82,5 @@ class CollectTemplates(pyblish.api.InstancePlugin): # We take the parent folder of representation 'filepath' instance.data["assumedDestination"] = os.path.dirname( - (anatomy.format(template_data)).publish.path + 
(anatomy.format(template_data))["publish"]["path"] ) From ce749e6dcdde1df0a7100700168e19baf92518fb Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Oct 2019 15:07:44 +0200 Subject: [PATCH 25/28] fix(global): adding hosts to filter out unneeded hosts --- pype/plugins/global/publish/collect_templates.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index b3aecca21a..f65433380b 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -23,6 +23,7 @@ class CollectTemplates(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.1 label = "Collect and fill Templates" + hosts = ["maya", "nuke", "standalonepublisher"] def process(self, instance): # get all the stuff from the database From dcfb0aeea423f97311e992ef8bd257660ca72b53 Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Mon, 7 Oct 2019 17:17:37 +0200 Subject: [PATCH 26/28] rounding fps attribute --- pype/plugins/nuke/publish/validate_script.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pype/plugins/nuke/publish/validate_script.py b/pype/plugins/nuke/publish/validate_script.py index 837e30dab7..307e3ade59 100644 --- a/pype/plugins/nuke/publish/validate_script.py +++ b/pype/plugins/nuke/publish/validate_script.py @@ -83,6 +83,8 @@ class ValidateScript(pyblish.api.InstancePlugin): # Set frame range with handles # asset_attributes["frameStart"] -= handle_start # asset_attributes["frameEnd"] += handle_end + if len(str(asset_attributes["fps"])) > 4: + asset_attributes["fps"] = float("{0:.8f}".format(asset_attributes["fps"])) # Get values from nukescript script_attributes = { From 1d0375f13d4c0e1b19b073b034d0c90f1d7d7186 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ond=C5=99ej=20Samohel?= Date: Tue, 8 Oct 2019 11:32:56 +0000 Subject: [PATCH 27/28] fixed case where `host` in context doesn't exists yet --- 
pype/plugins/global/publish/collect_scene_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pype/plugins/global/publish/collect_scene_version.py b/pype/plugins/global/publish/collect_scene_version.py index 3fac823b5c..0d76015909 100644 --- a/pype/plugins/global/publish/collect_scene_version.py +++ b/pype/plugins/global/publish/collect_scene_version.py @@ -13,7 +13,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): label = 'Collect Version' def process(self, context): - if "standalonepublisher" in context.data.get("host"): + if "standalonepublisher" in context.data.get("host", []): return filename = os.path.basename(context.data.get('currentFile')) From c6253b7493991887b1e18df3f84f74da33fac20f Mon Sep 17 00:00:00 2001 From: Jakub Jezek Date: Wed, 16 Oct 2019 14:11:22 +0200 Subject: [PATCH 28/28] fix(nuke): missing variable issue --- pype/plugins/nuke/publish/extract_review_data.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pype/plugins/nuke/publish/extract_review_data.py b/pype/plugins/nuke/publish/extract_review_data.py index 08eba5bb1e..791b9d7969 100644 --- a/pype/plugins/nuke/publish/extract_review_data.py +++ b/pype/plugins/nuke/publish/extract_review_data.py @@ -166,6 +166,7 @@ class ExtractReviewData(pype.api.Extractor): if nuke.selectedNodes(): [n.setSelected(False) for n in nuke.selectedNodes()] + ipn_orig = None for v in [n for n in nuke.allNodes() if "Viewer" in n.Class()]: ip = v['input_process'].getValue()