diff --git a/.flake8 b/.flake8 index 9de8d23bb2..67ed2d77a3 100644 --- a/.flake8 +++ b/.flake8 @@ -1,5 +1,7 @@ [flake8] # ignore = D203 +ignore = BLK100 +max-line-length = 79 exclude = .git, __pycache__, diff --git a/.hound.yml b/.hound.yml new file mode 100644 index 0000000000..409cc4416a --- /dev/null +++ b/.hound.yml @@ -0,0 +1,4 @@ +flake8: + enabled: true + config_file: .flake8 + diff --git a/LICENSE b/LICENSE index dfcd71eb3f..63249bb52b 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2018 orbi tools s.r.o +Copyright (c) 2020 Orbi Tools s.r.o. Permission is hereby granted, free of charge, to any person obtaining a copy diff --git a/README.md b/README.md index e254b0ad87..8110887cbd 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,11 @@ Pype ==== -The base studio _config_ for [Avalon](https://getavalon.github.io/) +Welcome to PYPE _config_ for [Avalon](https://getavalon.github.io/) -Currently this config is dependent on our customised avalon instalation so it won't work with vanilla avalon core. We're working on open sourcing all of the necessary code though. You can still get inspiration or take our individual validators and scripts which should work just fine in other pipelines. +To get all the key information about the project, go to [PYPE.club](http://pype.club) + + +Currently this config is dependent on our customised avalon installation, so it won't work with vanilla avalon core. To install it you'll need to download [pype-setup](https://github.com/pypeclub/pype-setup), which is able to deploy everything for you if you follow the documentation. _This configuration acts as a starting point for all pype club clients wth avalon deployment._ - -Code convention ---------------- - -Below are some of the standard practices applied to this repositories. - -- **Etiquette: PEP8** - - All code is written in PEP8. It is recommended you use a linter as you work, flake8 and pylinter are both good options. -- **Etiquette: Napoleon docstrings** - - Any docstrings are made in Google Napoleon format. See [Napoleon](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) for details. - -- **Etiquette: Semantic Versioning** - - This project follows [semantic versioning](http://semver.org). -- **Etiquette: Underscore means private** - - Anything prefixed with an underscore means that it is internal to wherever it is used. For example, a variable name is only ever used in the parent function or class. A module is not for use by the end-user. In contrast, anything without an underscore is public, but not necessarily part of the API. Members of the API resides in `api.py`. - -- **API: Idempotence** - - A public function must be able to be called twice and produce the exact same result. This means no changing of state without restoring previous state when finishing. For example, if a function requires changing the current selection in Autodesk Maya, it must restore the previous selection prior to completing.
diff --git a/pype/__init__.py b/pype/__init__.py index 91b72d7de5..5cd9832558 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -9,8 +9,9 @@ from pypeapp import config import logging log = logging.getLogger(__name__) -__version__ = "2.3.0" +__version__ = "2.6.0" +PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS") PACKAGE_DIR = os.path.dirname(__file__) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") @@ -72,6 +73,18 @@ def install(): pyblish.register_discovery_filter(filter_pyblish_plugins) avalon.register_plugin_path(avalon.Loader, LOAD_PATH) + # Register project specific plugins + project_name = os.environ.get("AVALON_PROJECT") + if PROJECT_PLUGINS_PATH and project_name: + for path in PROJECT_PLUGINS_PATH.split(os.pathsep): + if not path: + continue + plugin_path = os.path.join(path, project_name, "plugins") + if os.path.exists(plugin_path): + pyblish.register_plugin_path(plugin_path) + avalon.register_plugin_path(avalon.Loader, plugin_path) + avalon.register_plugin_path(avalon.Creator, plugin_path) + # apply monkey patched discover to original one avalon.discover = patched_discover diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py index 4589802f3a..6124ebe843 100644 --- a/pype/ftrack/actions/action_create_project_structure.py +++ b/pype/ftrack/actions/action_create_project_structure.py @@ -19,7 +19,7 @@ class CreateProjectFolders(BaseAction): #: Action description. description = 'Creates folder structure' #: roles that are allowed to register this action - role_list = ['Pypeclub', 'Administrator'] + role_list = ['Pypeclub', 'Administrator', 'Project Manager'] icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format( os.environ.get('PYPE_STATICS_SERVER', '') ) diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py index 7eb9126fca..fc9e66e4f8 100644 --- a/pype/ftrack/actions/action_delete_asset.py +++ b/pype/ftrack/actions/action_delete_asset.py @@ -99,6 +99,7 @@ class DeleteAssetSubset(BaseAction): # Filter event even more (skip task entities) # - task entities are not relevant for avalon + entity_mapping = {} for entity in entities: ftrack_id = entity["id"] if ftrack_id not in ftrack_ids: @@ -107,6 +108,8 @@ class DeleteAssetSubset(BaseAction): if entity.entity_type.lower() == "task": ftrack_ids.remove(ftrack_id) + entity_mapping[ftrack_id] = entity + if not ftrack_ids: # It is bug if this happens! 
return { @@ -122,11 +125,41 @@ class DeleteAssetSubset(BaseAction): project_name = project["full_name"] self.dbcon.Session["AVALON_PROJECT"] = project_name - selected_av_entities = self.dbcon.find({ + selected_av_entities = list(self.dbcon.find({ "type": "asset", "data.ftrackId": {"$in": ftrack_ids} - }) - selected_av_entities = [ent for ent in selected_av_entities] + })) + found_without_ftrack_id = {} + if len(selected_av_entities) != len(ftrack_ids): + found_ftrack_ids = [ + ent["data"]["ftrackId"] for ent in selected_av_entities + ] + for ftrack_id, entity in entity_mapping.items(): + if ftrack_id in found_ftrack_ids: + continue + + av_ents_by_name = list(self.dbcon.find({ + "type": "asset", + "name": entity["name"] + })) + if not av_ents_by_name: + continue + + ent_path_items = [ent["name"] for ent in entity["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + # TODO we should say to user that + # few of them are missing in avalon + for av_ent in av_ents_by_name: + if av_ent["data"]["parents"] != parents: + continue + + # TODO we should say to user that found entity + # with same name does not match same ftrack id? + if "ftrackId" not in av_ent["data"]: + selected_av_entities.append(av_ent) + found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id + break + if not selected_av_entities: return { "success": False, @@ -155,7 +188,8 @@ class DeleteAssetSubset(BaseAction): "created_at": datetime.now(), "project_name": project_name, "subset_ids_by_name": {}, - "subset_ids_by_parent": {} + "subset_ids_by_parent": {}, + "without_ftrack_id": found_without_ftrack_id } id_item = { @@ -413,14 +447,21 @@ class DeleteAssetSubset(BaseAction): asset_ids_to_archive = [] ftrack_ids_to_delete = [] if len(assets_to_delete) > 0: + map_av_ftrack_id = spec_data["without_ftrack_id"] # Prepare data when deleting whole avalon asset avalon_assets = self.dbcon.find({"type": "asset"}) avalon_assets_by_parent = collections.defaultdict(list) for asset in avalon_assets: + asset_id = asset["_id"] parent_id = asset["data"]["visualParent"] avalon_assets_by_parent[parent_id].append(asset) - if asset["_id"] in assets_to_delete: - ftrack_id = asset["data"]["ftrackId"] + if asset_id in assets_to_delete: + ftrack_id = map_av_ftrack_id.get(str(asset_id)) + if not ftrack_id: + ftrack_id = asset["data"].get("ftrackId") + + if not ftrack_id: + continue ftrack_ids_to_delete.append(ftrack_id) children_queue = Queue() diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py new file mode 100644 index 0000000000..f6a66318c9 --- /dev/null +++ b/pype/ftrack/actions/action_delete_old_versions.py @@ -0,0 +1,534 @@ +import os +import collections +import uuid + +import clique +from pymongo import UpdateOne + +from pype.ftrack import BaseAction +from pype.ftrack.lib.io_nonsingleton import DbConnector + +import avalon.pipeline + + +class DeleteOldVersions(BaseAction): + + identifier = "delete.old.versions" + label = "Pype Admin" + variant = "- Delete old versions" + description = ( + "Delete files from older publishes so project can be" + " archived with only lates versions." 
+ ) + role_list = ["Pypeclub", "Project Manager", "Administrator"] + icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format( + os.environ.get('PYPE_STATICS_SERVER', '') + ) + + dbcon = DbConnector() + + inteface_title = "Choose your preferences" + splitter_item = {"type": "label", "value": "---"} + sequence_splitter = "__sequence_splitter__" + + def discover(self, session, entities, event): + ''' Validation ''' + selection = event["data"].get("selection") or [] + for entity in selection: + entity_type = (entity.get("entityType") or "").lower() + if entity_type == "assetversion": + return True + return False + + def interface(self, session, entities, event): + items = [] + root = os.environ.get("AVALON_PROJECTS") + if not root: + msg = "Root path to projects is not set." + items.append({ + "type": "label", + "value": "ERROR: {}".format(msg) + }) + self.show_interface( + items=items, title=self.inteface_title, event=event + ) + return { + "success": False, + "message": msg + } + + if not os.path.exists(root): + msg = "Root path does not exist \"{}\".".format(str(root)) + items.append({ + "type": "label", + "value": "ERROR: {}".format(msg) + }) + self.show_interface( + items=items, title=self.inteface_title, event=event + ) + return { + "success": False, + "message": msg + } + + values = event["data"].get("values") + if values: + versions_count = int(values["last_versions_count"]) + if versions_count >= 1: + return + items.append({ + "type": "label", + "value": ( + "# You have to keep at least 1 version!" + ) + }) + + items.append({ + "type": "label", + "value": ( + "WARNING: This will remove published files of older" + " versions from disk, so we don't recommend using" + " this action on \"live\" projects." + ) + }) + + items.append(self.splitter_item) + + # How many versions to keep + items.append({ + "type": "label", + "value": "## Choose how many versions you want to keep:" + }) + items.append({ + "type": "label", + "value": ( + "NOTE: We recommend keeping 2 versions." + ) + }) + items.append({ + "type": "number", + "name": "last_versions_count", + "label": "Versions", + "value": 2 + }) + + items.append(self.splitter_item) + + items.append({ + "type": "label", + "value": ( + "## Remove publish folder even if there" + " are other than published files:" + ) + }) + items.append({ + "type": "label", + "value": ( + "WARNING: This may remove more than you want." + ) + }) + items.append({ + "type": "boolean", + "name": "force_delete_publish_folder", + "label": "Are you sure?", + "value": False + }) + + return { + "items": items, + "title": self.inteface_title + } + + def launch(self, session, entities, event): + values = event["data"].get("values") + if not values: + return + + versions_count = int(values["last_versions_count"]) + force_to_remove = values["force_delete_publish_folder"] + + _val1 = "OFF" + if force_to_remove: + _val1 = "ON" + + _val3 = "s" + if versions_count == 1: + _val3 = "" + + self.log.debug(( + "Process started. Force to delete publish folder is set to [{0}]" + " and will keep {1} latest version{2}."
+ ).format(_val1, versions_count, _val3)) + + self.dbcon.install() + + project = None + avalon_asset_names = [] + asset_versions_by_parent_id = collections.defaultdict(list) + subset_names_by_asset_name = collections.defaultdict(list) + + ftrack_assets_by_name = {} + for entity in entities: + ftrack_asset = entity["asset"] + + parent_ent = ftrack_asset["parent"] + parent_ftrack_id = parent_ent["id"] + parent_name = parent_ent["name"] + + if parent_name not in avalon_asset_names: + avalon_asset_names.append(parent_name) + + # Group asset versions by parent entity + asset_versions_by_parent_id[parent_ftrack_id].append(entity) + + # Get project + if project is None: + project = parent_ent["project"] + + # Collect subset names per asset + subset_name = ftrack_asset["name"] + subset_names_by_asset_name[parent_name].append(subset_name) + + if subset_name not in ftrack_assets_by_name: + ftrack_assets_by_name[subset_name] = ftrack_asset + + # Set Mongo collection + project_name = project["full_name"] + self.dbcon.Session["AVALON_PROJECT"] = project_name + self.log.debug("Project is set to {}".format(project_name)) + + # Get Assets from avalon database + assets = list(self.dbcon.find({ + "type": "asset", + "name": {"$in": avalon_asset_names} + })) + asset_id_to_name_map = { + asset["_id"]: asset["name"] for asset in assets + } + asset_ids = list(asset_id_to_name_map.keys()) + + self.log.debug("Collected assets ({})".format(len(asset_ids))) + + # Get Subsets + subsets = list(self.dbcon.find({ + "type": "subset", + "parent": {"$in": asset_ids} + })) + subsets_by_id = {} + subset_ids = [] + for subset in subsets: + asset_id = subset["parent"] + asset_name = asset_id_to_name_map[asset_id] + available_subsets = subset_names_by_asset_name[asset_name] + + if subset["name"] not in available_subsets: + continue + + subset_ids.append(subset["_id"]) + subsets_by_id[subset["_id"]] = subset + + self.log.debug("Collected subsets ({})".format(len(subset_ids))) + + # Get Versions + versions = list(self.dbcon.find({ + "type": "version", + "parent": {"$in": subset_ids} + })) + + versions_by_parent = collections.defaultdict(list) + for ent in versions: + versions_by_parent[ent["parent"]].append(ent) + + def sort_func(ent): + return int(ent["name"]) + + all_last_versions = [] + for parent_id, _versions in versions_by_parent.items(): + for idx, version in enumerate( + sorted(_versions, key=sort_func, reverse=True) + ): + if idx >= versions_count: + break + all_last_versions.append(version) + + self.log.debug("Collected versions ({})".format(len(versions))) + + # Filter latest versions + for version in all_last_versions: + versions.remove(version) + + # Update versions_by_parent without filtered versions + versions_by_parent = collections.defaultdict(list) + for ent in versions: + versions_by_parent[ent["parent"]].append(ent) + + # Filter already deleted versions + versions_to_pop = [] + for version in versions: + version_tags = version["data"].get("tags") + if version_tags and "deleted" in version_tags: + versions_to_pop.append(version) + + for version in versions_to_pop: + subset = subsets_by_id[version["parent"]] + asset_id = subset["parent"] + asset_name = asset_id_to_name_map[asset_id] + msg = "Asset: \"{}\" | Subset: \"{}\" | Version: \"{}\"".format( + asset_name, subset["name"], version["name"] + ) + self.log.warning(( + "Skipping version. Already tagged as `deleted`. 
< {} >" + ).format(msg)) + versions.remove(version) + + version_ids = [ent["_id"] for ent in versions] + + self.log.debug( + "Filtered versions to delete ({})".format(len(version_ids)) + ) + + if not version_ids: + msg = "Skipping processing. Nothing to delete." + self.log.debug(msg) + return { + "success": True, + "message": msg + } + + repres = list(self.dbcon.find({ + "type": "representation", + "parent": {"$in": version_ids} + })) + + self.log.debug( + "Collected representations to remove ({})".format(len(repres)) + ) + + dir_paths = {} + file_paths_by_dir = collections.defaultdict(list) + for repre in repres: + file_path, seq_path = self.path_from_represenation(repre) + if file_path is None: + self.log.warning(( + "Could not format path for represenation \"{}\"" + ).format(str(repre))) + continue + + dir_path = os.path.dirname(file_path) + dir_id = None + for _dir_id, _dir_path in dir_paths.items(): + if _dir_path == dir_path: + dir_id = _dir_id + break + + if dir_id is None: + dir_id = uuid.uuid4() + dir_paths[dir_id] = dir_path + + file_paths_by_dir[dir_id].append([file_path, seq_path]) + + dir_ids_to_pop = [] + for dir_id, dir_path in dir_paths.items(): + if os.path.exists(dir_path): + continue + + dir_ids_to_pop.append(dir_id) + + # Pop dirs from both dictionaries + for dir_id in dir_ids_to_pop: + dir_paths.pop(dir_id) + paths = file_paths_by_dir.pop(dir_id) + # TODO report of missing directories? + paths_msg = ", ".join([ + "'{}'".format(path[0].replace("\\", "/")) for path in paths + ]) + self.log.warning(( + "Folder does not exist. Deleting it's files skipped: {}" + ).format(paths_msg)) + + if force_to_remove: + self.delete_whole_dir_paths(dir_paths.values()) + else: + self.delete_only_repre_files(dir_paths, file_paths_by_dir) + + mongo_changes_bulk = [] + for version in versions: + orig_version_tags = version["data"].get("tags") or [] + version_tags = [tag for tag in orig_version_tags] + if "deleted" not in version_tags: + version_tags.append("deleted") + + if version_tags == orig_version_tags: + continue + + update_query = {"_id": version["_id"]} + update_data = {"$set": {"data.tags": version_tags}} + mongo_changes_bulk.append(UpdateOne(update_query, update_data)) + + if mongo_changes_bulk: + self.dbcon.bulk_write(mongo_changes_bulk) + + self.dbcon.uninstall() + + # Set attribute `is_published` to `False` on ftrack AssetVersions + for subset_id, _versions in versions_by_parent.items(): + subset_name = None + for subset in subsets: + if subset["_id"] == subset_id: + subset_name = subset["name"] + break + + if subset_name is None: + self.log.warning( + "Subset with ID `{}` was not found.".format(str(subset_id)) + ) + continue + + ftrack_asset = ftrack_assets_by_name.get(subset_name) + if not ftrack_asset: + self.log.warning(( + "Could not find Ftrack asset with name `{}`" + ).format(subset_name)) + continue + + version_numbers = [int(ver["name"]) for ver in _versions] + for version in ftrack_asset["versions"]: + if int(version["version"]) in version_numbers: + version["is_published"] = False + + try: + session.commit() + + except Exception: + msg = ( + "Could not set `is_published` attribute to `False`" + " for selected AssetVersions." 
+ ) + self.log.warning(msg, exc_info=True) + + return { + "success": False, + "message": msg + } + + return True + + def delete_whole_dir_paths(self, dir_paths): + for dir_path in dir_paths: + # Delete all files and fodlers in dir path + for root, dirs, files in os.walk(dir_path, topdown=False): + for name in files: + os.remove(os.path.join(root, name)) + + for name in dirs: + os.rmdir(os.path.join(root, name)) + + # Delete even the folder and it's parents folders if they are empty + while True: + if not os.path.exists(dir_path): + dir_path = os.path.dirname(dir_path) + continue + + if len(os.listdir(dir_path)) != 0: + break + + os.rmdir(os.path.join(dir_path)) + + def delete_only_repre_files(self, dir_paths, file_paths): + for dir_id, dir_path in dir_paths.items(): + dir_files = os.listdir(dir_path) + collections, remainders = clique.assemble(dir_files) + for file_path, seq_path in file_paths[dir_id]: + file_path_base = os.path.split(file_path)[1] + # Just remove file if `frame` key was not in context or + # filled path is in remainders (single file sequence) + if not seq_path or file_path_base in remainders: + if not os.path.exists(file_path): + self.log.warning( + "File was not found: {}".format(file_path) + ) + continue + os.remove(file_path) + self.log.debug("Removed file: {}".format(file_path)) + remainders.remove(file_path_base) + continue + + seq_path_base = os.path.split(seq_path)[1] + head, tail = seq_path_base.split(self.sequence_splitter) + + final_col = None + for collection in collections: + if head != collection.head or tail != collection.tail: + continue + final_col = collection + break + + if final_col is not None: + # Fill full path to head + final_col.head = os.path.join(dir_path, final_col.head) + for _file_path in final_col: + if os.path.exists(_file_path): + os.remove(_file_path) + _seq_path = final_col.format("{head}{padding}{tail}") + self.log.debug("Removed files: {}".format(_seq_path)) + collections.remove(final_col) + + elif os.path.exists(file_path): + os.remove(file_path) + self.log.debug("Removed file: {}".format(file_path)) + + else: + self.log.warning( + "File was not found: {}".format(file_path) + ) + + # Delete as much as possible parent folders + for dir_path in dir_paths.values(): + while True: + if not os.path.exists(dir_path): + dir_path = os.path.dirname(dir_path) + continue + + if len(os.listdir(dir_path)) != 0: + break + + self.log.debug("Removed folder: {}".format(dir_path)) + os.rmdir(dir_path) + + def path_from_represenation(self, representation): + try: + template = representation["data"]["template"] + + except KeyError: + return (None, None) + + root = os.environ["AVALON_PROJECTS"] + if not root: + return (None, None) + + sequence_path = None + try: + context = representation["context"] + context["root"] = root + path = avalon.pipeline.format_template_with_optional_keys( + context, template + ) + if "frame" in context: + context["frame"] = self.sequence_splitter + sequence_path = os.path.normpath( + avalon.pipeline.format_template_with_optional_keys( + context, template + ) + ) + + except KeyError: + # Template references unavailable data + return (None, None) + + return (os.path.normpath(path), sequence_path) + + +def register(session, plugins_presets={}): + '''Register plugin. 
Called when used as an plugin.''' + + DeleteOldVersions(session, plugins_presets).register() diff --git a/pype/ftrack/actions/action_delivery.py b/pype/ftrack/actions/action_delivery.py index afd20d12d1..29fdfe39ae 100644 --- a/pype/ftrack/actions/action_delivery.py +++ b/pype/ftrack/actions/action_delivery.py @@ -312,42 +312,32 @@ class Delivery(BaseAction): anatomy_data = copy.deepcopy(repre["context"]) anatomy_data["root"] = location_path - anatomy_filled = anatomy.format(anatomy_data) - test_path = ( - anatomy_filled - .get("delivery", {}) - .get(anatomy_name) - ) + anatomy_filled = anatomy.format_all(anatomy_data) + test_path = anatomy_filled["delivery"][anatomy_name] - if not test_path: + if not test_path.solved: msg = ( "Missing keys in Representation's context" " for anatomy template \"{}\"." ).format(anatomy_name) - all_anatomies = anatomy.format_all(anatomy_data) - result = None - for anatomies in all_anatomies.values(): - for key, temp in anatomies.get("delivery", {}).items(): - if key != anatomy_name: - continue + if test_path.missing_keys: + keys = ", ".join(test_path.missing_keys) + sub_msg = ( + "Representation: {}
- Missing keys: \"{}\"
" + ).format(str(repre["_id"]), keys) - result = temp - break + if test_path.invalid_types: + items = [] + for key, value in test_path.invalid_types.items(): + items.append("\"{}\" {}".format(key, str(value))) - # TODO log error! - missing keys in anatomy - if result: - missing_keys = [ - key[1] for key in string.Formatter().parse(result) - if key[1] is not None - ] - else: - missing_keys = ["unknown"] + keys = ", ".join(items) + sub_msg = ( + "Representation: {}
" + "- Invalid value DataType: \"{}\"
" + ).format(str(repre["_id"]), keys) - keys = ", ".join(missing_keys) - sub_msg = ( - "Representation: {}
- Missing keys: \"{}\"
" - ).format(str(repre["_id"]), keys) self.report_items[msg].append(sub_msg) self.log.warning( "{} Representation: \"{}\" Filled: <{}>".format( diff --git a/pype/ftrack/actions/action_store_thumbnails_to_avalon.py b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py new file mode 100644 index 0000000000..7adc36f4b5 --- /dev/null +++ b/pype/ftrack/actions/action_store_thumbnails_to_avalon.py @@ -0,0 +1,350 @@ +import os +import requests +import errno +import json + +from bson.objectid import ObjectId +from pype.ftrack import BaseAction +from pype.ftrack.lib import ( + get_project_from_entity, + get_avalon_entities_for_assetversion +) +from pypeapp import Anatomy +from pype.ftrack.lib.io_nonsingleton import DbConnector + + +class StoreThumbnailsToAvalon(BaseAction): + # Action identifier + identifier = "store.thubmnail.to.avalon" + # Action label + label = "Pype Admin" + # Action variant + variant = "- Store Thumbnails to avalon" + # Action description + description = 'Test action' + # roles that are allowed to register this action + role_list = ["Pypeclub", "Administrator", "Project Manager"] + + icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format( + os.environ.get('PYPE_STATICS_SERVER', '') + ) + + thumbnail_key = "AVALON_THUMBNAIL_ROOT" + db_con = DbConnector() + + def discover(self, session, entities, event): + for entity in entities: + if entity.entity_type.lower() == "assetversion": + return True + return False + + def launch(self, session, entities, event): + # DEBUG LINE + # root_path = r"C:\Users\jakub.trllo\Desktop\Tests\ftrack_thumbnails" + + user = session.query( + "User where username is '{0}'".format(session.api_user) + ).one() + action_job = session.create("Job", { + "user": user, + "status": "running", + "data": json.dumps({ + "description": "Storing thumbnails to avalon." + }) + }) + session.commit() + + thumbnail_roots = os.environ.get(self.thumbnail_key) + if not thumbnail_roots: + msg = "`{}` environment is not set".format(self.thumbnail_key) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + existing_thumbnail_root = None + for path in thumbnail_roots.split(os.pathsep): + if os.path.exists(path): + existing_thumbnail_root = path + break + + if existing_thumbnail_root is None: + msg = ( + "Can't access paths, set in `{}` ({})" + ).format(self.thumbnail_key, thumbnail_roots) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + project = get_project_from_entity(entities[0]) + project_name = project["full_name"] + anatomy = Anatomy(project_name) + + if "publish" not in anatomy.templates: + msg = "Anatomy does not have set publish key!" 
+ + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + if "thumbnail" not in anatomy.templates["publish"]: + msg = ( + "There is not set \"thumbnail\"" + " template in Antomy for project \"{}\"" + ).format(project_name) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + example_template_data = { + "_id": "ID", + "thumbnail_root": "THUBMNAIL_ROOT", + "thumbnail_type": "THUMBNAIL_TYPE", + "ext": ".EXT", + "project": { + "name": "PROJECT_NAME", + "code": "PROJECT_CODE" + }, + "asset": "ASSET_NAME", + "subset": "SUBSET_NAME", + "version": "VERSION_NAME", + "hierarchy": "HIERARCHY" + } + tmp_filled = anatomy.format_all(example_template_data) + thumbnail_result = tmp_filled["publish"]["thumbnail"] + if not thumbnail_result.solved: + missing_keys = thumbnail_result.missing_keys + invalid_types = thumbnail_result.invalid_types + submsg = "" + if missing_keys: + submsg += "Missing keys: {}".format(", ".join( + ["\"{}\"".format(key) for key in missing_keys] + )) + + if invalid_types: + items = [] + for key, value in invalid_types.items(): + items.append("{}{}".format(str(key), str(value))) + submsg += "Invalid types: {}".format(", ".join(items)) + + msg = ( + "Thumbnail Anatomy template expects more keys than action" + " can offer. {}" + ).format(submsg) + + action_job["status"] = "failed" + session.commit() + + self.log.warning(msg) + + return { + "success": False, + "message": msg + } + + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + + self.db_con.install() + + for entity in entities: + # Skip if entity is not AssetVersion (never should happend, but..) + if entity.entity_type.lower() != "assetversion": + continue + + # Skip if AssetVersion don't have thumbnail + thumbnail_ent = entity["thumbnail"] + if thumbnail_ent is None: + self.log.debug(( + "Skipping. AssetVersion don't " + "have set thumbnail. {}" + ).format(entity["id"])) + continue + + avalon_ents_result = get_avalon_entities_for_assetversion( + entity, self.db_con + ) + version_full_path = ( + "Asset: \"{project_name}/{asset_path}\"" + " | Subset: \"{subset_name}\"" + " | Version: \"{version_name}\"" + ).format(**avalon_ents_result) + + version = avalon_ents_result["version"] + if not version: + self.log.warning(( + "AssetVersion does not have version in avalon. {}" + ).format(version_full_path)) + continue + + thumbnail_id = version["data"].get("thumbnail_id") + if thumbnail_id: + self.log.info(( + "AssetVersion skipped, already has thubmanil set. {}" + ).format(version_full_path)) + continue + + # Get thumbnail extension + file_ext = thumbnail_ent["file_type"] + if not file_ext.startswith("."): + file_ext = ".{}".format(file_ext) + + avalon_project = avalon_ents_result["project"] + avalon_asset = avalon_ents_result["asset"] + hierarchy = "" + parents = avalon_asset["data"].get("parents") or [] + if parents: + hierarchy = "/".join(parents) + + # Prepare anatomy template fill data + # 1. 
Create new id for thumbnail entity + thumbnail_id = ObjectId() + + template_data = { + "_id": str(thumbnail_id), + "thumbnail_root": existing_thumbnail_root, + "thumbnail_type": "thumbnail", + "ext": file_ext, + "project": { + "name": avalon_project["name"], + "code": avalon_project["data"].get("code") + }, + "asset": avalon_ents_result["asset_name"], + "subset": avalon_ents_result["subset_name"], + "version": avalon_ents_result["version_name"], + "hierarchy": hierarchy + } + + anatomy_filled = anatomy.format(template_data) + thumbnail_path = anatomy_filled["publish"]["thumbnail"] + thumbnail_path = thumbnail_path.replace("..", ".") + thumbnail_path = os.path.normpath(thumbnail_path) + + downloaded = False + for loc in (thumbnail_ent.get("component_locations") or []): + res_id = loc.get("resource_identifier") + if not res_id: + continue + + thubmnail_url = self.get_thumbnail_url(res_id) + if self.download_file(thubmnail_url, thumbnail_path): + downloaded = True + break + + if not downloaded: + self.log.warning( + "Could not download thumbnail for {}".format( + version_full_path + ) + ) + continue + + # Clean template data from keys that are dynamic + template_data.pop("_id") + template_data.pop("thumbnail_root") + + thumbnail_entity = { + "_id": thumbnail_id, + "type": "thumbnail", + "schema": "pype:thumbnail-1.0", + "data": { + "template": thumbnail_template, + "template_data": template_data + } + } + + # Create thumbnail entity + self.db_con.insert_one(thumbnail_entity) + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_entity)) + ) + + # Set thumbnail id for version + self.db_con.update_one( + {"_id": version["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + + self.db_con.update_one( + {"_id": avalon_asset["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + + action_job["status"] = "done" + session.commit() + + return True + + def get_thumbnail_url(self, resource_identifier, size=None): + # TODO use ftrack_api method rather (find way how to use it) + url_string = ( + u'{url}/component/thumbnail?id={id}&username={username}' + u'&apiKey={apiKey}' + ) + url = url_string.format( + url=self.session.server_url, + id=resource_identifier, + username=self.session.api_user, + apiKey=self.session.api_key + ) + if size: + url += u'&size={0}'.format(size) + + return url + + def download_file(self, source_url, dst_file_path): + dir_path = os.path.dirname(dst_file_path) + try: + os.makedirs(dir_path) + except OSError as exc: + if exc.errno != errno.EEXIST: + self.log.warning( + "Could not create folder: \"{}\"".format(dir_path) + ) + return False + + self.log.debug( + "Downloading file \"{}\" -> \"{}\"".format( + source_url, dst_file_path + ) + ) + file_open = open(dst_file_path, "wb") + try: + file_open.write(requests.get(source_url).content) + except Exception: + self.log.warning( + "Download of image `{}` failed.".format(source_url) + ) + return False + finally: + file_open.close() + return True + + +def register(session, plugins_presets={}): + StoreThumbnailsToAvalon(session, plugins_presets).register() diff --git a/pype/ftrack/events/event_first_version_status.py b/pype/ftrack/events/event_first_version_status.py new file mode 100644 index 0000000000..2e2b98ad5f --- /dev/null +++ b/pype/ftrack/events/event_first_version_status.py @@ -0,0 +1,188 @@ +from pype.ftrack import BaseEvent + + +class FirstVersionStatus(BaseEvent): + + # WARNING Priority MUST be higher + # than handler in `event_version_to_task_statuses.py` + priority = 200 + + keys_enum = 
["task", "task_type"] + # This should be set with presets + task_status_map = [] + + # EXAMPLE of `task_status_map` + __example_status_map__ = [{ + # `key` specify where to look for name (is enumerator of `keys_enum`) + # By default is set to "task" + "key": "task", + # speicification of name + "name": "compositing", + # Status to set to the asset version + "status": "Blocking" + }] + + def register(self, *args, **kwargs): + result = super(FirstVersionStatus, self).register(*args, **kwargs) + + valid_task_status_map = [] + for item in self.task_status_map: + key = (item.get("key") or "task").lower() + name = (item.get("name") or "").lower() + status = (item.get("status") or "").lower() + if not (key and name and status): + self.log.warning(( + "Invalid item in Task -> Status mapping. {}" + ).format(str(item))) + continue + + if key not in self.keys_enum: + expected_msg = "" + last_key_idx = len(self.keys_enum) - 1 + for idx, key in enumerate(self.keys_enum): + if idx == 0: + joining_part = "`{}`" + elif idx == last_key_idx: + joining_part = "or `{}`" + else: + joining_part = ", `{}`" + expected_msg += joining_part.format(key) + + self.log.warning(( + "Invalid key `{}`. Expected: {}." + ).format(key, expected_msg)) + continue + + valid_task_status_map.append({ + "key": key, + "name": name, + "status": status + }) + + self.task_status_map = valid_task_status_map + if not self.task_status_map: + self.log.warning(( + "Event handler `{}` don't have set presets." + ).format(self.__class__.__name__)) + + return result + + def launch(self, session, event): + """Set task's status for first created Asset Version.""" + + if not self.task_status_map: + return + + entities_info = self.filter_event_ents(event) + if not entities_info: + return + + entity_ids = [] + for entity_info in entities_info: + entity_ids.append(entity_info["entityId"]) + + joined_entity_ids = ",".join( + ["\"{}\"".format(entity_id) for entity_id in entity_ids] + ) + asset_versions = session.query( + "AssetVersion where id in ({})".format(joined_entity_ids) + ).all() + + asset_version_statuses = None + + project_schema = None + for asset_version in asset_versions: + task_entity = asset_version["task"] + found_item = None + for item in self.task_status_map: + if ( + item["key"] == "task" and + task_entity["name"].lower() != item["name"] + ): + continue + + elif ( + item["key"] == "task_type" and + task_entity["type"]["name"].lower() != item["name"] + ): + continue + + found_item = item + break + + if not found_item: + continue + + if project_schema is None: + project_schema = task_entity["project"]["project_schema"] + + # Get all available statuses for Task + if asset_version_statuses is None: + statuses = project_schema.get_statuses("AssetVersion") + + # map lowered status name with it's object + asset_version_statuses = { + status["name"].lower(): status for status in statuses + } + + ent_path = "/".join( + [ent["name"] for ent in task_entity["link"]] + + [ + str(asset_version["asset"]["name"]), + str(asset_version["version"]) + ] + ) + + new_status = asset_version_statuses.get(found_item["status"]) + if not new_status: + self.log.warning( + "AssetVersion doesn't have status `{}`." 
+ .format(found_item["status"])) + continue + + try: + asset_version["status"] = new_status + session.commit() + self.log.debug("[ {} ] Status updated to [ {} ]".format( + ent_path, new_status['name'] + )) + + except Exception: + session.rollback() + self.log.warning( + "[ {} ] Status couldn't be set.".format(ent_path), + exc_info=True + ) + + def filter_event_ents(self, event): + filtered_ents = [] + for entity in event["data"].get("entities", []): + # Care only about add actions + if entity["action"] != "add": + continue + + # Filter AssetVersions + if entity["entityType"] != "assetversion": + continue + + entity_changes = entity.get("changes") or {} + + # Check if version of Asset Version is `1` + version_num = entity_changes.get("version", {}).get("new") + if version_num != 1: + continue + + # Skip if Asset Version doesn't have a task + task_id = entity_changes.get("taskid", {}).get("new") + if not task_id: + continue + + filtered_ents.append(entity) + + return filtered_ents + + +def register(session, plugins_presets): + '''Register plugin. Called when used as a plugin.''' + + FirstVersionStatus(session, plugins_presets).register() diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index 23284a2ae6..faf7539540 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -3,6 +3,7 @@ import collections import copy import queue import time +import datetime import atexit import traceback @@ -25,13 +26,9 @@ class SyncToAvalonEvent(BaseEvent): dbcon = DbConnector() - ignore_entTypes = [ - "socialfeed", "socialnotification", "note", - "assetversion", "job", "user", "reviewsessionobject", "timer", - "timelog", "auth_userrole", "appointment" - ] + interest_entTypes = ["show", "task"] ignore_ent_types = ["Milestone"] - ignore_keys = ["statusid"] + ignore_keys = ["statusid", "thumbid"] project_query = ( "select full_name, name, custom_attributes" @@ -51,9 +48,39 @@ class SyncToAvalonEvent(BaseEvent): def __init__(self, session, plugins_presets={}): '''Expects a ftrack_api.Session instance''' + # Debug settings + # - time expiration in seconds + self.debug_print_time_expiration = 5 * 60 + # - store current time + self.debug_print_time = datetime.datetime.now() + # - store synchronized entity types to be able to use + # only entityTypes of interest instead of filtering by ignored + self.debug_sync_types = collections.defaultdict(list) + + # Set processing session to not use global + self.set_process_session(session) super().__init__(session, plugins_presets) + def debug_logs(self): + """This is a debug method for printing small debug messages.
""" + now_datetime = datetime.datetime.now() + delta = now_datetime - self.debug_print_time + if delta.total_seconds() < self.debug_print_time_expiration: + return + + self.debug_print_time = now_datetime + known_types_items = [] + for entityType, entity_type in self.debug_sync_types.items(): + ent_types_msg = ", ".join(entity_type) + known_types_items.append( + "<{}> ({})".format(entityType, ent_types_msg) + ) + + known_entityTypes = ", ".join(known_types_items) + self.log.debug( + "DEBUG MESSAGE: Known types {}".format(known_entityTypes) + ) + @property def cur_project(self): if self._cur_project is None: @@ -106,9 +133,10 @@ class SyncToAvalonEvent(BaseEvent): if self._avalon_ents_by_id is None: self._avalon_ents_by_id = {} proj, ents = self.avalon_entities - self._avalon_ents_by_id[proj["_id"]] = proj - for ent in ents: - self._avalon_ents_by_id[ent["_id"]] = ent + if proj: + self._avalon_ents_by_id[proj["_id"]] = proj + for ent in ents: + self._avalon_ents_by_id[ent["_id"]] = ent return self._avalon_ents_by_id @property @@ -128,13 +156,14 @@ class SyncToAvalonEvent(BaseEvent): if self._avalon_ents_by_ftrack_id is None: self._avalon_ents_by_ftrack_id = {} proj, ents = self.avalon_entities - ftrack_id = proj["data"]["ftrackId"] - self._avalon_ents_by_ftrack_id[ftrack_id] = proj - for ent in ents: - ftrack_id = ent["data"].get("ftrackId") - if ftrack_id is None: - continue - self._avalon_ents_by_ftrack_id[ftrack_id] = ent + if proj: + ftrack_id = proj["data"]["ftrackId"] + self._avalon_ents_by_ftrack_id[ftrack_id] = proj + for ent in ents: + ftrack_id = ent["data"].get("ftrackId") + if ftrack_id is None: + continue + self._avalon_ents_by_ftrack_id[ftrack_id] = ent return self._avalon_ents_by_ftrack_id @property @@ -477,15 +506,26 @@ class SyncToAvalonEvent(BaseEvent): found_actions = set() for ent_info in entities_info: entityType = ent_info["entityType"] - if entityType in self.ignore_entTypes: + if entityType not in self.interest_entTypes: continue entity_type = ent_info.get("entity_type") if not entity_type or entity_type in self.ignore_ent_types: continue + if entity_type not in self.debug_sync_types[entityType]: + self.debug_sync_types[entityType].append(entity_type) + action = ent_info["action"] ftrack_id = ent_info["entityId"] + if isinstance(ftrack_id, list): + self.log.warning(( + "BUG REPORT: Entity info has `entityId` as `list` \"{}\"" + ).format(ent_info)) + if len(ftrack_id) == 0: + continue + ftrack_id = ftrack_id[0] + if action == "move": ent_keys = ent_info["keys"] # Seprate update info from move action @@ -565,8 +605,7 @@ class SyncToAvalonEvent(BaseEvent): if auto_sync is not True: return True - debug_msg = "" - debug_msg += "Updated: {}".format(len(updated)) + debug_msg = "Updated: {}".format(len(updated)) debug_action_map = { "add": "Created", "remove": "Removed", @@ -626,6 +665,8 @@ class SyncToAvalonEvent(BaseEvent): self.ftrack_added = entities_by_action["add"] self.ftrack_updated = updated + self.debug_logs() + self.log.debug("Synchronization begins") try: time_1 = time.time() @@ -1437,7 +1478,7 @@ class SyncToAvalonEvent(BaseEvent): .get("name", {}) .get("new") ) - avalon_ent_by_name = self.avalon_ents_by_name.get(name) + avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {} avalon_ent_by_name_ftrack_id = ( avalon_ent_by_name .get("data", {}) @@ -1537,6 +1578,14 @@ class SyncToAvalonEvent(BaseEvent): entity_type_conf_ids[entity_type] = configuration_id break + if not configuration_id: + self.log.warning( + "BUG REPORT: Missing configuration for `{} < {} 
>`".format( + entity_type, ent_info["entityType"] + ) + ) + continue + _entity_key = collections.OrderedDict({ "configuration_id": configuration_id, "entity_id": ftrack_id @@ -1555,7 +1604,7 @@ class SyncToAvalonEvent(BaseEvent): try: # Commit changes of mongo_id to empty string self.process_session.commit() - self.log.debug("Commititng unsetting") + self.log.debug("Committing unsetting") except Exception: self.process_session.rollback() # TODO logging @@ -1635,7 +1684,7 @@ class SyncToAvalonEvent(BaseEvent): new_name, "task", schema_patterns=self.regex_schemas ) if not passed_regex: - self.regex_failed.append(ent_infos["entityId"]) + self.regex_failed.append(ent_info["entityId"]) continue if new_name not in self.task_changes_by_avalon_id[mongo_id]: @@ -1820,6 +1869,13 @@ class SyncToAvalonEvent(BaseEvent): obj_type_id = ent_info["objectTypeId"] ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id) + if ent_cust_attrs is None: + self.log.warning(( + "BUG REPORT: Entity has ent type without" + " custom attributes <{}> \"{}\"" + ).format(entType, ent_info)) + continue + for key, values in ent_info["changes"].items(): if key in hier_attrs_keys: self.hier_cust_attrs_changes[key].append(ftrack_id) diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py index 87994d34b2..eaacfd959a 100644 --- a/pype/ftrack/events/event_user_assigment.py +++ b/pype/ftrack/events/event_user_assigment.py @@ -207,7 +207,9 @@ class UserAssigmentEvent(BaseEvent): # formatting work dir is easiest part as we can use whole path work_dir = anatomy.format(data)['avalon']['work'] # we also need publish but not whole - publish = anatomy.format_all(data)['partial']['avalon']['publish'] + filled_all = anatomy.format_all(data) + publish = filled_all['avalon']['publish'] + # now find path to {asset} m = re.search("(^.+?{})".format(data['asset']), publish) diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py index b09b0bc84e..4c1d1667c3 100644 --- a/pype/ftrack/ftrack_server/event_server_cli.py +++ b/pype/ftrack/ftrack_server/event_server_cli.py @@ -4,9 +4,13 @@ import signal import datetime import subprocess import socket +import json +import platform import argparse +import getpass import atexit import time +import uuid import ftrack_api from pype.ftrack.lib import credentials @@ -63,10 +67,19 @@ def validate_credentials(url, user, api): ) session.close() except Exception as e: - print( - 'ERROR: Can\'t log into Ftrack with used credentials:' - ' Ftrack server: "{}" // Username: {} // API key: {}' - ).format(url, user, api) + print("Can't log into Ftrack with used credentials:") + ftrack_cred = { + "Ftrack server": str(url), + "Username": str(user), + "API key": str(api) + } + item_lens = [len(key) + 1 for key in ftrack_cred.keys()] + justify_len = max(*item_lens) + for key, value in ftrack_cred.items(): + print("{} {}".format( + (key + ":").ljust(justify_len, " "), + value + )) return False print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format( @@ -175,6 +188,7 @@ def main_loop(ftrack_url): otherwise thread will be killed. 
""" + os.environ["FTRACK_EVENT_SUB_ID"] = str(uuid.uuid1()) # Get mongo hostname and port for testing mongo connection mongo_list = ftrack_events_mongo_settings() mongo_hostname = mongo_list[0] @@ -202,6 +216,13 @@ def main_loop(ftrack_url): processor_last_failed = datetime.datetime.now() processor_failed_count = 0 + statuser_name = "StorerThread" + statuser_port = 10021 + statuser_path = "{}/sub_event_status.py".format(file_path) + statuser_thread = None + statuser_last_failed = datetime.datetime.now() + statuser_failed_count = 0 + ftrack_accessible = False mongo_accessible = False @@ -210,7 +231,7 @@ def main_loop(ftrack_url): # stop threads on exit # TODO check if works and args have thread objects! - def on_exit(processor_thread, storer_thread): + def on_exit(processor_thread, storer_thread, statuser_thread): if processor_thread is not None: processor_thread.stop() processor_thread.join() @@ -221,9 +242,27 @@ def main_loop(ftrack_url): storer_thread.join() storer_thread = None + if statuser_thread is not None: + statuser_thread.stop() + statuser_thread.join() + statuser_thread = None + atexit.register( - on_exit, processor_thread=processor_thread, storer_thread=storer_thread + on_exit, + processor_thread=processor_thread, + storer_thread=storer_thread, + statuser_thread=statuser_thread ) + + system_name, pc_name = platform.uname()[:2] + host_name = socket.gethostname() + main_info = { + "created_at": datetime.datetime.now().strftime("%Y.%m.%d %H:%M:%S"), + "Username": getpass.getuser(), + "Host Name": host_name, + "Host IP": socket.gethostbyname(host_name) + } + main_info_str = json.dumps(main_info) # Main loop while True: # Check if accessible Ftrack and Mongo url @@ -261,6 +300,52 @@ def main_loop(ftrack_url): printed_ftrack_error = False printed_mongo_error = False + # ====== STATUSER ======= + if statuser_thread is None: + if statuser_failed_count < max_fail_count: + statuser_thread = socket_thread.StatusSocketThread( + statuser_name, statuser_port, statuser_path, + [main_info_str] + ) + statuser_thread.start() + + elif statuser_failed_count == max_fail_count: + print(( + "Statuser failed {}times in row" + " I'll try to run again {}s later" + ).format(str(max_fail_count), str(wait_time_after_max_fail))) + statuser_failed_count += 1 + + elif (( + datetime.datetime.now() - statuser_last_failed + ).seconds > wait_time_after_max_fail): + statuser_failed_count = 0 + + # If thread failed test Ftrack and Mongo connection + elif not statuser_thread.isAlive(): + statuser_thread.join() + statuser_thread = None + ftrack_accessible = False + mongo_accessible = False + + _processor_last_failed = datetime.datetime.now() + delta_time = ( + _processor_last_failed - statuser_last_failed + ).seconds + + if delta_time < min_fail_seconds: + statuser_failed_count += 1 + else: + statuser_failed_count = 0 + statuser_last_failed = _processor_last_failed + + elif statuser_thread.stop_subprocess: + print("Main process was stopped by action") + on_exit(processor_thread, storer_thread, statuser_thread) + os.kill(os.getpid(), signal.SIGTERM) + return 1 + + # ====== STORER ======= # Run backup thread which does not requeire mongo to work if storer_thread is None: if storer_failed_count < max_fail_count: @@ -268,6 +353,7 @@ def main_loop(ftrack_url): storer_name, storer_port, storer_path ) storer_thread.start() + elif storer_failed_count == max_fail_count: print(( "Storer failed {}times I'll try to run again {}s later" @@ -295,6 +381,7 @@ def main_loop(ftrack_url): storer_failed_count = 0 storer_last_failed = 
_storer_last_failed + # ====== PROCESSOR ======= if processor_thread is None: if processor_failed_count < max_fail_count: processor_thread = socket_thread.SocketThread( @@ -336,6 +423,10 @@ def main_loop(ftrack_url): processor_failed_count = 0 processor_last_failed = _processor_last_failed + if statuser_thread is not None: + statuser_thread.set_process("storer", storer_thread) + statuser_thread.set_process("processor", processor_thread) + time.sleep(1) @@ -446,9 +537,9 @@ def main(argv): event_paths = kwargs.ftrackeventpaths if not kwargs.noloadcred: - cred = credentials._get_credentials(True) + cred = credentials.get_credentials(ftrack_url) username = cred.get('username') - api_key = cred.get('apiKey') + api_key = cred.get('api_key') if kwargs.ftrackuser: username = kwargs.ftrackuser @@ -482,7 +573,7 @@ def main(argv): return 1 if kwargs.storecred: - credentials._save_credentials(username, api_key, True) + credentials.save_credentials(username, api_key, ftrack_url) # Set Ftrack environments os.environ["FTRACK_SERVER"] = ftrack_url diff --git a/pype/ftrack/ftrack_server/ftrack_server.py b/pype/ftrack/ftrack_server/ftrack_server.py index eebc3f6ec4..8464203c1d 100644 --- a/pype/ftrack/ftrack_server/ftrack_server.py +++ b/pype/ftrack/ftrack_server/ftrack_server.py @@ -100,9 +100,9 @@ class FtrackServer: log.warning(msg, exc_info=e) if len(register_functions_dict) < 1: - raise Exception(( - "There are no events with register function." - " Registered paths: \"{}\"" + log.warning(( + "There are no events with `register` function" + " in registered paths: \"{}\"" ).format("| ".join(paths))) # Load presets for setting plugins @@ -122,7 +122,7 @@ class FtrackServer: else: register(self.session, plugins_presets=plugins_presets) - if function_counter%7 == 0: + if function_counter % 7 == 0: time.sleep(0.1) function_counter += 1 except Exception as exc: diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index fefba580e0..e623cab8fb 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -28,6 +28,10 @@ from pypeapp import Logger from pype.ftrack.lib.custom_db_connector import DbConnector +TOPIC_STATUS_SERVER = "pype.event.server.status" +TOPIC_STATUS_SERVER_RESULT = "pype.event.server.status.result" + + def ftrack_events_mongo_settings(): host = None port = None @@ -123,20 +127,59 @@ def check_ftrack_url(url, log_errors=True): return url -class StorerEventHub(ftrack_api.event.hub.EventHub): +class SocketBaseEventHub(ftrack_api.event.hub.EventHub): + + hearbeat_msg = b"hearbeat" + heartbeat_callbacks = [] + def __init__(self, *args, **kwargs): self.sock = kwargs.pop("sock") - super(StorerEventHub, self).__init__(*args, **kwargs) + super(SocketBaseEventHub, self).__init__(*args, **kwargs) def _handle_packet(self, code, packet_identifier, path, data): """Override `_handle_packet` which extend heartbeat""" code_name = self._code_name_mapping[code] if code_name == "heartbeat": # Reply with heartbeat. 
- self.sock.sendall(b"storer") - return self._send_packet(self._code_name_mapping['heartbeat']) + for callback in self.heartbeat_callbacks: + callback() - elif code_name == "connect": + self.sock.sendall(self.hearbeat_msg) + return self._send_packet(self._code_name_mapping["heartbeat"]) + + return super(SocketBaseEventHub, self)._handle_packet( + code, packet_identifier, path, data + ) + + +class StatusEventHub(SocketBaseEventHub): + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "connect": + event = ftrack_api.event.base.Event( + topic="pype.status.started", + data={}, + source={ + "id": self.id, + "user": {"username": self._api_user} + } + ) + self._event_queue.put(event) + + return super(StatusEventHub, self)._handle_packet( + code, packet_identifier, path, data + ) + + +class StorerEventHub(SocketBaseEventHub): + + hearbeat_msg = b"storer" + + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "connect": event = ftrack_api.event.base.Event( topic="pype.storer.started", data={}, @@ -152,7 +195,9 @@ class StorerEventHub(ftrack_api.event.hub.EventHub): ) -class ProcessEventHub(ftrack_api.event.hub.EventHub): +class ProcessEventHub(SocketBaseEventHub): + + hearbeat_msg = b"processor" url, database, table_name = get_ftrack_event_mongo_info() is_table_created = False @@ -164,7 +209,6 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub): database_name=self.database, table_name=self.table_name ) - self.sock = kwargs.pop("sock") super(ProcessEventHub, self).__init__(*args, **kwargs) def prepare_dbcon(self): @@ -260,42 +304,10 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub): code_name = self._code_name_mapping[code] if code_name == "event": return - if code_name == "heartbeat": - self.sock.sendall(b"processor") - return self._send_packet(self._code_name_mapping["heartbeat"]) return super()._handle_packet(code, packet_identifier, path, data) -class UserEventHub(ftrack_api.event.hub.EventHub): - def __init__(self, *args, **kwargs): - self.sock = kwargs.pop("sock") - super(UserEventHub, self).__init__(*args, **kwargs) - - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which extend heartbeat""" - code_name = self._code_name_mapping[code] - if code_name == "heartbeat": - # Reply with heartbeat. 
- self.sock.sendall(b"hearbeat") - return self._send_packet(self._code_name_mapping['heartbeat']) - - elif code_name == "connect": - event = ftrack_api.event.base.Event( - topic="pype.storer.started", - data={}, - source={ - "id": self.id, - "user": {"username": self._api_user} - } - ) - self._event_queue.put(event) - - return super(UserEventHub, self)._handle_packet( - code, packet_identifier, path, data - ) - - class SocketSession(ftrack_api.session.Session): '''An isolated session for interaction with an ftrack server.''' def __init__( diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py index c688693c77..942965f9e2 100644 --- a/pype/ftrack/ftrack_server/socket_thread.py +++ b/pype/ftrack/ftrack_server/socket_thread.py @@ -1,7 +1,9 @@ import os +import sys import time import socket import threading +import traceback import subprocess from pypeapp import Logger @@ -11,13 +13,15 @@ class SocketThread(threading.Thread): MAX_TIMEOUT = 35 - def __init__(self, name, port, filepath): + def __init__(self, name, port, filepath, additional_args=[]): super(SocketThread, self).__init__() - self.log = Logger().get_logger("SocketThread", "Event Thread") + self.log = Logger().get_logger(self.__class__.__name__) self.setName(name) self.name = name self.port = port self.filepath = filepath + self.additional_args = additional_args + self.sock = None self.subproc = None self.connection = None @@ -52,8 +56,13 @@ class SocketThread(threading.Thread): ) self.subproc = subprocess.Popen( - ["python", self.filepath, "-port", str(self.port)], - stdout=subprocess.PIPE + [ + sys.executable, + self.filepath, + *self.additional_args, + str(self.port) + ], + stdin=subprocess.PIPE ) # Listen for incoming connections @@ -115,11 +124,6 @@ class SocketThread(threading.Thread): if self.subproc.poll() is None: self.subproc.terminate() - lines = self.subproc.stdout.readlines() - if lines: - print("*** Socked Thread stdout ***") - for line in lines: - os.write(1, line) self.finished = True def get_data_from_con(self, connection): @@ -132,3 +136,52 @@ class SocketThread(threading.Thread): if data == b"MongoError": self.mongo_error = True connection.sendall(data) + + +class StatusSocketThread(SocketThread): + process_name_mapping = { + b"RestartS": "storer", + b"RestartP": "processor", + b"RestartM": "main" + } + + def __init__(self, *args, **kwargs): + self.process_threads = {} + self.stop_subprocess = False + super(StatusSocketThread, self).__init__(*args, **kwargs) + + def set_process(self, process_name, thread): + try: + if not self.subproc: + self.process_threads[process_name] = None + return + + if ( + process_name in self.process_threads and + self.process_threads[process_name] == thread + ): + return + + self.process_threads[process_name] = thread + self.subproc.stdin.write( + str.encode("reset:{}\r\n".format(process_name)) + ) + self.subproc.stdin.flush() + + except Exception: + print("Could not set thread in StatusSocketThread") + traceback.print_exception(*sys.exc_info()) + + def _handle_data(self, connection, data): + if not data: + return + + process_name = self.process_name_mapping.get(data) + if process_name: + if process_name == "main": + self.stop_subprocess = True + else: + subp = self.process_threads.get(process_name) + if subp: + subp.stop() + connection.sendall(data) diff --git a/pype/ftrack/ftrack_server/sub_event_processor.py b/pype/ftrack/ftrack_server/sub_event_processor.py index 9c971ca916..2a3ad3e76d 100644 --- 
a/pype/ftrack/ftrack_server/sub_event_processor.py +++ b/pype/ftrack/ftrack_server/sub_event_processor.py @@ -1,13 +1,59 @@ +import os import sys import signal import socket +import datetime from ftrack_server import FtrackServer -from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub +from pype.ftrack.ftrack_server.lib import ( + SocketSession, ProcessEventHub, TOPIC_STATUS_SERVER +) +import ftrack_api from pypeapp import Logger log = Logger().get_logger("Event processor") +subprocess_started = datetime.datetime.now() + + +class SessionFactory: + session = None + + +def send_status(event): + subprocess_id = event["data"].get("subprocess_id") + if not subprocess_id: + return + + if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]: + return + + session = SessionFactory.session + if not session: + return + + new_event_data = { + "subprocess_id": subprocess_id, + "source": "processor", + "status_info": { + "created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S") + } + } + + new_event = ftrack_api.event.base.Event( + topic="pype.event.server.status.result", + data=new_event_data + ) + + session.event_hub.publish(new_event) + + +def register(session): + '''Registers the event, subscribing the discover and launch topics.''' + session.event_hub.subscribe( + "topic={}".format(TOPIC_STATUS_SERVER), send_status + ) + def main(args): port = int(args[-1]) @@ -24,6 +70,9 @@ def main(args): session = SocketSession( auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub ) + register(session) + SessionFactory.session = session + server = FtrackServer("event") log.debug("Launched Ftrack Event processor") server.run_server(session) diff --git a/pype/ftrack/ftrack_server/sub_event_status.py b/pype/ftrack/ftrack_server/sub_event_status.py new file mode 100644 index 0000000000..d3e6a3d647 --- /dev/null +++ b/pype/ftrack/ftrack_server/sub_event_status.py @@ -0,0 +1,436 @@ +import os +import sys +import json +import threading +import signal +import socket +import datetime + +import ftrack_api +from ftrack_server import FtrackServer +from pype.ftrack.ftrack_server.lib import ( + SocketSession, StatusEventHub, + TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT +) +from pypeapp import Logger, config + +log = Logger().get_logger("Event storer") +action_identifier = ( + "event.server.status" + os.environ["FTRACK_EVENT_SUB_ID"] +) +host_ip = socket.gethostbyname(socket.gethostname()) +action_data = { + "label": "Pype Admin", + "variant": "- Event server Status ({})".format(host_ip), + "description": "Get Infromation about event server", + "actionIdentifier": action_identifier, + "icon": "{}/ftrack/action_icons/PypeAdmin.svg".format( + os.environ.get( + "PYPE_STATICS_SERVER", + "http://localhost:{}".format( + config.get_presets().get("services", {}).get( + "rest_api", {} + ).get("default_port", 8021) + ) + ) + ) +} + + +class ObjectFactory: + session = None + status_factory = None + checker_thread = None + last_trigger = None + + +class Status: + default_item = { + "type": "label", + "value": "Process info is not available at this moment." 
+ } + + def __init__(self, name, label, parent): + self.name = name + self.label = label or name + self.parent = parent + + self.info = None + self.last_update = None + + def update(self, info): + self.last_update = datetime.datetime.now() + self.info = info + + def get_delta_string(self, delta): + days, hours, minutes = ( + delta.days, delta.seconds // 3600, delta.seconds // 60 % 60 + ) + delta_items = [ + "{}d".format(days), + "{}h".format(hours), + "{}m".format(minutes) + ] + if not days: + delta_items.pop(0) + if not hours: + delta_items.pop(0) + delta_items.append("{}s".format(delta.seconds % 60)) + if not minutes: + delta_items.pop(0) + + return " ".join(delta_items) + + def get_items(self): + items = [] + last_update = "N/A" + if self.last_update: + delta = datetime.datetime.now() - self.last_update + last_update = "{} ago".format( + self.get_delta_string(delta) + ) + + last_update = "Updated: {}".format(last_update) + items.append({ + "type": "label", + "value": "#{}".format(self.label) + }) + items.append({ + "type": "label", + "value": "##{}".format(last_update) + }) + + if not self.info: + if self.info is None: + trigger_info_get() + items.append(self.default_item) + return items + + info = {} + for key, value in self.info.items(): + if key not in ["created_at:", "created_at"]: + info[key] = value + continue + + datetime_value = datetime.datetime.strptime( + value, "%Y.%m.%d %H:%M:%S" + ) + delta = datetime.datetime.now() - datetime_value + + running_for = self.get_delta_string(delta) + info["Started at"] = "{} [running: {}]".format(value, running_for) + + for key, value in info.items(): + items.append({ + "type": "label", + "value": "{}: {}".format(key, value) + }) + + return items + + +class StatusFactory: + + note_item = { + "type": "label", + "value": ( + "HINT: To refresh data uncheck" + " all checkboxes and hit `Submit` button." + ) + } + splitter_item = { + "type": "label", + "value": "---" + } + + def __init__(self, statuses={}): + self.statuses = [] + for status in statuses.items(): + self.create_status(*status) + + def __getitem__(self, key): + return self.get(key) + + def get(self, key, default=None): + for status in self.statuses: + if status.name == key: + return status + return default + + def is_filled(self): + for status in self.statuses: + if status.info is None: + return False + return True + + def create_status(self, name, label): + new_status = Status(name, label, self) + self.statuses.append(new_status) + + def process_event_result(self, event): + subprocess_id = event["data"].get("subprocess_id") + if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]: + return + + source = event["data"]["source"] + data = event["data"]["status_info"] + + self.update_status_info(source, data) + + def update_status_info(self, process_name, info): + for status in self.statuses: + if status.name == process_name: + status.update(info) + break + + def bool_items(self): + items = [] + items.append({ + "type": "label", + "value": "#Restart process" + }) + items.append({ + "type": "label", + "value": ( + "WARNING: Main process may shut down when checked" + " if does not run as a service!" 
+ ) + }) + + name_labels = {} + for status in self.statuses: + name_labels[status.name] = status.label + + for name, label in name_labels.items(): + items.append({ + "type": "boolean", + "value": False, + "label": label, + "name": name + }) + return items + + def items(self): + items = [] + items.append(self.note_item) + items.extend(self.bool_items()) + + for status in self.statuses: + items.append(self.splitter_item) + items.extend(status.get_items()) + + return items + + +def server_activity_validate_user(event): + """Validate user permissions to show server info.""" + session = ObjectFactory.session + + username = event["source"].get("user", {}).get("username") + if not username: + return False + + user_ent = session.query( + "User where username = \"{}\"".format(username) + ).first() + if not user_ent: + return False + + role_list = ["Pypeclub", "Administrator"] + for role in user_ent["user_security_roles"]: + if role["security_role"]["name"] in role_list: + return True + return False + + +def server_activity_discover(event): + """Discover action in actions menu conditions.""" + session = ObjectFactory.session + if session is None: + return + + if not server_activity_validate_user(event): + return + + return {"items": [action_data]} + + +def server_activity(event): + session = ObjectFactory.session + if session is None: + msg = "Session is not set. Can't trigger Reset action." + log.warning(msg) + return { + "success": False, + "message": msg + } + + if not server_activity_validate_user(event): + return { + "success": False, + "message": "You don't have permissions to see Event server status!" + } + + values = event["data"].get("values") or {} + is_checked = False + for value in values.values(): + if value: + is_checked = True + break + + if not is_checked: + return { + "items": ObjectFactory.status_factory.items(), + "title": "Server current status" + } + + session = ObjectFactory.session + if values["main"]: + session.event_hub.sock.sendall(b"RestartM") + return + + if values["storer"]: + session.event_hub.sock.sendall(b"RestartS") + + if values["processor"]: + session.event_hub.sock.sendall(b"RestartP") + + +def trigger_info_get(): + if ObjectFactory.last_trigger: + delta = datetime.datetime.now() - ObjectFactory.last_trigger + if delta.seconds() < 5: + return + + session = ObjectFactory.session + session.event_hub.publish( + ftrack_api.event.base.Event( + topic=TOPIC_STATUS_SERVER, + data={"subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"]} + ), + on_error="ignore" + ) + + +def on_start(event): + session = ObjectFactory.session + source_id = event.get("source", {}).get("id") + if not source_id or source_id != session.event_hub.id: + return + + if session is None: + log.warning("Session is not set. 
Can't trigger Sync to avalon action.") + return True + trigger_info_get() + + +def register(session): + '''Registers the event, subscribing the discover and launch topics.''' + session.event_hub.subscribe( + "topic=ftrack.action.discover", + server_activity_discover + ) + session.event_hub.subscribe("topic=pype.status.started", on_start) + + status_launch_subscription = ( + "topic=ftrack.action.launch and data.actionIdentifier={}" + ).format(action_identifier) + + session.event_hub.subscribe( + status_launch_subscription, + server_activity + ) + + session.event_hub.subscribe( + "topic={}".format(TOPIC_STATUS_SERVER_RESULT), + ObjectFactory.status_factory.process_event_result + ) + + +def heartbeat(): + if ObjectFactory.status_factory.is_filled(): + return + + trigger_info_get() + + +def main(args): + port = int(args[-1]) + server_info = json.loads(args[-2]) + + # Create a TCP/IP socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + # Connect the socket to the port where the server is listening + server_address = ("localhost", port) + log.debug("Statuser connected to {} port {}".format(*server_address)) + sock.connect(server_address) + sock.sendall(b"CreatedStatus") + # store socket connection object + ObjectFactory.sock = sock + + ObjectFactory.status_factory["main"].update(server_info) + _returncode = 0 + try: + session = SocketSession( + auto_connect_event_hub=True, sock=sock, Eventhub=StatusEventHub + ) + ObjectFactory.session = session + session.event_hub.heartbeat_callbacks.append(heartbeat) + register(session) + server = FtrackServer("event") + log.debug("Launched Ftrack Event statuser") + + server.run_server(session, load_files=False) + + except Exception: + _returncode = 1 + log.error("ServerInfo subprocess crashed", exc_info=True) + + finally: + log.debug("Ending. Closing socket.") + sock.close() + return _returncode + + +class OutputChecker(threading.Thread): + read_input = True + + def run(self): + while self.read_input: + for line in sys.stdin: + line = line.rstrip().lower() + if not line.startswith("reset:"): + continue + process_name = line.replace("reset:", "") + + ObjectFactory.status_factory.update_status_info( + process_name, None + ) + + def stop(self): + self.read_input = False + + +if __name__ == "__main__": + # Register interupt signal + def signal_handler(sig, frame): + print("You pressed Ctrl+C. 
Process ended.") + ObjectFactory.checker_thread.stop() + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + statuse_names = { + "main": "Main process", + "storer": "Event Storer", + "processor": "Event Processor" + } + ObjectFactory.status_factory = StatusFactory(statuse_names) + + checker_thread = OutputChecker() + ObjectFactory.checker_thread = checker_thread + checker_thread.start() + + sys.exit(main(sys.argv)) diff --git a/pype/ftrack/ftrack_server/sub_event_storer.py b/pype/ftrack/ftrack_server/sub_event_storer.py index dfe8e21654..b4b9b8a7ab 100644 --- a/pype/ftrack/ftrack_server/sub_event_storer.py +++ b/pype/ftrack/ftrack_server/sub_event_storer.py @@ -8,14 +8,15 @@ import pymongo import ftrack_api from ftrack_server import FtrackServer from pype.ftrack.ftrack_server.lib import ( + SocketSession, StorerEventHub, get_ftrack_event_mongo_info, - SocketSession, - StorerEventHub + TOPIC_STATUS_SERVER, TOPIC_STATUS_SERVER_RESULT ) from pype.ftrack.lib.custom_db_connector import DbConnector from pypeapp import Logger log = Logger().get_logger("Event storer") +subprocess_started = datetime.datetime.now() class SessionFactory: @@ -138,11 +139,42 @@ def trigger_sync(event): ) +def send_status(event): + session = SessionFactory.session + if not session: + return + + subprocess_id = event["data"].get("subprocess_id") + if not subprocess_id: + return + + if subprocess_id != os.environ["FTRACK_EVENT_SUB_ID"]: + return + + new_event_data = { + "subprocess_id": os.environ["FTRACK_EVENT_SUB_ID"], + "source": "storer", + "status_info": { + "created_at": subprocess_started.strftime("%Y.%m.%d %H:%M:%S") + } + } + + new_event = ftrack_api.event.base.Event( + topic=TOPIC_STATUS_SERVER_RESULT, + data=new_event_data + ) + + session.event_hub.publish(new_event) + + def register(session): '''Registers the event, subscribing the discover and launch topics.''' install_db() session.event_hub.subscribe("topic=*", launch) session.event_hub.subscribe("topic=pype.storer.started", trigger_sync) + session.event_hub.subscribe( + "topic={}".format(TOPIC_STATUS_SERVER), send_status + ) def main(args): diff --git a/pype/ftrack/ftrack_server/sub_user_server.py b/pype/ftrack/ftrack_server/sub_user_server.py index 68066b33ce..8c1497a562 100644 --- a/pype/ftrack/ftrack_server/sub_user_server.py +++ b/pype/ftrack/ftrack_server/sub_user_server.py @@ -2,12 +2,14 @@ import sys import signal import socket +import traceback + from ftrack_server import FtrackServer -from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub +from pype.ftrack.ftrack_server.lib import SocketSession, SocketBaseEventHub from pypeapp import Logger -log = Logger().get_logger(__name__) +log = Logger().get_logger("FtrackUserServer") def main(args): @@ -18,17 +20,21 @@ def main(args): # Connect the socket to the port where the server is listening server_address = ("localhost", port) - log.debug("Storer connected to {} port {}".format(*server_address)) + log.debug( + "User Ftrack Server connected to {} port {}".format(*server_address) + ) sock.connect(server_address) sock.sendall(b"CreatedUser") try: session = SocketSession( - auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub + auto_connect_event_hub=True, sock=sock, Eventhub=SocketBaseEventHub ) server = FtrackServer("action") - log.debug("Launched Ftrack Event storer") + log.debug("Launched User Ftrack Server") server.run_server(session=session) + except Exception: + traceback.print_exception(*sys.exc_info()) finally: 
log.debug("Closing socket") @@ -42,7 +48,6 @@ if __name__ == "__main__": log.info( "Process was forced to stop. Process ended." ) - log.info("Process ended.") sys.exit(0) signal.signal(signal.SIGINT, signal_handler) diff --git a/pype/ftrack/lib/__init__.py b/pype/ftrack/lib/__init__.py index 9af9ded943..9da3b819b3 100644 --- a/pype/ftrack/lib/__init__.py +++ b/pype/ftrack/lib/__init__.py @@ -1,6 +1,11 @@ from . import avalon_sync -from .credentials import * +from . import credentials from .ftrack_app_handler import * from .ftrack_event_handler import * from .ftrack_action_handler import * from .ftrack_base_handler import * + +from .lib import ( + get_project_from_entity, + get_avalon_entities_for_assetversion +) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index 8cebd12a59..6f928914bf 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -236,6 +236,7 @@ class SyncEntitiesFactory: " from TypedContext where project_id is \"{}\"" ) ignore_custom_attr_key = "avalon_ignore_sync" + ignore_entity_types = ["milestone"] report_splitter = {"type": "label", "value": "---"} @@ -366,7 +367,10 @@ class SyncEntitiesFactory: parent_id = entity["parent_id"] entity_type = entity.entity_type entity_type_low = entity_type.lower() - if entity_type_low == "task": + if entity_type_low in self.ignore_entity_types: + continue + + elif entity_type_low == "task": entities_dict[parent_id]["tasks"].append(entity["name"]) continue @@ -1722,7 +1726,11 @@ class SyncEntitiesFactory: self.avalon_project_id = new_id self._avalon_ents_by_id[str(new_id)] = project_item + if self._avalon_ents_by_ftrack_id is None: + self._avalon_ents_by_ftrack_id = {} self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id) + if self._avalon_ents_by_name is None: + self._avalon_ents_by_name = {} self._avalon_ents_by_name[project_item["name"]] = str(new_id) self.create_list.append(project_item) @@ -1991,7 +1999,7 @@ class SyncEntitiesFactory: vis_par = ent["data"]["visualParent"] if ( vis_par is not None and - str(vis_par) in self.deleted_entities + str(vis_par) in _deleted_entities ): continue _ready.append(mongo_id) @@ -2059,9 +2067,10 @@ class SyncEntitiesFactory: # different hierarchy - can't recreate entity continue - _vis_parent = str(deleted_entity["data"]["visualParent"]) + _vis_parent = deleted_entity["data"]["visualParent"] if _vis_parent is None: _vis_parent = self.avalon_project_id + _vis_parent = str(_vis_parent) ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent] self.create_ftrack_ent_from_avalon_ent( deleted_entity, ftrack_parent_id diff --git a/pype/ftrack/lib/credentials.py b/pype/ftrack/lib/credentials.py index 7e305942f2..16b1fb25fb 100644 --- a/pype/ftrack/lib/credentials.py +++ b/pype/ftrack/lib/credentials.py @@ -2,85 +2,140 @@ import os import json import ftrack_api import appdirs +import getpass +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse -config_path = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype')) -action_file_name = 'ftrack_cred.json' -event_file_name = 'ftrack_event_cred.json' -action_fpath = os.path.join(config_path, action_file_name) -event_fpath = os.path.join(config_path, event_file_name) -folders = set([os.path.dirname(action_fpath), os.path.dirname(event_fpath)]) +CONFIG_PATH = os.path.normpath(appdirs.user_data_dir("pype-app", "pype")) +CREDENTIALS_FILE_NAME = "ftrack_cred.json" +CREDENTIALS_PATH = os.path.join(CONFIG_PATH, CREDENTIALS_FILE_NAME) +CREDENTIALS_FOLDER = 
os.path.dirname(CREDENTIALS_PATH) -for folder in folders: - if not os.path.isdir(folder): - os.makedirs(folder) +if not os.path.isdir(CREDENTIALS_FOLDER): + os.makedirs(CREDENTIALS_FOLDER) + +USER_GETTER = None -def _get_credentials(event=False): - if event: - fpath = event_fpath - else: - fpath = action_fpath +def get_ftrack_hostname(ftrack_server=None): + if not ftrack_server: + ftrack_server = os.environ["FTRACK_SERVER"] + if "//" not in ftrack_server: + ftrack_server = "//" + ftrack_server + + return urlparse(ftrack_server).hostname + + +def get_user(): + if USER_GETTER: + return USER_GETTER() + return getpass.getuser() + + +def get_credentials(ftrack_server=None, user=None): credentials = {} - try: - file = open(fpath, 'r') - credentials = json.load(file) - except Exception: - file = open(fpath, 'w') + if not os.path.exists(CREDENTIALS_PATH): + with open(CREDENTIALS_PATH, "w") as file: + file.write(json.dumps(credentials)) + file.close() + return credentials - file.close() + with open(CREDENTIALS_PATH, "r") as file: + content = file.read() + + hostname = get_ftrack_hostname(ftrack_server) + if not user: + user = get_user() + + content_json = json.loads(content or "{}") + credentials = content_json.get(hostname, {}).get(user) or {} return credentials -def _save_credentials(username, apiKey, event=False, auto_connect=None): - data = { - 'username': username, - 'apiKey': apiKey +def save_credentials(ft_user, ft_api_key, ftrack_server=None, user=None): + hostname = get_ftrack_hostname(ftrack_server) + if not user: + user = get_user() + + with open(CREDENTIALS_PATH, "r") as file: + content = file.read() + + content_json = json.loads(content or "{}") + if hostname not in content_json: + content_json[hostname] = {} + + content_json[hostname][user] = { + "username": ft_user, + "api_key": ft_api_key } - if event: - fpath = event_fpath - if auto_connect is None: - cred = _get_credentials(True) - auto_connect = cred.get('auto_connect', False) - data['auto_connect'] = auto_connect - else: - fpath = action_fpath + # Deprecated keys + if "username" in content_json: + content_json.pop("username") + if "apiKey" in content_json: + content_json.pop("apiKey") - file = open(fpath, 'w') - file.write(json.dumps(data)) - file.close() + with open(CREDENTIALS_PATH, "w") as file: + file.write(json.dumps(content_json, indent=4)) -def _clear_credentials(event=False): - if event: - fpath = event_fpath - else: - fpath = action_fpath - open(fpath, 'w').close() - _set_env(None, None) +def clear_credentials(ft_user=None, ftrack_server=None, user=None): + if not ft_user: + ft_user = os.environ.get("FTRACK_API_USER") + + if not ft_user: + return + + hostname = get_ftrack_hostname(ftrack_server) + if not user: + user = get_user() + + with open(CREDENTIALS_PATH, "r") as file: + content = file.read() + + content_json = json.loads(content or "{}") + if hostname not in content_json: + content_json[hostname] = {} + + content_json[hostname].pop(user, None) + + with open(CREDENTIALS_PATH, "w") as file: + file.write(json.dumps(content_json)) -def _set_env(username, apiKey): - if not username: - username = '' - if not apiKey: - apiKey = '' - os.environ['FTRACK_API_USER'] = username - os.environ['FTRACK_API_KEY'] = apiKey +def set_env(ft_user=None, ft_api_key=None): + os.environ["FTRACK_API_USER"] = ft_user or "" + os.environ["FTRACK_API_KEY"] = ft_api_key or "" -def _check_credentials(username=None, apiKey=None): +def get_env_credentials(): + return ( + os.environ.get("FTRACK_API_USER"), + os.environ.get("FTRACK_API_KEY") + ) - 
if username and apiKey: - _set_env(username, apiKey) + +def check_credentials(ft_user, ft_api_key, ftrack_server=None): + if not ftrack_server: + ftrack_server = os.environ["FTRACK_SERVER"] + + if not ft_user or not ft_api_key: + return False try: - session = ftrack_api.Session() + session = ftrack_api.Session( + server_url=ftrack_server, + api_key=ft_api_key, + api_user=ft_user + ) session.close() - except Exception as e: + + except Exception: return False return True diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py index 9dc735987d..2b46dd43d8 100644 --- a/pype/ftrack/lib/ftrack_app_handler.py +++ b/pype/ftrack/lib/ftrack_app_handler.py @@ -193,6 +193,8 @@ class AppAction(BaseHandler): if parents: hierarchy = os.path.join(*parents) + os.environ["AVALON_HIERARCHY"] = hierarchy + application = avalonlib.get_application(os.environ["AVALON_APP_NAME"]) data = { diff --git a/pype/ftrack/lib/ftrack_base_handler.py b/pype/ftrack/lib/ftrack_base_handler.py index 8329505ffb..f11cb020e9 100644 --- a/pype/ftrack/lib/ftrack_base_handler.py +++ b/pype/ftrack/lib/ftrack_base_handler.py @@ -49,7 +49,7 @@ class BaseHandler(object): ).format( str(type(session)), str(ftrack_api.session.Session), - str(session_processor.ProcessSession) + str(SocketSession) )) self._session = session diff --git a/pype/ftrack/lib/lib.py b/pype/ftrack/lib/lib.py new file mode 100644 index 0000000000..aee297fc7e --- /dev/null +++ b/pype/ftrack/lib/lib.py @@ -0,0 +1,135 @@ +from bson.objectid import ObjectId + +from .avalon_sync import CustAttrIdKey +import avalon.io + + +def get_project_from_entity(entity): + # TODO add more entities + ent_type_lowered = entity.entity_type.lower() + if ent_type_lowered == "project": + return entity + + elif ent_type_lowered == "assetversion": + return entity["asset"]["parent"]["project"] + + elif "project" in entity: + return entity["project"] + + return None + + +def get_avalon_entities_for_assetversion(asset_version, db_con=None): + output = { + "success": True, + "message": None, + "project": None, + "project_name": None, + "asset": None, + "asset_name": None, + "asset_path": None, + "subset": None, + "subset_name": None, + "version": None, + "version_name": None, + "representations": None + } + + if db_con is None: + db_con = avalon.io + db_con.install() + + ft_asset = asset_version["asset"] + subset_name = ft_asset["name"] + version = asset_version["version"] + parent = ft_asset["parent"] + ent_path = "/".join( + [ent["name"] for ent in parent["link"]] + ) + project = get_project_from_entity(asset_version) + project_name = project["full_name"] + + output["project_name"] = project_name + output["asset_name"] = parent["name"] + output["asset_path"] = ent_path + output["subset_name"] = subset_name + output["version_name"] = version + + db_con.Session["AVALON_PROJECT"] = project_name + + avalon_project = db_con.find_one({"type": "project"}) + output["project"] = avalon_project + + if not avalon_project: + output["success"] = False + output["message"] = "Project not synchronized to avalon `{}`".format( + project_name + ) + return output + + asset_ent = None + asset_mongo_id = parent["custom_attributes"].get(CustAttrIdKey) + if asset_mongo_id: + try: + asset_mongo_id = ObjectId(asset_mongo_id) + asset_ent = db_con.find_one({ + "type": "asset", + "_id": asset_mongo_id + }) + except Exception: + pass + + if not asset_ent: + asset_ent = db_con.find_one({ + "type": "asset", + "data.ftrackId": parent["id"] + }) + + output["asset"] = asset_ent + + if not 
asset_ent: + output["success"] = False + output["message"] = "Not synchronized entity to avalon `{}`".format( + ent_path + ) + return output + + asset_mongo_id = asset_ent["_id"] + + subset_ent = db_con.find_one({ + "type": "subset", + "parent": asset_mongo_id, + "name": subset_name + }) + + output["subset"] = subset_ent + + if not subset_ent: + output["success"] = False + output["message"] = ( + "Subset `{}` does not exist under Asset `{}`" + ).format(subset_name, ent_path) + return output + + version_ent = db_con.find_one({ + "type": "version", + "name": version, + "parent": subset_ent["_id"] + }) + + output["version"] = version_ent + + if not version_ent: + output["success"] = False + output["message"] = ( + "Version `{}` does not exist under Subset `{}` | Asset `{}`" + ).format(version, subset_name, ent_path) + return output + + repre_ents = list(db_con.find({ + "type": "representation", + "parent": version_ent["_id"] + })) + + output["representations"] = repre_ents + return output diff --git a/pype/ftrack/tray/ftrack_module.py b/pype/ftrack/tray/ftrack_module.py index dab751c001..5811209a02 100644 --- a/pype/ftrack/tray/ftrack_module.py +++ b/pype/ftrack/tray/ftrack_module.py @@ -34,29 +34,28 @@ class FtrackModule: def validate(self): validation = False - cred = credentials._get_credentials() - try: - if 'username' in cred and 'apiKey' in cred: - validation = credentials._check_credentials( - cred['username'], - cred['apiKey'] - ) - if validation is False: - self.show_login_widget() - else: - self.show_login_widget() - - except Exception as e: - log.error("We are unable to connect to Ftrack: {0}".format(e)) - - validation = credentials._check_credentials() - if validation is True: + cred = credentials.get_credentials() + ft_user = cred.get("username") + ft_api_key = cred.get("api_key") + validation = credentials.check_credentials(ft_user, ft_api_key) + if validation: + credentials.set_env(ft_user, ft_api_key) log.info("Connected to Ftrack successfully") self.loginChange() - else: - log.warning("Please sign in to Ftrack") - self.bool_logged = False - self.set_menu_visibility() + + return validation + + if not validation and ft_user and ft_api_key: + log.warning( + "Current Ftrack credentials are not valid. 
{}: {} - {}".format( + str(os.environ.get("FTRACK_SERVER")), ft_user, ft_api_key + ) + ) + + log.info("Please sign in to Ftrack") + self.bool_logged = False + self.show_login_widget() + self.set_menu_visibility() return validation @@ -67,7 +66,7 @@ class FtrackModule: self.start_action_server() def logout(self): - credentials._clear_credentials() + credentials.clear_credentials() self.stop_action_server() log.info("Logged out of Ftrack") @@ -171,7 +170,7 @@ class FtrackModule: # If thread failed test Ftrack and Mongo connection elif not self.thread_socket_server.isAlive(): - self.thread_socket_server_thread.join() + self.thread_socket_server.join() self.thread_socket_server = None ftrack_accessible = False @@ -307,11 +306,23 @@ class FtrackModule: except Exception as e: log.error("During Killing Timer event server: {0}".format(e)) + def changed_user(self): + self.stop_action_server() + credentials.set_env() + self.validate() + def process_modules(self, modules): if 'TimersManager' in modules: self.timer_manager = modules['TimersManager'] self.timer_manager.add_module(self) + if "UserModule" in modules: + credentials.USER_GETTER = modules["UserModule"].get_user + modules["UserModule"].register_callback_on_user_change( + self.changed_user + ) + + def start_timer_manager(self, data): if self.thread_timer is not None: self.thread_timer.ftrack_start_timer(data) @@ -336,7 +347,7 @@ class FtrackEventsThread(QtCore.QThread): def __init__(self, parent): super(FtrackEventsThread, self).__init__() - cred = credentials._get_credentials() + cred = credentials.get_credentials() self.username = cred['username'] self.user = None self.last_task = None diff --git a/pype/ftrack/tray/login_dialog.py b/pype/ftrack/tray/login_dialog.py index 4dcbec5ab3..5f3777f93e 100644 --- a/pype/ftrack/tray/login_dialog.py +++ b/pype/ftrack/tray/login_dialog.py @@ -204,11 +204,11 @@ class Login_Dialog_ui(QtWidgets.QWidget): self.setError("{0} {1}".format(msg, " and ".join(missing))) return - verification = credentials._check_credentials(username, apiKey) + verification = credentials.check_credentials(username, apiKey) if verification: - credentials._save_credentials(username, apiKey, self.is_event) - credentials._set_env(username, apiKey) + credentials.save_credentials(username, apiKey, self.is_event) + credentials.set_env(username, apiKey) if self.parent is not None: self.parent.loginChange() self._close_widget() @@ -304,11 +304,11 @@ class Login_Dialog_ui(QtWidgets.QWidget): self._login_server_thread.start(url) return - verification = credentials._check_credentials(username, apiKey) + verification = credentials.check_credentials(username, apiKey) if verification is True: - credentials._save_credentials(username, apiKey, self.is_event) - credentials._set_env(username, apiKey) + credentials.save_credentials(username, apiKey, self.is_event) + credentials.set_env(username, apiKey) if self.parent is not None: self.parent.loginChange() self._close_widget() diff --git a/pype/lib.py b/pype/lib.py index f26395d930..ad3a863854 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -13,6 +13,62 @@ import avalon log = logging.getLogger(__name__) +def get_paths_from_environ(env_key, return_first=False): + """Return existing paths from specific envirnment variable. + + :param env_key: Environment key where should look for paths. + :type env_key: str + :param return_first: Return first path on `True`, list of all on `False`. 
+ :type return_first: boolean + + Difference when none of paths exists: + - when `return_first` is set to `False` then function returns empty list. + - when `return_first` is set to `True` then function returns `None`. + """ + + existing_paths = [] + paths = os.environ.get(env_key) or "" + path_items = paths.split(os.pathsep) + for path in path_items: + # Skip empty string + if not path: + continue + # Normalize path + path = os.path.normpath(path) + # Check if path exists + if os.path.exists(path): + # Return path if `return_first` is set to True + if return_first: + return path + # Store path + existing_paths.append(path) + + # Return None if none of paths exists + if return_first: + return None + # Return all existing paths from environment variable + return existing_paths + + +def get_ffmpeg_tool_path(tool="ffmpeg"): + """Find path to ffmpeg tool in FFMPEG_PATH paths. + + Function looks for tool in paths set in FFMPEG_PATH environment. If tool + exists then returns it's full path. + + Returns tool name itself when tool path was not found. (FFmpeg path may be + set in PATH environment variable) + """ + + dir_paths = get_paths_from_environ("FFMPEG_PATH") + for dir_path in dir_paths: + for file_name in os.listdir(dir_path): + base, ext = os.path.splitext(file_name) + if base.lower() == tool.lower(): + return os.path.join(dir_path, tool) + return tool + + # Special naming case for subprocess since its a built-in method. def _subprocess(*args, **kwargs): """Convenience method for getting output errors for subprocess.""" @@ -361,23 +417,7 @@ def _get_host_name(): def get_asset(asset_name=None): - entity_data_keys_from_project_when_miss = [ - "frameStart", "frameEnd", "handleStart", "handleEnd", "fps", - "resolutionWidth", "resolutionHeight" - ] - - entity_keys_from_project_when_miss = [] - - alternatives = { - "handleStart": "handles", - "handleEnd": "handles" - } - - defaults = { - "handleStart": 0, - "handleEnd": 0 - } - + """ Returning asset document from database """ if not asset_name: asset_name = avalon.api.Session["AVALON_ASSET"] @@ -385,57 +425,10 @@ def get_asset(asset_name=None): "name": asset_name, "type": "asset" }) + if not asset_document: raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) - project_document = io.find_one({"type": "project"}) - - for key in entity_data_keys_from_project_when_miss: - if asset_document["data"].get(key): - continue - - value = project_document["data"].get(key) - if value is not None or key not in alternatives: - asset_document["data"][key] = value - continue - - alt_key = alternatives[key] - value = asset_document["data"].get(alt_key) - if value is not None: - asset_document["data"][key] = value - continue - - value = project_document["data"].get(alt_key) - if value: - asset_document["data"][key] = value - continue - - if key in defaults: - asset_document["data"][key] = defaults[key] - - for key in entity_keys_from_project_when_miss: - if asset_document.get(key): - continue - - value = project_document.get(key) - if value is not None or key not in alternatives: - asset_document[key] = value - continue - - alt_key = alternatives[key] - value = asset_document.get(alt_key) - if value: - asset_document[key] = value - continue - - value = project_document.get(alt_key) - if value: - asset_document[key] = value - continue - - if key in defaults: - asset_document[key] = defaults[key] - return asset_document diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index b4dbc52bc8..fdc061f069 100644 --- a/pype/maya/__init__.py +++ 
b/pype/maya/__init__.py @@ -8,7 +8,6 @@ from avalon import api as avalon, pipeline, maya from avalon.maya.pipeline import IS_HEADLESS from avalon.tools import workfiles from pyblish import api as pyblish -from pypeapp import config from ..lib import ( any_outdated @@ -156,12 +155,19 @@ def on_open(_): from avalon.vendor.Qt import QtWidgets from ..widgets import popup + cmds.evalDeferred( + "from pype.maya import lib;lib.remove_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_change_observer()") # # Update current task for the current scene # update_task_from_path(cmds.file(query=True, sceneName=True)) # Validate FPS after update_task_from_path to # ensure it is using correct FPS for the asset lib.validate_fps() + lib.fix_incompatible_containers() if any_outdated(): log.warning("Scene has outdated content.") @@ -193,6 +199,12 @@ def on_new(_): """Set project resolution and fps when create a new file""" avalon.logger.info("Running callback on new..") with maya.suspended_refresh(): + cmds.evalDeferred( + "from pype.maya import lib;lib.remove_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_change_observer()") lib.set_context_settings() @@ -217,3 +229,10 @@ def on_task_changed(*args): # Run maya.pipeline._on_task_changed() + with maya.suspended_refresh(): + lib.set_context_settings() + lib.update_content_on_context_change() + + lib.show_message("Context was changed", + ("Context was changed to {}".format( + avalon.Session["AVALON_ASSET"]))) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 0890d3863e..a06810ea94 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2176,18 +2176,29 @@ def load_capture_preset(path=None, data=None): 4: 'nolights'} for key in preset[id]: if key == 'high_quality': - temp_options2['multiSampleEnable'] = True - temp_options2['multiSampleCount'] = 8 - temp_options2['textureMaxResolution'] = 1024 - temp_options2['enableTextureMaxRes'] = True + if preset[id][key] == True: + temp_options2['multiSampleEnable'] = True + temp_options2['multiSampleCount'] = 4 + temp_options2['textureMaxResolution'] = 1024 + temp_options2['enableTextureMaxRes'] = True + temp_options2['textureMaxResMode'] = 1 + else: + temp_options2['multiSampleEnable'] = False + temp_options2['multiSampleCount'] = 4 + temp_options2['textureMaxResolution'] = 512 + temp_options2['enableTextureMaxRes'] = True + temp_options2['textureMaxResMode'] = 0 + + if key == 'ssaoEnable': + if preset[id][key] == True: + temp_options2['ssaoEnable'] = True + else: + temp_options2['ssaoEnable'] = False if key == 'alphaCut': temp_options2['transparencyAlgorithm'] = 5 temp_options2['transparencyQuality'] = 1 - if key == 'ssaoEnable': - temp_options2['ssaoEnable'] = True - if key == 'headsUpDisplay': temp_options['headsUpDisplay'] = True @@ -2318,6 +2329,25 @@ def get_attr_in_layer(attr, layer): return cmds.getAttr(attr) +def fix_incompatible_containers(): + """Return whether the current scene has any outdated content""" + + host = avalon.api.registered_host() + for container in host.ls(): + loader = container['loader'] + + print(container['loader']) + + if loader in ["MayaAsciiLoader", + "AbcLoader", + "ModelLoader", + "CameraLoader", + "RigLoader", + "FBXLoader"]: + cmds.setAttr(container["objectName"] + ".loader", + "ReferenceLoader", type="string") + + def 
_null(*args): pass @@ -2369,15 +2399,19 @@ class shelf(): if not item.get('command'): item['command'] = self._null if item['type'] == 'button': - self.addButon(item['name'], command=item['command']) + self.addButon(item['name'], + command=item['command'], + icon=item['icon']) if item['type'] == 'menuItem': self.addMenuItem(item['parent'], item['name'], - command=item['command']) + command=item['command'], + icon=item['icon']) if item['type'] == 'subMenu': self.addMenuItem(item['parent'], item['name'], - command=item['command']) + command=item['command'], + icon=item['icon']) def addButon(self, label, icon="commandButton.png", command=_null, doubleCommand=_null): @@ -2387,7 +2421,8 @@ class shelf(): ''' cmds.setParent(self.name) if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) cmds.shelfButton(width=37, height=37, image=icon, label=label, command=command, dcc=doubleCommand, imageOverlayLabel=label, olb=self.labelBackground, @@ -2399,7 +2434,8 @@ class shelf(): double click command and image. ''' if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) return cmds.menuItem(p=parent, label=label, c=command, i="") def addSubMenu(self, parent, label, icon=None): @@ -2408,7 +2444,8 @@ class shelf(): the specified parent popup menu. ''' if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1) def _cleanOldShelf(self): @@ -2422,3 +2459,177 @@ class shelf(): cmds.deleteUI(each) else: cmds.shelfLayout(self.name, p="ShelfLayout") + + +def _get_render_instance(): + objectset = cmds.ls("*.id", long=True, type="objectSet", + recursive=True, objectsOnly=True) + + for objset in objectset: + + if not cmds.attributeQuery("id", node=objset, exists=True): + continue + + id_attr = "{}.id".format(objset) + if cmds.getAttr(id_attr) != "pyblish.avalon.instance": + continue + + has_family = cmds.attributeQuery("family", + node=objset, + exists=True) + if not has_family: + continue + + if cmds.getAttr("{}.family".format(objset)) == 'rendering': + return objset + + return None + + +renderItemObserverList = [] + + +class RenderSetupListObserver: + + def listItemAdded(self, item): + print("--- adding ...") + self._add_render_layer(item) + + def listItemRemoved(self, item): + print("--- removing ...") + self._remove_render_layer(item.name()) + + def _add_render_layer(self, item): + render_set = _get_render_instance() + layer_name = item.name() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) or [] + if not "LAYER_{}".format(layer_name) in members: + print(" - creating set for {}".format(layer_name)) + set = cmds.sets(n="LAYER_{}".format(layer_name), empty=True) + cmds.sets(set, forceElement=render_set) + rio = RenderSetupItemObserver(item) + print("- adding observer for {}".format(item.name())) + item.addItemObserver(rio.itemChanged) + renderItemObserverList.append(rio) + + def _remove_render_layer(self, layer_name): + render_set = _get_render_instance() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) + if "LAYER_{}".format(layer_name) in members: + print(" - removing set for {}".format(layer_name)) + cmds.delete("LAYER_{}".format(layer_name)) + + +class RenderSetupItemObserver(): + + def __init__(self, item): + self.item = item + self.original_name = item.name() + + def itemChanged(self, *args, **kwargs): + if self.item.name() == self.original_name: + return + + 
render_set = _get_render_instance() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) + if "LAYER_{}".format(self.original_name) in members: + print(" <> renaming {} to {}".format(self.original_name, + self.item.name())) + cmds.rename("LAYER_{}".format(self.original_name), + "LAYER_{}".format(self.item.name())) + self.original_name = self.item.name() + + +renderListObserver = RenderSetupListObserver() + + +def add_render_layer_change_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + rs = renderSetup.instance() + render_set = _get_render_instance() + if not render_set: + return + + members = cmds.sets(render_set, query=True) + layers = rs.getRenderLayers() + for layer in layers: + if "LAYER_{}".format(layer.name()) in members: + rio = RenderSetupItemObserver(layer) + print("- adding observer for {}".format(layer.name())) + layer.addItemObserver(rio.itemChanged) + renderItemObserverList.append(rio) + + +def add_render_layer_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + print("> adding renderSetup observer ...") + rs = renderSetup.instance() + rs.addListObserver(renderListObserver) + pass + + +def remove_render_layer_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + print("< removing renderSetup observer ...") + rs = renderSetup.instance() + try: + rs.removeListObserver(renderListObserver) + except ValueError: + # no observer set yet + pass + + +def update_content_on_context_change(): + """ + This will update scene content to match new asset on context change + """ + scene_sets = cmds.listSets(allSets=True) + new_asset = api.Session["AVALON_ASSET"] + new_data = lib.get_asset()["data"] + for s in scene_sets: + try: + if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance": + attr = cmds.listAttr(s) + print(s) + if "asset" in attr: + print(" - setting asset to: [ {} ]".format(new_asset)) + cmds.setAttr("{}.asset".format(s), + new_asset, type="string") + if "frameStart" in attr: + cmds.setAttr("{}.frameStart".format(s), + new_data["frameStart"]) + if "frameEnd" in attr: + cmds.setAttr("{}.frameEnd".format(s), + new_data["frameEnd"],) + except ValueError: + pass + + +def show_message(title, msg): + from avalon.vendor.Qt import QtWidgets + from ..widgets import message_window + + # Find maya main window + top_level_widgets = {w.objectName(): w for w in + QtWidgets.QApplication.topLevelWidgets()} + + parent = top_level_widgets.get("MayaWindow", None) + if parent is None: + pass + else: + message_window.message(title=title, message=msg, parent=parent) diff --git a/pype/maya/menu.py b/pype/maya/menu.py index 5254337f03..806944c117 100644 --- a/pype/maya/menu.py +++ b/pype/maya/menu.py @@ -15,12 +15,13 @@ log = logging.getLogger(__name__) def _get_menu(): """Return the menu instance if it currently exists in Maya""" - app = QtWidgets.QApplication.instance() - widgets = dict((w.objectName(), w) for w in app.allWidgets()) + widgets = dict(( + w.objectName(), w) for w in QtWidgets.QApplication.allWidgets()) menu = widgets.get(self._menu) return menu + def deferred(): log.info("Attempting to install scripts menu..") diff --git a/pype/maya/plugin.py b/pype/maya/plugin.py index 327cf47cbd..ed244d56df 100644 --- a/pype/maya/plugin.py +++ b/pype/maya/plugin.py @@ -1,4 +1,5 @@ from avalon import api +from avalon.vendor import qargparse def get_reference_node_parents(ref): @@ -33,11 +34,29 @@ class ReferenceLoader(api.Loader): `update` logic. 
""" - def load(self, - context, - name=None, - namespace=None, - data=None): + + options = [ + qargparse.Integer( + "count", + label="Count", + default=1, + min=1, + help="How many times to load?" + ), + qargparse.Double3( + "offset", + label="Position Offset", + help="Offset loaded models for easier selection." + ) + ] + + def load( + self, + context, + name=None, + namespace=None, + options=None + ): import os from avalon.maya import lib @@ -46,29 +65,46 @@ class ReferenceLoader(api.Loader): assert os.path.exists(self.fname), "%s does not exist." % self.fname asset = context['asset'] + loaded_containers = [] - namespace = namespace or lib.unique_namespace( - asset["name"] + "_", - prefix="_" if asset["name"][0].isdigit() else "", - suffix="_", - ) + count = options.get("count") or 1 + for c in range(0, count): + namespace = namespace or lib.unique_namespace( + asset["name"] + "_", + prefix="_" if asset["name"][0].isdigit() else "", + suffix="_", + ) - self.process_reference(context=context, - name=name, - namespace=namespace, - data=data) + # Offset loaded subset + if "offset" in options: + offset = [i * c for i in options["offset"]] + options["translate"] = offset - # Only containerize if any nodes were loaded by the Loader - nodes = self[:] - if not nodes: - return + self.log.info(options) - return containerise( - name=name, - namespace=namespace, - nodes=nodes, - context=context, - loader=self.__class__.__name__) + self.process_reference( + context=context, + name=name, + namespace=namespace, + options=options + ) + + # Only containerize if any nodes were loaded by the Loader + nodes = self[:] + if not nodes: + return + + loaded_containers.append(containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__ + )) + + c += 1 + namespace = None + return loaded_containers def process_reference(self, context, name, namespace, data): """To be implemented by subclass""" diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index 141cf4c13d..e775468996 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -33,41 +33,6 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -class NukeHandler(logging.Handler): - ''' - Nuke Handler - emits logs into nuke's script editor. - warning will emit nuke.warning() - critical and fatal would popup msg dialog to alert of the error. - ''' - - def __init__(self): - logging.Handler.__init__(self) - self.set_name("Pype_Nuke_Handler") - - def emit(self, record): - # Formated message: - msg = self.format(record) - - if record.levelname.lower() in [ - # "warning", - "critical", - "fatal", - "error" - ]: - msg = self.format(record) - nuke.message(msg) - - -'''Adding Nuke Logging Handler''' -log.info([handler.get_name() for handler in logging.root.handlers[:]]) -nuke_handler = NukeHandler() -if nuke_handler.get_name() \ - not in [handler.get_name() - for handler in logging.root.handlers[:]]: - logging.getLogger().addHandler(nuke_handler) - logging.getLogger().setLevel(logging.INFO) -log.info([handler.get_name() for handler in logging.root.handlers[:]]) - def reload_config(): """Attempt to reload pipeline at run-time. @@ -113,7 +78,7 @@ def install(): family_states = [ "write", "review", - "nukenodes" + "nukenodes" "gizmo" ] @@ -128,11 +93,11 @@ def install(): # Set context settings. 
nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root") + nuke.addOnCreate(workfile_settings.set_favorites, nodeClass="Root") menu.install() - def launch_workfiles_app(): '''Function letting start workfiles after start of host ''' diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 7aa0395da5..8e241dad16 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -15,13 +15,11 @@ import nuke from .presets import ( get_colorspace_preset, get_node_dataflow_preset, - get_node_colorspace_preset -) - -from .presets import ( + get_node_colorspace_preset, get_anatomy ) -# TODO: remove get_anatomy and import directly Anatomy() here + +from .utils import set_context_favorites from pypeapp import Logger log = Logger().get_logger(__name__, "nuke") @@ -50,8 +48,6 @@ def checkInventoryVersions(): and check if the node is having actual version. If not then it will color it to red. """ - # TODO: make it for all nodes not just Read (Loader - # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): if each.Class() == 'Read': @@ -93,7 +89,6 @@ def checkInventoryVersions(): def writes_version_sync(): ''' Callback synchronizing version of publishable write nodes ''' - # TODO: make it work with new write node group try: rootVersion = pype.get_version_from_path(nuke.root().name()) padding = len(rootVersion) @@ -130,7 +125,8 @@ def writes_version_sync(): os.makedirs(os.path.dirname(node_new_file), 0o766) except Exception as e: log.warning( - "Write node: `{}` has no version in path: {}".format(each.name(), e)) + "Write node: `{}` has no version in path: {}".format( + each.name(), e)) def version_up_script(): @@ -183,9 +179,12 @@ def format_anatomy(data): try: padding = int(anatomy.templates['render']['padding']) except KeyError as e: - log.error("`padding` key is not in `render` " - "Anatomy template. Please, add it there and restart " - "the pipeline (padding: \"4\"): `{}`".format(e)) + msg = ("`padding` key is not in `render` " + "Anatomy template. 
Please, add it there and restart " + "the pipeline (padding: \"4\"): `{}`").format(e) + + log.error(msg) + nuke.message(msg) version = data.get("version", None) if not version: @@ -196,7 +195,7 @@ def format_anatomy(data): "root": api.Session["AVALON_PROJECTS"], "subset": data["avalon"]["subset"], "asset": data["avalon"]["asset"], - "task": api.Session["AVALON_TASK"].lower(), + "task": api.Session["AVALON_TASK"], "family": data["avalon"]["family"], "project": {"name": project_document["name"], "code": project_document["data"].get("code", '')}, @@ -265,7 +264,9 @@ def create_write_node(name, data, input=None, prenodes=None): anatomy_filled = format_anatomy(data) except Exception as e: - log.error("problem with resolving anatomy tepmlate: {}".format(e)) + msg = "problem with resolving anatomy tepmlate: {}".format(e) + log.error(msg) + nuke.message(msg) # build file path to workfiles fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/") @@ -372,7 +373,7 @@ def create_write_node(name, data, input=None, prenodes=None): now_node.setInput(0, prev_node) # imprinting group node - GN = avalon.nuke.imprint(GN, data["avalon"]) + avalon.nuke.imprint(GN, data["avalon"]) divider = nuke.Text_Knob('') GN.addKnob(divider) @@ -430,7 +431,7 @@ def add_deadline_tab(node): node.addKnob(nuke.Tab_Knob("Deadline")) knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size") - knob.setValue(1) + knob.setValue(0) node.addKnob(knob) knob = nuke.Int_Knob("deadlinePriority", "Priority") @@ -517,11 +518,6 @@ class WorkfileSettings(object): self.data = kwargs def get_nodes(self, nodes=None, nodes_filter=None): - # filter out only dictionaries for node creation - # - # print("\n\n") - # pprint(self._nodes) - # if not isinstance(nodes, list) and not isinstance(nodes_filter, list): return [n for n in nuke.allNodes()] @@ -543,8 +539,11 @@ class WorkfileSettings(object): viewer_dict (dict): adjustments from presets ''' - assert isinstance(viewer_dict, dict), log.error( - "set_viewers_colorspace(): argument should be dictionary") + if not isinstance(viewer_dict, dict): + msg = "set_viewers_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) + return filter_knobs = [ "viewerProcess", @@ -592,8 +591,10 @@ class WorkfileSettings(object): root_dict (dict): adjustmensts from presets ''' - assert isinstance(root_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(root_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) log.debug(">> root_dict: {}".format(root_dict)) @@ -618,7 +619,8 @@ class WorkfileSettings(object): # third set ocio custom path if root_dict.get("customOCIOConfigPath"): self._root_node["customOCIOConfigPath"].setValue( - str(root_dict["customOCIOConfigPath"]).format(**os.environ) + str(root_dict["customOCIOConfigPath"]).format( + **os.environ).replace("\\", "/") ) log.debug("nuke.root()['{}'] changed to: {}".format( "customOCIOConfigPath", root_dict["customOCIOConfigPath"])) @@ -638,12 +640,105 @@ class WorkfileSettings(object): write_dict (dict): nuke write node as dictionary ''' - # TODO: complete this function so any write node in # scene will have fixed colorspace following presets for the project - assert isinstance(write_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(write_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + log.error(msg) + return - log.debug("__ 
set_writes_colorspace(): {}".format(write_dict)) + from avalon.nuke import get_avalon_knob_data + + for node in nuke.allNodes(): + + if node.Class() in ["Viewer", "Dot"]: + continue + + # get data from avalon knob + avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"]) + + if not avalon_knob_data: + continue + + if avalon_knob_data["id"] != "pyblish.avalon.instance": + continue + + # establish families + families = [avalon_knob_data["family"]] + if avalon_knob_data.get("families"): + families.append(avalon_knob_data.get("families")) + + # except disabled nodes but exclude backdrops in test + for fmly, knob in write_dict.items(): + write = None + if (fmly in families): + # Add all nodes in group instances. + if node.Class() == "Group": + node.begin() + for x in nuke.allNodes(): + if x.Class() == "Write": + write = x + node.end() + elif node.Class() == "Write": + write = node + else: + log.warning("Wrong write node Class") + + write["colorspace"].setValue(str(knob["colorspace"])) + log.info( + "Setting `{0}` to `{1}`".format( + write.name(), + knob["colorspace"])) + + def set_reads_colorspace(self, reads): + """ Setting colorspace to Read nodes + + Looping trought all read nodes and tries to set colorspace based on regex rules in presets + """ + changes = dict() + for n in nuke.allNodes(): + file = nuke.filename(n) + if not n.Class() == "Read": + continue + + # load nuke presets for Read's colorspace + read_clrs_presets = get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + log.debug(preset_clrsp) + if preset_clrsp is not None: + current = n["colorspace"].value() + future = str(preset_clrsp) + if current != future: + changes.update({ + n.name(): { + "from": current, + "to": future + } + }) + log.debug(changes) + if changes: + msg = "Read nodes are not set to correct colospace:\n\n" + for nname, knobs in changes.items(): + msg += str(" - node: '{0}' is now '{1}' " + "but should be '{2}'\n").format( + nname, knobs["from"], knobs["to"] + ) + + msg += "\nWould you like to change it?" + + if nuke.ask(msg): + for nname, knobs in changes.items(): + n = nuke.toNode(nname) + n["colorspace"].setValue(knobs["to"]) + log.info( + "Setting `{0}` to `{1}`".format( + nname, + knobs["to"])) def set_colorspace(self): ''' Setting colorpace following presets @@ -653,25 +748,33 @@ class WorkfileSettings(object): try: self.set_root_colorspace(nuke_colorspace["root"]) except AttributeError: - log.error( - "set_colorspace(): missing `root` settings in template") + msg = "set_colorspace(): missing `root` settings in template" + try: self.set_viewers_colorspace(nuke_colorspace["viewer"]) except AttributeError: - log.error( - "set_colorspace(): missing `viewer` settings in template") + msg = "set_colorspace(): missing `viewer` settings in template" + nuke.message(msg) + log.error(msg) + try: self.set_writes_colorspace(nuke_colorspace["write"]) except AttributeError: - log.error( - "set_colorspace(): missing `write` settings in template") + msg = "set_colorspace(): missing `write` settings in template" + nuke.message(msg) + log.error(msg) + + reads = nuke_colorspace.get("read") + if reads: + self.set_reads_colorspace(reads) try: for key in nuke_colorspace: log.debug("Preset's colorspace key: {}".format(key)) except TypeError: - log.error("Nuke is not in templates! 
\n\n\n" - "contact your supervisor!") + msg = "Nuke is not in templates! Contact your supervisor!" + nuke.message(msg) + log.error(msg) def reset_frame_range_handles(self): """Set frame range to current asset""" @@ -683,6 +786,8 @@ class WorkfileSettings(object): return data = self._asset_entity["data"] + log.debug("__ asset data: `{}`".format(data)) + missing_cols = [] check_cols = ["fps", "frameStart", "frameEnd", "handleStart", "handleEnd"] @@ -758,13 +863,13 @@ class WorkfileSettings(object): } if any(x for x in data.values() if x is None): - log.error( - "Missing set shot attributes in DB." - "\nContact your supervisor!." - "\n\nWidth: `{width}`" - "\nHeight: `{height}`" - "\nPixel Asspect: `{pixel_aspect}`".format(**data) - ) + msg = ("Missing set shot attributes in DB." + "\nContact your supervisor!" + "\n\nWidth: `{width}`" + "\nHeight: `{height}`" + "\nPixel Aspect: `{pixel_aspect}`").format(**data) + log.error(msg) + nuke.message(msg) bbox = self._asset_entity.get('data', {}).get('crop') @@ -781,10 +886,10 @@ class WorkfileSettings(object): ) except Exception as e: bbox = None - log.error( - "{}: {} \nFormat:Crop need to be set with dots, example: " - "0.0.1920.1080, /nSetting to default".format(__name__, e) - ) + msg = ("{}:{} \nFormat:Crop needs to be set with dots, example: " + "0.0.1920.1080, \nSetting to default").format(__name__, e) + log.error(msg) + nuke.message(msg) existing_format = None for format in nuke.formats(): @@ -839,6 +944,26 @@ class WorkfileSettings(object): # add colorspace menu item self.set_colorspace() + def set_favorites(self): + projects_root = os.getenv("AVALON_PROJECTS") + work_dir = os.getenv("AVALON_WORKDIR") + asset = os.getenv("AVALON_ASSET") + project = os.getenv("AVALON_PROJECT") + hierarchy = os.getenv("AVALON_HIERARCHY") + favorite_items = OrderedDict() + + # project + favorite_items.update({"Project dir": os.path.join( + projects_root, project).replace("\\", "/")}) + # shot + favorite_items.update({"Shot dir": os.path.join( + projects_root, project, + hierarchy, asset).replace("\\", "/")}) + # workdir + favorite_items.update({"Work dir": work_dir}) + + set_context_favorites(favorite_items) + def get_hierarchical_attr(entity, attr, default=None): attr_parts = attr.split('.') @@ -962,7 +1087,7 @@ class BuildWorkfile(WorkfileSettings): "project": {"name": self._project["name"], "code": self._project["data"].get("code", '')}, "asset": self._asset or os.environ["AVALON_ASSET"], - "task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(), + "task": kwargs.get("task") or api.Session["AVALON_TASK"], "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(), "version": kwargs.get("version", {}).get("name", 1), "user": getpass.getuser(), @@ -1000,7 +1125,8 @@ class BuildWorkfile(WorkfileSettings): def process(self, regex_filter=None, version=None, - representations=["exr", "dpx", "lutJson", "mov", "preview"]): + representations=["exr", "dpx", "lutJson", "mov", + "preview", "png"]): """ A short description. 
@@ -1041,9 +1167,10 @@ class BuildWorkfile(WorkfileSettings): wn["render"].setValue(True) vn.setInput(0, wn) - bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT", - color='0xcc1102ff', layer=-1, - nodes=[wn]) + # adding backdrop under write + self.create_backdrop(label="Render write \n\n\n\nOUTPUT", + color='0xcc1102ff', layer=-1, + nodes=[wn]) # move position self.position_up(4) @@ -1057,10 +1184,12 @@ class BuildWorkfile(WorkfileSettings): version=version, representations=representations) - log.info("__ subsets: `{}`".format(subsets)) + for name, subset in subsets.items(): + log.debug("___________________") + log.debug(name) + log.debug(subset["version"]) nodes_backdrop = list() - for name, subset in subsets.items(): if "lut" in name: continue @@ -1090,9 +1219,10 @@ class BuildWorkfile(WorkfileSettings): # move position self.position_right() - bdn = self.create_backdrop(label="Loaded Reads", - color='0x2d7702ff', layer=-1, - nodes=nodes_backdrop) + # adding backdrop under all read nodes + self.create_backdrop(label="Loaded Reads", + color='0x2d7702ff', layer=-1, + nodes=nodes_backdrop) def read_loader(self, representation): """ @@ -1240,8 +1370,8 @@ class ExporterReview: else: self.fname = os.path.basename(self.path_in) self.fhead = os.path.splitext(self.fname)[0] + "." - self.first_frame = self.instance.data.get("frameStart", None) - self.last_frame = self.instance.data.get("frameEnd", None) + self.first_frame = self.instance.data.get("frameStartHandle", None) + self.last_frame = self.instance.data.get("frameEndHandle", None) if "#" in self.fhead: self.fhead = self.fhead.replace("#", "")[:-1] @@ -1256,7 +1386,7 @@ class ExporterReview: 'ext': self.ext, 'files': self.file, "stagingDir": self.staging_dir, - "anatomy_template": "publish", + "anatomy_template": "render", "tags": [self.name.replace("_", "-")] + add_tags } diff --git a/pype/nuke/presets.py b/pype/nuke/presets.py index e0c12e2671..a413ccc878 100644 --- a/pype/nuke/presets.py +++ b/pype/nuke/presets.py @@ -1,6 +1,6 @@ from pype import api as pype from pypeapp import Anatomy, config - +import nuke log = pype.Logger().get_logger(__name__, "nuke") @@ -28,7 +28,7 @@ def get_node_dataflow_preset(**kwarg): families = kwarg.get("families", []) preset = kwarg.get("preset", None) # omit < 2.0.0v - assert any([host, cls]), log.error( + assert any([host, cls]), nuke.message( "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) nuke_dataflow = get_dataflow_preset().get(str(host), None) @@ -56,8 +56,10 @@ def get_node_colorspace_preset(**kwarg): families = kwarg.get("families", []) preset = kwarg.get("preset", None) # omit < 2.0.0v - assert any([host, cls]), log.error( - "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) + if not any([host, cls]): + msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__) + log.error(msg) + nuke.message(msg) nuke_colorspace = get_colorspace_preset().get(str(host), None) nuke_colorspace_node = nuke_colorspace.get(str(cls), None) diff --git a/pype/nuke/utils.py b/pype/nuke/utils.py index 7583221696..aa5bc1077e 100644 --- a/pype/nuke/utils.py +++ b/pype/nuke/utils.py @@ -3,6 +3,23 @@ import nuke from avalon.nuke import lib as anlib +def set_context_favorites(favorites={}): + """ Adding favorite folders to nuke's browser + + Arguments: + favorites (dict): pairs of {name:path} + """ + dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) + icon_path = os.path.join(dir, 'res', 'icons', 'folder-favorite3.png') + + for name, path in favorites.items(): + 
nuke.addFavoriteDir( + name, + path, + nuke.IMAGE | nuke.SCRIPT | nuke.GEO, + icon=icon_path) + + def get_node_outputs(node): ''' Return a dictionary of the nodes and pipes that are connected to node diff --git a/pype/nukestudio/__init__.py b/pype/nukestudio/__init__.py index 097f077e15..75825d188a 100644 --- a/pype/nukestudio/__init__.py +++ b/pype/nukestudio/__init__.py @@ -51,7 +51,7 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -def install(config): +def install(): """ Installing Nukestudio integration for avalon diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py index c71e2cb999..774a9d45bf 100644 --- a/pype/nukestudio/lib.py +++ b/pype/nukestudio/lib.py @@ -1,4 +1,5 @@ import os +import re import sys import hiero import pyblish.api @@ -7,7 +8,6 @@ from avalon.vendor.Qt import (QtWidgets, QtGui) import pype.api as pype from pypeapp import Logger - log = Logger().get_logger(__name__, "nukestudio") cached_process = None @@ -361,3 +361,449 @@ def CreateNukeWorkfile(nodes=None, nodes=nuke_script.getNodes(), **kwargs ) + + +class ClipLoader: + + active_bin = None + + def __init__(self, plugin_cls, context, sequence=None, track=None, **kwargs): + """ Initialize object + + Arguments: + plugin_cls (api.Loader): plugin object + context (dict): loader plugin context + sequence (hiero.core.Sequence): sequence object + track (hiero.core.Track): track object + kwargs (dict)[optional]: possible keys: + projectBinPath: "path/to/binItem" + hieroWorkfileName: "name_of_hiero_project_file_no_extension" + + """ + self.cls = plugin_cls + self.context = context + self.kwargs = kwargs + self.active_project = self._get_active_project() + self.project_bin = self.active_project.clipsBin() + + self.data = dict() + + assert self._set_data(), str("Cannot load selected data, look into " + "database or call your supervisor") + + # inject asset data to representation dict + self._get_asset_data() + log.debug("__init__ self.data: `{}`".format(self.data)) + + # add active components to class + self.active_sequence = self._get_active_sequence(sequence) + self.active_track = self._get_active_track(track) + + def _set_data(self): + """ Gets context and converts it to self.data + data structure: + { + "name": "assetName_subsetName_representationName" + "path": "path/to/file/created/by/get_repr..", + "binPath": "projectBinPath", + } + """ + # create name + repr = self.context["representation"] + repr_cntx = repr["context"] + asset = str(repr_cntx["asset"]) + subset = str(repr_cntx["subset"]) + representation = str(repr_cntx["representation"]) + self.data["clip_name"] = "_".join([asset, subset, representation]) + self.data["track_name"] = "_".join([subset, representation]) + + # gets file path + file = self.cls.fname + if not file: + repr_id = repr["_id"] + log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return None + self.data["path"] = file.replace("\\", "/") + + # convert to hashed path + if repr_cntx.get("frame"): + self._fix_path_hashes() + + # solve project bin structure path + hierarchy = str("/".join(( + "Loader", + repr_cntx["hierarchy"].replace("\\", "/"), + asset + ))) + + self.data["binPath"] = self.kwargs.get( + "projectBinPath", + hierarchy + ) + + return True + + def _fix_path_hashes(self): + """ Convert file path where padding with hashes is needed + """ + file = self.data["path"] + if "#" not in file: + frame = self.context["representation"]["context"].get("frame") + padding = len(frame) + file = file.replace(frame, 
"#"*padding) + self.data["path"] = file + + def _get_active_project(self): + """ Get hiero active project object + """ + fname = self.kwargs.get("hieroWorkfileName", "") + + return next((p for p in hiero.core.projects() + if fname in p.name()), + hiero.core.projects()[-1]) + + def _get_asset_data(self): + """ Get all available asset data + + joins `data` key with asset.data dict into the representation + + """ + asset_name = self.context["representation"]["context"]["asset"] + self.data["assetData"] = pype.get_asset(asset_name)["data"] + + def _make_project_bin(self, hierarchy): + """ Create bins by given hierarchy path + + It will also make sure no duplicate bins will be created + + Arguments: + hierarchy (str): path divided by slashes "bin0/bin1/bin2" + + Returns: + bin (hiero.core.BinItem): with the bin to be used for mediaItem + """ + if self.active_bin: + return self.active_bin + + assert hierarchy != "", "Please add hierarchy!" + log.debug("__ hierarchy1: `{}`".format(hierarchy)) + if '/' in hierarchy: + hierarchy = hierarchy.split('/') + else: + hierarchy = [hierarchy] + + parent_bin = None + for i, name in enumerate(hierarchy): + # if first index and list is more than one long + if i == 0: + bin = next((bin for bin in self.project_bin.bins() + if name in bin.name()), None) + if not bin: + bin = hiero.core.Bin(name) + self.project_bin.addItem(bin) + log.debug("__ bin.name: `{}`".format(bin.name())) + parent_bin = bin + + # if second to last + elif (i >= 1) and (i <= (len(hierarchy) - 1)): + bin = next((bin for bin in parent_bin.bins() + if name in bin.name()), None) + if not bin: + bin = hiero.core.Bin(name) + parent_bin.addItem(bin) + + parent_bin = bin + + return parent_bin + + def _make_track_item(self): + """ Create track item with """ + pass + + def _set_clip_color(self, last_version=True): + """ Sets color of clip on clip/track item + + Arguments: + last_version (bool): True = green | False = red + """ + pass + + def _set_container_tag(self, item, metadata): + """ Sets container tag to given clip/track item + + Arguments: + item (hiero.core.BinItem or hiero.core.TrackItem) + metadata (dict): data to be added to tag + """ + pass + + def _get_active_sequence(self, sequence): + if not sequence: + return hiero.ui.activeSequence() + else: + return sequence + + def _get_active_track(self, track): + if not track: + track_name = self.data["track_name"] + else: + track_name = track.name() + + track_pass = next( + (t for t in self.active_sequence.videoTracks() + if t.name() in track_name), None + ) + + if not track_pass: + track_pass = hiero.core.VideoTrack(track_name) + self.active_sequence.addTrack(track_pass) + + return track_pass + + def load(self): + log.debug("__ active_project: `{}`".format(self.active_project)) + log.debug("__ active_sequence: `{}`".format(self.active_sequence)) + + # create project bin for the media to be imported into + self.active_bin = self._make_project_bin(self.data["binPath"]) + log.debug("__ active_bin: `{}`".format(self.active_bin)) + + log.debug("__ version.data: `{}`".format( + self.context["version"]["data"])) + + # create mediaItem in active project bin + # create clip media + media = hiero.core.MediaSource(self.data["path"]) + media_duration = int(media.duration()) + + handle_start = int(self.data["assetData"]["handleStart"]) + handle_end = int(self.data["assetData"]["handleEnd"]) + + clip_in = int(self.data["assetData"]["clipIn"]) + clip_out = int(self.data["assetData"]["clipOut"]) + + log.debug("__ media_duration: `{}`".format(media_duration)) + 
log.debug("__ handle_start: `{}`".format(handle_start)) + log.debug("__ handle_end: `{}`".format(handle_end)) + log.debug("__ clip_in: `{}`".format(clip_in)) + log.debug("__ clip_out: `{}`".format(clip_out)) + + # check if slate is included + # either in version data families or by calculating frame diff + slate_on = next( + (f for f in self.context["version"]["data"]["families"] + if "slate" in f), + None) or bool((( + clip_out - clip_in + 1) + handle_start + handle_end + ) - media_duration) + + log.debug("__ slate_on: `{}`".format(slate_on)) + + # calculate slate differences + if slate_on: + media_duration -= 1 + handle_start += 1 + + fps = self.data["assetData"]["fps"] + + # create Clip from Media + _clip = hiero.core.Clip(media) + _clip.setName(self.data["clip_name"]) + + # add Clip to bin if not there yet + if self.data["clip_name"] not in [ + b.name() + for b in self.active_bin.items()]: + binItem = hiero.core.BinItem(_clip) + self.active_bin.addItem(binItem) + + _source = next((item for item in self.active_bin.items() + if self.data["clip_name"] in item.name()), None) + + if not _source: + log.warning("Problem with created Source clip: `{}`".format( + self.data["clip_name"])) + + version = next((s for s in _source.items()), None) + clip = version.item() + + # add to track as clip item + track_item = hiero.core.TrackItem( + self.data["clip_name"], hiero.core.TrackItem.kVideo) + + track_item.setSource(clip) + + track_item.setSourceIn(handle_start) + track_item.setTimelineIn(clip_in) + + track_item.setSourceOut(media_duration - handle_end) + track_item.setTimelineOut(clip_out) + track_item.setPlaybackSpeed(1) + self.active_track.addTrackItem(track_item) + + log.info("Loading clips: `{}`".format(self.data["clip_name"])) + + +def create_nk_workfile_clips(nk_workfiles, seq=None): + ''' + nk_workfiles is a list of dictionaries like: + [{ + 'path': 'P:/Jakub_testy_pipeline/test_v01.nk', + 'name': 'test', + 'handleStart': 15, # added asymmetrically to handles + 'handleEnd': 10, # added asymmetrically to handles + "clipIn": 16, + "frameStart": 991, + "frameEnd": 1023, + 'task': 'Comp-tracking', + 'work_dir': 'VFX_PR', + 'shot': '00010' + }] + ''' + + proj = hiero.core.projects()[-1] + root = proj.clipsBin() + + if not seq: + seq = hiero.core.Sequence('NewSequences') + root.addItem(hiero.core.BinItem(seq)) + # todo will need to define this better + # track = seq[1] # lazy example to get a destination# track + clips_lst = [] + for nk in nk_workfiles: + task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']]) + bin = create_bin_in_project(task_path, proj) + + if nk['task'] not in seq.videoTracks(): + track = hiero.core.VideoTrack(nk['task']) + seq.addTrack(track) + else: + track = seq.tracks(nk['task']) + + # create clip media + media = hiero.core.MediaSource(nk['path']) + media_in = int(media.startTime() or 0) + media_duration = int(media.duration() or 0) + + handle_start = nk.get("handleStart") + handle_end = nk.get("handleEnd") + + if media_in: + source_in = media_in + handle_start + else: + source_in = nk["frameStart"] + handle_start + + if media_duration: + source_out = (media_in + media_duration - 1) - handle_end + else: + source_out = nk["frameEnd"] - handle_end + + source = hiero.core.Clip(media) + + name = os.path.basename(os.path.splitext(nk['path'])[0]) + split_name = split_by_client_version(name)[0] or name + + # add to bin as clip item + items_in_bin = [b.name() for b in bin.items()] + if split_name not in items_in_bin: + binItem = hiero.core.BinItem(source) + bin.addItem(binItem) + 
+ new_source = [ + item for item in bin.items() if split_name in item.name() + ][0].items()[0].item() + + # add to track as clip item + trackItem = hiero.core.TrackItem( + split_name, hiero.core.TrackItem.kVideo) + trackItem.setSource(new_source) + trackItem.setSourceIn(source_in) + trackItem.setSourceOut(source_out) + trackItem.setTimelineIn(nk["clipIn"]) + trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) + track.addTrackItem(trackItem) + clips_lst.append(trackItem) + + return clips_lst + + +def create_bin_in_project(bin_name='', project=''): + ''' + create bin in project and + if the bin_name is "bin1/bin2/bin3" it will create whole depth + ''' + + if not project: + # get the first loaded project + project = hiero.core.projects()[-1] + if not bin_name: + return None + if '/' in bin_name: + bin_name = bin_name.split('/') + else: + bin_name = [bin_name] + + clipsBin = project.clipsBin() + + done_bin_lst = [] + for i, b in enumerate(bin_name): + if i == 0 and len(bin_name) > 1: + if b in [bin.name() for bin in clipsBin.bins()]: + bin = [bin for bin in clipsBin.bins() if b in bin.name()][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + clipsBin.addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i >= 1 and i < len(bin_name) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + + elif i == len(bin_name) - 1: + if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: + bin = [ + bin for bin in done_bin_lst[i - 1].bins() + if b in bin.name() + ][0] + done_bin_lst.append(bin) + else: + create_bin = hiero.core.Bin(b) + done_bin_lst[i - 1].addItem(create_bin) + done_bin_lst.append(create_bin) + # print [bin.name() for bin in clipsBin.bins()] + return done_bin_lst[-1] + + +def split_by_client_version(string): + regex = r"[/_.]v\d+" + try: + matches = re.findall(regex, string, re.IGNORECASE) + return string.split(matches[0]) + except Exception as e: + print(e) + return None + + +# nk_workfiles = [{ +# 'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk', +# 'name': '120sh020_platesMain', +# 'handles': 10, +# 'handleStart': 10, +# 'handleEnd': 10, +# "clipIn": 16, +# "frameStart": 991, +# "frameEnd": 1023, +# 'task': 'platesMain', +# 'work_dir': 'shots', +# 'shot': '120sh020' +# }] diff --git a/pype/nukestudio/menu.py b/pype/nukestudio/menu.py index a996389524..ee9af44e74 100644 --- a/pype/nukestudio/menu.py +++ b/pype/nukestudio/menu.py @@ -5,13 +5,6 @@ from pypeapp import Logger from avalon.api import Session from hiero.ui import findMenuAction -# this way we secure compatibility between nuke 10 and 11 -try: - from PySide.QtGui import * -except Exception: - from PySide2.QtGui import * - from PySide2.QtWidgets import * - from .tags import add_tags_from_presets from .lib import ( @@ -50,14 +43,8 @@ def install(): """ # here is the best place to add menu - from avalon.tools import ( - creator, - publish, - cbloader, - cbsceneinventory, - contextmanager, - libraryloader - ) + from avalon.tools import publish, cbloader + from avalon.vendor.Qt import QtGui menu_name = os.environ['AVALON_LABEL'] @@ -67,94 +54,57 @@ def install(): self._change_context_menu = context_label - # Grab Hiero's MenuBar - M = 
hiero.ui.menuBar() - try: check_made_menu = findMenuAction(menu_name) except Exception: - pass + check_made_menu = None if not check_made_menu: - menu = M.addMenu(menu_name) + # Grab Hiero's MenuBar + menu = hiero.ui.menuBar().addMenu(menu_name) else: menu = check_made_menu.menu() - actions = [ - { - 'parent': context_label, - 'action': QAction('Set Context', None), - 'function': contextmanager.show, - 'icon': QIcon('icons:Position.png') - }, - "separator", - { - 'action': QAction("Work Files...", None), - 'function': set_workfiles, - 'icon': QIcon('icons:Position.png') - }, - { - 'action': QAction('Create Default Tags..', None), - 'function': add_tags_from_presets, - 'icon': QIcon('icons:Position.png') - }, - "separator", - # { - # 'action': QAction('Create...', None), - # 'function': creator.show, - # 'icon': QIcon('icons:ColorAdd.png') - # }, - # { - # 'action': QAction('Load...', None), - # 'function': cbloader.show, - # 'icon': QIcon('icons:CopyRectangle.png') - # }, - { - 'action': QAction('Publish...', None), - 'function': publish.show, - 'icon': QIcon('icons:Output.png') - }, - # { - # 'action': QAction('Manage...', None), - # 'function': cbsceneinventory.show, - # 'icon': QIcon('icons:ModifyMetaData.png') - # }, - { - 'action': QAction('Library...', None), - 'function': libraryloader.show, - 'icon': QIcon('icons:ColorAdd.png') - }, - "separator", - { - 'action': QAction('Reload pipeline...', None), - 'function': reload_config, - 'icon': QIcon('icons:ColorAdd.png') - }] + context_label_action = menu.addAction(context_label) + context_label_action.setEnabled(False) - # Create menu items - for a in actions: - add_to_menu = menu - if isinstance(a, dict): - # create action - for k in a.keys(): - if 'parent' in k: - submenus = [sm for sm in a[k].split('/')] - submenu = None - for sm in submenus: - if submenu: - submenu.addMenu(sm) - else: - submenu = menu.addMenu(sm) - add_to_menu = submenu - if 'action' in k: - action = a[k] - elif 'function' in k: - action.triggered.connect(a[k]) - elif 'icon' in k: - action.setIcon(a[k]) + menu.addSeparator() - # add action to menu - add_to_menu.addAction(action) - hiero.ui.registerAction(action) - elif isinstance(a, str): - add_to_menu.addSeparator() + workfiles_action = menu.addAction("Work Files...") + workfiles_action.setIcon(QtGui.QIcon("icons:Position.png")) + workfiles_action.triggered.connect(set_workfiles) + + default_tags_action = menu.addAction("Create Default Tags...") + default_tags_action.setIcon(QtGui.QIcon("icons:Position.png")) + default_tags_action.triggered.connect(add_tags_from_presets) + + menu.addSeparator() + + publish_action = menu.addAction("Publish...") + publish_action.setIcon(QtGui.QIcon("icons:Output.png")) + publish_action.triggered.connect( + lambda *args: publish.show(hiero.ui.mainWindow()) + ) + + loader_action = menu.addAction("Load...") + loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png")) + loader_action.triggered.connect(cbloader.show) + menu.addSeparator() + + reload_action = menu.addAction("Reload pipeline...") + reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) + reload_action.triggered.connect(reload_config) + + # Is this required? 
+ # hiero.ui.registerAction(context_label_action) + # hiero.ui.registerAction(workfiles_action) + # hiero.ui.registerAction(default_tags_action) + # hiero.ui.registerAction(publish_action) + # hiero.ui.registerAction(loader_action) + # hiero.ui.registerAction(reload_action) + + self.context_label_action = context_label_action + self.workfile_actions = workfiles_action + self.default_tags_action = default_tags_action + self.publish_action = publish_action + self.reload_action = reload_action diff --git a/pype/nukestudio/precomp_clip.py b/pype/nukestudio/precomp_clip.py deleted file mode 100644 index b544b6e654..0000000000 --- a/pype/nukestudio/precomp_clip.py +++ /dev/null @@ -1,188 +0,0 @@ -import hiero.core -import hiero.ui - -import re -import os - - -def create_nk_script_clips(script_lst, seq=None): - ''' - nk_scripts is list of dictionaries like: - [{ - 'path': 'P:/Jakub_testy_pipeline/test_v01.nk', - 'name': 'test', - 'handles': 10, - 'handleStart': 15, # added asymetrically to handles - 'handleEnd': 10, # added asymetrically to handles - "clipIn": 16, - "frameStart": 991, - "frameEnd": 1023, - 'task': 'Comp-tracking', - 'work_dir': 'VFX_PR', - 'shot': '00010' - }] - ''' - - proj = hiero.core.projects()[-1] - root = proj.clipsBin() - - if not seq: - seq = hiero.core.Sequence('NewSequences') - root.addItem(hiero.core.BinItem(seq)) - # todo will ned to define this better - # track = seq[1] # lazy example to get a destination# track - clips_lst = [] - for nk in script_lst: - task_path = '/'.join([nk['work_dir'], nk['shot'], nk['task']]) - bin = create_bin_in_project(task_path, proj) - - if nk['task'] not in seq.videoTracks(): - track = hiero.core.VideoTrack(nk['task']) - seq.addTrack(track) - else: - track = seq.tracks(nk['task']) - - # create slip media - print("__ path: `{}`".format(nk['path'])) - - media = hiero.core.MediaSource(nk['path']) - media_in = int(media.startTime() or 0) - media_duration = int(media.duration() or 0) - - handle_start = nk.get("handleStart") or nk['handles'] - handle_end = nk.get("handleEnd") or nk['handles'] - - if media_in: - source_in = media_in + handle_start - else: - source_in = nk["frameStart"] + handle_start - - if media_duration: - source_out = (media_in + media_duration - 1) - handle_end - else: - source_out = nk["frameEnd"] - handle_end - - print("__ media: `{}`".format(media)) - print("__ media_in: `{}`".format(media_in)) - print("__ media_duration : `{}`".format(media_duration)) - print("__ source_in: `{}`".format(source_in)) - print("__ source_out : `{}`".format(source_out)) - - source = hiero.core.Clip(media) - print("__ source : `{}`".format(source)) - print("__ source.sourceIn(): `{}`".format(source.sourceIn())) - - name = os.path.basename(os.path.splitext(nk['path'])[0]) - split_name = split_by_client_version(name)[0] or name - - print("__ split_name: `{}`".format(split_name)) - - # add to bin as clip item - items_in_bin = [b.name() for b in bin.items()] - if split_name not in items_in_bin: - binItem = hiero.core.BinItem(source) - bin.addItem(binItem) - - print("__ bin.items(): `{}`".format(bin.items())) - - new_source = [ - item for item in bin.items() if split_name in item.name() - ][0].items()[0].item() - - print("__ new_source: `{}`".format(new_source)) - print("__ new_source: `{}`".format(new_source)) - - # add to track as clip item - trackItem = hiero.core.TrackItem(split_name, hiero.core.TrackItem.kVideo) - trackItem.setSource(new_source) - trackItem.setSourceIn(source_in) - trackItem.setSourceOut(source_out) - 
trackItem.setSourceIn(source_in) - trackItem.setTimelineIn(nk["clipIn"]) - trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in)) - track.addTrackItem(trackItem) - track.addTrackItem(trackItem) - clips_lst.append(trackItem) - - return clips_lst - - -def create_bin_in_project(bin_name='', project=''): - ''' - create bin in project and - if the bin_name is "bin1/bin2/bin3" it will create whole depth - ''' - - if not project: - # get the first loaded project - project = hiero.core.projects()[-1] - if not bin_name: - return None - if '/' in bin_name: - bin_name = bin_name.split('/') - else: - bin_name = [bin_name] - - clipsBin = project.clipsBin() - - done_bin_lst = [] - for i, b in enumerate(bin_name): - if i == 0 and len(bin_name) > 1: - if b in [bin.name() for bin in clipsBin.bins()]: - bin = [bin for bin in clipsBin.bins() if b in bin.name()][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - clipsBin.addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i >= 1 and i < len(bin_name) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - - elif i == len(bin_name) - 1: - if b in [bin.name() for bin in done_bin_lst[i - 1].bins()]: - bin = [ - bin for bin in done_bin_lst[i - 1].bins() - if b in bin.name() - ][0] - done_bin_lst.append(bin) - else: - create_bin = hiero.core.Bin(b) - done_bin_lst[i - 1].addItem(create_bin) - done_bin_lst.append(create_bin) - # print [bin.name() for bin in clipsBin.bins()] - return done_bin_lst[-1] - - -def split_by_client_version(string): - regex = r"[/_.]v\d+" - try: - matches = re.findall(regex, string, re.IGNORECASE) - return string.split(matches[0]) - except Exception as e: - print(e) - return None - - -script_lst = [{ - 'path': 'C:/Users/hubert/_PYPE_testing/projects/D001_projectx/episodes/ep120/ep120sq01/120sh020/publish/plates/platesMain/v023/prjx_120sh020_platesMain_v023.nk', - 'name': '120sh020_platesMain', - 'handles': 10, - 'handleStart': 10, - 'handleEnd': 10, - "clipIn": 16, - "frameStart": 991, - "frameEnd": 1023, - 'task': 'platesMain', - 'work_dir': 'shots', - 'shot': '120sh020' -}] diff --git a/pype/nukestudio/workio.py b/pype/nukestudio/workio.py index c7484b826b..1c7c77dab9 100644 --- a/pype/nukestudio/workio.py +++ b/pype/nukestudio/workio.py @@ -73,5 +73,5 @@ def current_file(): return normalised -def work_root(): - return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/") +def work_root(session): + return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py similarity index 92% rename from pype/plugins/ftrack/publish/integrate_ftrack_comments.py rename to pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py index 9d0b7b3ab9..4be9f7fc3a 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py +++ b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py @@ -7,8 +7,9 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin): """Create comments in Ftrack.""" order = pyblish.api.IntegratorOrder - label = "Integrate Comments to Ftrack." 
+ label = "Integrate Comments to Ftrack" families = ["shot"] + enabled = False def process(self, instance): session = instance.context.data["ftrackSession"] diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index d09baec676..0aad3b2433 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -23,25 +23,79 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): # Collect session session = ftrack_api.Session() + self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) context.data["ftrackSession"] = session # Collect task - project = os.environ.get('AVALON_PROJECT', '') - asset = os.environ.get('AVALON_ASSET', '') - task = os.environ.get('AVALON_TASK', None) - self.log.debug(task) + project_name = os.environ.get('AVALON_PROJECT', '') + asset_name = os.environ.get('AVALON_ASSET', '') + task_name = os.environ.get('AVALON_TASK', None) + + # Find project entity + project_query = 'Project where full_name is "{0}"'.format(project_name) + self.log.debug("Project query: < {0} >".format(project_query)) + project_entity = list(session.query(project_query).all()) + if len(project_entity) == 0: + raise AssertionError( + "Project \"{0}\" not found in Ftrack.".format(project_name) + ) + # QUESTION Is possible to happen? + elif len(project_entity) > 1: + raise AssertionError(( + "Found more than one project with name \"{0}\" in Ftrack." + ).format(project_name)) + + project_entity = project_entity[0] + self.log.debug("Project found: {0}".format(project_entity)) + + # Find asset entity + entity_query = ( + 'TypedContext where project_id is "{0}"' + ' and name is "{1}"' + ).format(project_entity["id"], asset_name) + self.log.debug("Asset entity query: < {0} >".format(entity_query)) + asset_entities = [] + for entity in session.query(entity_query).all(): + # Skip tasks + if entity.entity_type.lower() != "task": + asset_entities.append(entity) + + if len(asset_entities) == 0: + raise AssertionError(( + "Entity with name \"{0}\" not found" + " in Ftrack project \"{1}\"." + ).format(asset_name, project_name)) + + elif len(asset_entities) > 1: + raise AssertionError(( + "Found more than one entity with name \"{0}\"" + " in Ftrack project \"{1}\"." 
+ ).format(asset_name, project_name)) + + asset_entity = asset_entities[0] + self.log.debug("Asset found: {0}".format(asset_entity)) + + # Find task entity if task is set + if task_name: + task_query = ( + 'Task where name is "{0}" and parent_id is "{1}"' + ).format(task_name, asset_entity["id"]) + self.log.debug("Task entity query: < {0} >".format(task_query)) + task_entity = session.query(task_query).first() + if not task_entity: + self.log.warning( + "Task entity with name \"{0}\" was not found.".format( + task_name + ) + ) + else: + self.log.debug("Task entity found: {0}".format(task_entity)) - if task: - result = session.query('Task where\ - project.full_name is "{0}" and\ - name is "{1}" and\ - parent.name is "{2}"'.format(project, task, asset)).one() - context.data["ftrackTask"] = result else: - result = session.query('TypedContext where\ - project.full_name is "{0}" and\ - name is "{1}"'.format(project, asset)).one() - context.data["ftrackEntity"] = result + task_entity = None + self.log.warning("Task name is not set.") - self.log.info(result) + context.data["ftrackProject"] = project_entity + context.data["ftrackEntity"] = asset_entity + context.data["ftrackTask"] = task_entity diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index c51685f84d..cd94b2a150 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -77,6 +77,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): info_msg = "Created new {entity_type} with data: {data}" info_msg += ", metadata: {metadata}." + used_asset_versions = [] # Iterate over components and publish for data in instance.data.get("ftrackComponentsList", []): @@ -148,6 +149,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): assetversion_cust_attrs = _assetversion_data.pop( "custom_attributes", {} ) + asset_version_comment = _assetversion_data.pop( + "comment", None + ) assetversion_data.update(_assetversion_data) assetversion_entity = session.query( @@ -185,6 +189,20 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): existing_assetversion_metadata.update(assetversion_metadata) assetversion_entity["metadata"] = existing_assetversion_metadata + # Add comment + if asset_version_comment: + assetversion_entity["comment"] = asset_version_comment + try: + session.commit() + except Exception: + session.rollback() + self.log.warning(( + "Comment was not possible to set for AssetVersion " + "\"{0}\". 
Can't set its value to: \"{1}\"" + ).format( + assetversion_entity["id"], str(asset_version_comment) + )) + + # Adding Custom Attributes for attr, val in assetversion_cust_attrs.items(): if attr in assetversion_entity["custom_attributes"]: @@ -369,3 +387,14 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): tp, value, tb = sys.exc_info() session.rollback() six.reraise(tp, value, tb) + + if assetversion_entity not in used_asset_versions: + used_asset_versions.append(assetversion_entity) + + asset_versions_key = "ftrackIntegratedAssetVersions" + if asset_versions_key not in instance.data: + instance.data[asset_versions_key] = [] + + for asset_version in used_asset_versions: + if asset_version not in instance.data[asset_versions_key]: + instance.data[asset_versions_key].append(asset_version) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py index 78583b0a2f..591dcf0dc2 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py @@ -73,9 +73,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): ''' start_frame = 0 end_frame = 1 - if 'endFrameReview' in comp and 'startFrameReview' in comp: + if 'frameEndFtrack' in comp and 'frameStartFtrack' in comp: end_frame += ( - comp['endFrameReview'] - comp['startFrameReview'] + comp['frameEndFtrack'] - comp['frameStartFtrack'] ) else: end_frame += ( @@ -127,7 +127,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin): # Add custom attributes for AssetVersion assetversion_cust_attrs = {} - intent_val = instance.context.data.get("intent") + intent_val = instance.context.data.get("intent", {}).get("value") if intent_val: assetversion_cust_attrs["intent"] = intent_val diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py new file mode 100644 index 0000000000..679010ca58 --- /dev/null +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -0,0 +1,143 @@ +import sys +import json +import pyblish.api +import six + + +class IntegrateFtrackNote(pyblish.api.InstancePlugin): + """Create comments in Ftrack.""" + + # Must be after integrate asset new + order = pyblish.api.IntegratorOrder + 0.4999 + label = "Integrate Ftrack note" + families = ["ftrack"] + optional = True + + # Can be set in presets: + # - Allows only `intent` and `comment` keys + note_with_intent_template = "{intent}: {comment}" + # - note label must exist in Ftrack + note_labels = [] + + def get_intent_label(self, session, intent_value): + if not intent_value: + return + + intent_configurations = session.query( + "CustomAttributeConfiguration where key is intent" + ).all() + if not intent_configurations: + return + + intent_configuration = intent_configurations[0] + if len(intent_configurations) > 1: + self.log.warning(( + "Found more than one `intent` custom attribute." + " Using first found." 
+ )) + + config = intent_configuration.get("config") + if not config: + return + + configuration = json.loads(config) + items = configuration.get("data") + if not items: + return + + if sys.version_info[0] < 3: + string_type = basestring + else: + string_type = str + + if isinstance(items, string_type): + items = json.loads(items) + + intent_label = None + for item in items: + if item["value"] == intent_value: + intent_label = item["menu"] + break + + return intent_label + + def process(self, instance): + comment = (instance.context.data.get("comment") or "").strip() + if not comment: + self.log.info("Comment is not set.") + return + + self.log.debug("Comment is set to `{}`".format(comment)) + + session = instance.context.data["ftrackSession"] + + intent_val = instance.context.data.get("intent", {}).get("value") + intent_label = instance.context.data.get("intent", {}).get("label") + final_label = None + if intent_val: + final_label = self.get_intent_label(session, intent_val) + if final_label is None: + final_label = intent_label + + # if intent label is set then format comment + # - it is possible that intent_label is equal to "" (empty string) + if final_label: + msg = "Intent label is set to `{}`.".format(final_label) + comment = self.note_with_intent_template.format(**{ + "intent": final_label, + "comment": comment + }) + + elif intent_val: + msg = ( + "Intent is set to `{}` and was not added" + " to comment because label is set to `{}`." + ).format(intent_val, final_label) + + else: + msg = "Intent is not set." + + self.log.debug(msg) + + asset_versions_key = "ftrackIntegratedAssetVersions" + asset_versions = instance.data.get(asset_versions_key) + if not asset_versions: + self.log.info("There are any integrated AssetVersions") + return + + user = session.query( + "User where username is \"{}\"".format(session.api_user) + ).first() + if not user: + self.log.warning( + "Was not able to query current User {}".format( + session.api_user + ) + ) + + labels = [] + if self.note_labels: + all_labels = session.query("NoteLabel").all() + labels_by_low_name = {lab["name"].lower(): lab for lab in all_labels} + for _label in self.note_labels: + label = labels_by_low_name.get(_label.lower()) + if not label: + self.log.warning( + "Note Label `{}` was not found.".format(_label) + ) + continue + + labels.append(label) + + for asset_version in asset_versions: + asset_version.create_note(comment, author=user, labels=labels) + + try: + session.commit() + self.log.debug("Note added to AssetVersion \"{}\"".format( + str(asset_version) + )) + except Exception: + tp, value, tb = sys.exc_info() + session.rollback() + six.reraise(tp, value, tb) diff --git a/pype/plugins/ftrack/publish/integrate_remove_components.py b/pype/plugins/ftrack/publish/integrate_remove_components.py index bad50f7200..26cac0f1ae 100644 --- a/pype/plugins/ftrack/publish/integrate_remove_components.py +++ b/pype/plugins/ftrack/publish/integrate_remove_components.py @@ -11,13 +11,13 @@ class IntegrateCleanComponentData(pyblish.api.InstancePlugin): label = 'Clean component data' families = ["ftrack"] optional = True - active = True + active = False def process(self, instance): for comp in instance.data['representations']: self.log.debug('component {}'.format(comp)) - + if "%" in comp['published_path'] or "#" in comp['published_path']: continue diff --git a/pype/plugins/global/_publish_unused/collect_deadline_user.py b/pype/plugins/global/_publish_unused/collect_deadline_user.py deleted file mode 100644 index f4d13a0545..0000000000 --- 
a/pype/plugins/global/_publish_unused/collect_deadline_user.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import subprocess - -import pyblish.api - -CREATE_NO_WINDOW = 0x08000000 - - -def deadline_command(cmd): - # Find Deadline - path = os.environ.get("DEADLINE_PATH", None) - assert path is not None, "Variable 'DEADLINE_PATH' must be set" - - executable = os.path.join(path, "deadlinecommand") - if os.name == "nt": - executable += ".exe" - assert os.path.exists( - executable), "Deadline executable not found at %s" % executable - assert cmd, "Must have a command" - - query = (executable, cmd) - - process = subprocess.Popen(query, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - creationflags=CREATE_NO_WINDOW) - out, err = process.communicate() - - return out - - -class CollectDeadlineUser(pyblish.api.ContextPlugin): - """Retrieve the local active Deadline user""" - - order = pyblish.api.CollectorOrder + 0.499 - label = "Deadline User" - - hosts = ['maya', 'fusion', 'nuke'] - families = [ - "renderlayer", - "saver.deadline", - "imagesequence" - ] - - - def process(self, context): - """Inject the current working file""" - user = None - try: - user = deadline_command("GetCurrentUserName").strip() - except: - self.log.warning("Deadline command seems not to be working") - - if not user: - self.log.warning("No Deadline user found. " - "Do you have Deadline installed?") - return - - self.log.info("Found Deadline user: {}".format(user)) - context.data['deadlineUser'] = user diff --git a/pype/plugins/global/_publish_unused/collect_json.py b/pype/plugins/global/_publish_unused/collect_json.py deleted file mode 100644 index dc5bfb9c81..0000000000 --- a/pype/plugins/global/_publish_unused/collect_json.py +++ /dev/null @@ -1,127 +0,0 @@ -import os -import json -import re - -import pyblish.api -import clique - - -class CollectJSON(pyblish.api.ContextPlugin): - """ Collecting the json files in current directory. """ - - label = "JSON" - order = pyblish.api.CollectorOrder - hosts = ['maya'] - - def version_get(self, string, prefix): - """ Extract version information from filenames. Code from Foundry"s - nukescripts.version_get() - """ - - regex = r"[/_.]{}\d+".format(prefix) - matches = re.findall(regex, string, re.IGNORECASE) - - if not len(matches): - msg = "No '_{}#' found in '{}'".format(prefix, string) - raise ValueError(msg) - return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group() - - def process(self, context): - current_file = context.data.get("currentFile", '') - # Skip if current file is not a directory - if not os.path.isdir(current_file): - return - - # Traverse directory and collect collections from json files. - instances = [] - for root, dirs, files in os.walk(current_file): - for f in files: - if f.endswith(".json"): - with open(os.path.join(root, f)) as json_data: - for data in json.load(json_data): - instances.append(data) - - # Validate instance based on supported families. - valid_families = ["img", "cache", "scene", "mov"] - valid_data = [] - for data in instances: - families = data.get("families", []) + [data["family"]] - family_type = list(set(families) & set(valid_families)) - if family_type: - valid_data.append(data) - - # Create existing output instance. 
- scanned_dirs = [] - files = [] - collections = [] - for data in valid_data: - if "collection" not in data.keys(): - continue - if data["collection"] is None: - continue - - instance_collection = clique.parse(data["collection"]) - - try: - version = self.version_get( - os.path.basename(instance_collection.format()), "v" - )[1] - except KeyError: - # Ignore any output that is not versioned - continue - - # Getting collections of all previous versions and current version - for count in range(1, int(version) + 1): - - # Generate collection - version_string = "v" + str(count).zfill(len(version)) - head = instance_collection.head.replace( - "v" + version, version_string - ) - collection = clique.Collection( - head=head.replace("\\", "/"), - padding=instance_collection.padding, - tail=instance_collection.tail - ) - collection.version = count - - # Scan collection directory - scan_dir = os.path.dirname(collection.head) - if scan_dir not in scanned_dirs and os.path.exists(scan_dir): - for f in os.listdir(scan_dir): - file_path = os.path.join(scan_dir, f) - files.append(file_path.replace("\\", "/")) - scanned_dirs.append(scan_dir) - - # Match files to collection and add - for f in files: - if collection.match(f): - collection.add(f) - - # Skip if no files were found in the collection - if not list(collection): - continue - - # Skip existing collections - if collection in collections: - continue - - instance = context.create_instance(name=data["name"]) - version = self.version_get( - os.path.basename(collection.format()), "v" - )[1] - - basename = os.path.basename(collection.format()) - instance.data["label"] = "{0} - {1}".format( - data["name"], basename - ) - - families = data["families"] + [data["family"]] - family = list(set(valid_families) & set(families))[0] - instance.data["family"] = family - instance.data["families"] = ["output"] - instance.data["collection"] = collection - instance.data["version"] = int(version) - instance.data["publish"] = False - - collections.append(collection) diff --git a/pype/plugins/global/_publish_unused/collect_textures.py b/pype/plugins/global/_publish_unused/collect_textures.py deleted file mode 100644 index c38e911033..0000000000 --- a/pype/plugins/global/_publish_unused/collect_textures.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -import re -import copy -from avalon import io -from pprint import pprint - -import pyblish.api -from avalon import api - - -texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga', - '.psd', '.dpx', '.hdr', '.hdri', '.exr', '.sxr', '.psb'] - - -class CollectTextures(pyblish.api.ContextPlugin): - """ - Gather all texture files in working directory, traversing whole structure. 
- """ - - order = pyblish.api.CollectorOrder - targets = ["texture"] - label = "Textures" - hosts = ["shell"] - - def process(self, context): - - if os.environ.get("PYPE_PUBLISH_PATHS"): - paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) - else: - cwd = context.get("workspaceDir", os.getcwd()) - paths = [cwd] - - textures = [] - for path in paths: - for dir, subdir, files in os.walk(path): - textures.extend( - os.path.join(dir, x) for x in files - if os.path.splitext(x)[1].lower() in texture_extensions) - - self.log.info("Got {} texture files.".format(len(textures))) - if len(textures) < 1: - raise RuntimeError("no textures found.") - - asset_name = os.environ.get("AVALON_ASSET") - family = 'texture' - subset = 'Main' - - project = io.find_one({'type': 'project'}) - asset = io.find_one({ - 'type': 'asset', - 'name': asset_name - }) - - context.data['project'] = project - context.data['asset'] = asset - - for tex in textures: - self.log.info("Processing: {}".format(tex)) - name, ext = os.path.splitext(tex) - simple_name = os.path.splitext(os.path.basename(tex))[0] - instance = context.create_instance(simple_name) - - instance.data.update({ - "subset": subset, - "asset": asset_name, - "label": simple_name, - "name": simple_name, - "family": family, - "families": [family, 'ftrack'], - }) - instance.data['destination_list'] = list() - instance.data['representations'] = list() - instance.data['source'] = 'pype command' - - texture_data = {} - texture_data['anatomy_template'] = 'texture' - texture_data["ext"] = ext - texture_data["label"] = simple_name - texture_data["name"] = "texture" - texture_data["stagingDir"] = os.path.dirname(tex) - texture_data["files"] = os.path.basename(tex) - texture_data["thumbnail"] = False - texture_data["preview"] = False - - instance.data["representations"].append(texture_data) - self.log.info("collected instance: {}".format(instance.data)) - - self.log.info("All collected.") diff --git a/pype/plugins/global/_publish_unused/extract_json.py b/pype/plugins/global/_publish_unused/extract_json.py deleted file mode 100644 index 8aff324574..0000000000 --- a/pype/plugins/global/_publish_unused/extract_json.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import json -import datetime -import time - -import pyblish.api -import clique - - -class ExtractJSON(pyblish.api.ContextPlugin): - """ Extract all instances to a serialized json file. """ - - order = pyblish.api.IntegratorOrder - label = "JSON" - hosts = ['maya'] - - def process(self, context): - - workspace = os.path.join( - os.path.dirname(context.data["currentFile"]), "workspace", - "instances") - - if not os.path.exists(workspace): - os.makedirs(workspace) - - output_data = [] - for instance in context: - self.log.debug(instance['data']) - - data = {} - for key, value in instance.data.iteritems(): - if isinstance(value, clique.Collection): - value = value.format() - - try: - json.dumps(value) - data[key] = value - except KeyError: - msg = "\"{0}\"".format(value) - msg += " in instance.data[\"{0}\"]".format(key) - msg += " could not be serialized." 
- self.log.debug(msg) - - output_data.append(data) - - timestamp = datetime.datetime.fromtimestamp( - time.time()).strftime("%Y%m%d-%H%M%S") - filename = timestamp + "_instances.json" - - with open(os.path.join(workspace, filename), "w") as outfile: - outfile.write(json.dumps(output_data, indent=4, sort_keys=True)) diff --git a/pype/plugins/global/_publish_unused/extract_quicktime.py b/pype/plugins/global/_publish_unused/extract_quicktime.py deleted file mode 100644 index 76a920b798..0000000000 --- a/pype/plugins/global/_publish_unused/extract_quicktime.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import pyblish.api -import subprocess -import clique - - -class ExtractQuicktimeEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ - - label = "Extract Quicktime" - order = pyblish.api.ExtractorOrder - families = ["imagesequence", "render", "write", "source"] - hosts = ["shell"] - - def process(self, instance): - # fps = instance.data.get("fps") - # start = instance.data.get("startFrame") - # stagingdir = os.path.normpath(instance.data.get("stagingDir")) - # - # collected_frames = os.listdir(stagingdir) - # collections, remainder = clique.assemble(collected_frames) - # - # full_input_path = os.path.join( - # stagingdir, collections[0].format('{head}{padding}{tail}') - # ) - # self.log.info("input {}".format(full_input_path)) - # - # filename = collections[0].format('{head}') - # if not filename.endswith('.'): - # filename += "." 
- # movFile = filename + "mov" - # full_output_path = os.path.join(stagingdir, movFile) - # - # self.log.info("output {}".format(full_output_path)) - # - # config_data = instance.context.data['output_repre_config'] - # - # proj_name = os.environ.get('AVALON_PROJECT', '__default__') - # profile = config_data.get(proj_name, config_data['__default__']) - # - # input_args = [] - # # overrides output file - # input_args.append("-y") - # # preset's input data - # input_args.extend(profile.get('input', [])) - # # necessary input data - # input_args.append("-start_number {}".format(start)) - # input_args.append("-i {}".format(full_input_path)) - # input_args.append("-framerate {}".format(fps)) - # - # output_args = [] - # # preset's output data - # output_args.extend(profile.get('output', [])) - # # output filename - # output_args.append(full_output_path) - # mov_args = [ - # "ffmpeg", - # " ".join(input_args), - # " ".join(output_args) - # ] - # subprocess_mov = " ".join(mov_args) - # sub_proc = subprocess.Popen(subprocess_mov) - # sub_proc.wait() - # - # if not os.path.isfile(full_output_path): - # raise("Quicktime wasn't created succesfully") - # - # if "representations" not in instance.data: - # instance.data["representations"] = [] - # - # representation = { - # 'name': 'mov', - # 'ext': 'mov', - # 'files': movFile, - # "stagingDir": stagingdir, - # "preview": True - # } - # instance.data["representations"].append(representation) diff --git a/pype/plugins/global/_publish_unused/transcode.py b/pype/plugins/global/_publish_unused/transcode.py deleted file mode 100644 index 6da65e3cc7..0000000000 --- a/pype/plugins/global/_publish_unused/transcode.py +++ /dev/null @@ -1,153 +0,0 @@ -import os -import subprocess - -import pyblish.api -import filelink - - -class ExtractTranscode(pyblish.api.InstancePlugin): - """Extracts review movie from image sequence. - - Offset to get images to transcode from. - """ - - order = pyblish.api.ExtractorOrder + 0.1 - label = "Transcode" - optional = True - families = ["review"] - - def find_previous_index(self, index, indexes): - """Finds the closest previous value in a list from a value.""" - - data = [] - for i in indexes: - if i >= index: - continue - data.append(index - i) - - return indexes[data.index(min(data))] - - def process(self, instance): - - if "collection" in instance.data.keys(): - self.process_image(instance) - - if "output_path" in instance.data.keys(): - self.process_movie(instance) - - def process_image(self, instance): - - collection = instance.data.get("collection", []) - - if not list(collection): - msg = "Skipping \"{0}\" because no frames was found." - self.log.warning(msg.format(instance.data["name"])) - return - - # Temporary fill the missing frames. - missing = collection.holes() - if not collection.is_contiguous(): - pattern = collection.format("{head}{padding}{tail}") - for index in missing.indexes: - dst = pattern % index - src_index = self.find_previous_index( - index, list(collection.indexes) - ) - src = pattern % src_index - - filelink.create(src, dst) - - # Generate args. - # Has to be yuv420p for compatibility with older players and smooth - # playback. This does come with a sacrifice of more visible banding - # issues. - # -crf 18 is visually lossless. 
- args = [ - "ffmpeg", "-y", - "-start_number", str(min(collection.indexes)), - "-framerate", str(instance.context.data["framerate"]), - "-i", collection.format("{head}{padding}{tail}"), - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - "-vframes", - str(max(collection.indexes) - min(collection.indexes) + 1), - "-vf", - "scale=trunc(iw/2)*2:trunc(ih/2)*2", - ] - - if instance.data.get("baked_colorspace_movie"): - args = [ - "ffmpeg", "-y", - "-i", instance.data["baked_colorspace_movie"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - args.append(collection.format("{head}.mov")) - - self.log.debug("Executing args: {0}".format(args)) - - # Can't use subprocess.check_output, cause Houdini doesn't like that. - p = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, - cwd=os.path.dirname(args[-1]) - ) - - output = p.communicate()[0] - - # Remove temporary frame fillers - for f in missing: - os.remove(f) - - if p.returncode != 0: - raise ValueError(output) - - self.log.debug(output) - - def process_movie(self, instance): - # Generate args. - # Has to be yuv420p for compatibility with older players and smooth - # playback. This does come with a sacrifice of more visible banding - # issues. - args = [ - "ffmpeg", "-y", - "-i", instance.data["output_path"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - if instance.data.get("baked_colorspace_movie"): - args = [ - "ffmpeg", "-y", - "-i", instance.data["baked_colorspace_movie"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - split = os.path.splitext(instance.data["output_path"]) - args.append(split[0] + "_review.mov") - - self.log.debug("Executing args: {0}".format(args)) - - # Can't use subprocess.check_output, cause Houdini doesn't like that. - p = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, - cwd=os.path.dirname(args[-1]) - ) - - output = p.communicate()[0] - - if p.returncode != 0: - raise ValueError(output) - - self.log.debug(output) diff --git a/pype/plugins/global/load/open_file.py b/pype/plugins/global/load/open_file.py index 9425eaab04..b496311e0c 100644 --- a/pype/plugins/global/load/open_file.py +++ b/pype/plugins/global/load/open_file.py @@ -18,7 +18,7 @@ def open(filepath): class Openfile(api.Loader): """Open Image Sequence with system default""" - families = ["write"] + families = ["render2d"] representations = ["*"] label = "Open" diff --git a/pype/plugins/global/publish/collect_anatomy.py b/pype/plugins/global/publish/collect_anatomy.py index 9412209850..73ae3bb024 100644 --- a/pype/plugins/global/publish/collect_anatomy.py +++ b/pype/plugins/global/publish/collect_anatomy.py @@ -1,10 +1,20 @@ -""" +"""Collect Anatomy and global anatomy data. 
+
Requires: - None
+ session -> AVALON_TASK
+ projectEntity, assetEntity -> collect_avalon_entities *(pyblish.api.CollectorOrder)
+ username -> collect_pype_user *(pyblish.api.CollectorOrder + 0.001)
+ datetimeData -> collect_datetime_data *(pyblish.api.CollectorOrder)
+ Provides: context -> anatomy (pypeapp.Anatomy)
+ context -> anatomyData """
+import os
+import json
+
+from avalon import api, lib from pypeapp import Anatomy import pyblish.api @@ -12,9 +22,52 @@ import pyblish.api class CollectAnatomy(pyblish.api.ContextPlugin): """Collect Anatomy into Context"""
- order = pyblish.api.CollectorOrder
+ order = pyblish.api.CollectorOrder + 0.002 label = "Collect Anatomy" def process(self, context):
- context.data['anatomy'] = Anatomy()
- self.log.info("Anatomy templates collected...")
+ root_path = api.registered_root()
+ task_name = api.Session["AVALON_TASK"]
+
+ project_entity = context.data["projectEntity"]
+ asset_entity = context.data["assetEntity"]
+
+ project_name = project_entity["name"]
+
+ context.data["anatomy"] = Anatomy(project_name)
+ self.log.info(
+ "Anatomy object collected for project \"{}\".".format(project_name)
+ )
+
+ hierarchy_items = asset_entity["data"]["parents"]
+ hierarchy = ""
+ if hierarchy_items:
+ hierarchy = os.path.join(*hierarchy_items)
+
+ context_data = {
+ "root": root_path,
+ "project": {
+ "name": project_name,
+ "code": project_entity["data"].get("code")
+ },
+ "asset": asset_entity["name"],
+ "hierarchy": hierarchy.replace("\\", "/"),
+ "task": task_name,
+
+ "username": context.data["user"]
+ }
+
+ avalon_app_name = os.environ.get("AVALON_APP_NAME")
+ if avalon_app_name:
+ application_def = lib.get_application(avalon_app_name)
+ app_dir = application_def.get("application_dir")
+ if app_dir:
+ context_data["app"] = app_dir
+
+ datetime_data = context.data.get("datetimeData") or {}
+ context_data.update(datetime_data)
+
+ context.data["anatomyData"] = context_data
+
+ self.log.info("Global anatomy data collected")
+ self.log.debug(json.dumps(context_data, indent=4))
diff --git a/pype/plugins/global/publish/collect_avalon_entities.py b/pype/plugins/global/publish/collect_avalon_entities.py new file mode 100644 index 0000000000..103f5abd1a --- /dev/null +++ b/pype/plugins/global/publish/collect_avalon_entities.py @@ -0,0 +1,53 @@
+"""Collect project and asset entities from the Avalon database.
+
+Requires:
+ session -> AVALON_PROJECT, AVALON_ASSET
+
+Provides:
+ context -> projectEntity - project entity from database
+ context -> assetEntity - asset entity from database
+"""
+
+from avalon import io, api
+import pyblish.api
+
+
+class CollectAvalonEntities(pyblish.api.ContextPlugin):
+ """Collect Avalon project and asset entities into context"""
+
+ order = pyblish.api.CollectorOrder
+ label = "Collect Avalon Entities"
+
+ def process(self, context):
+ io.install()
+ project_name = api.Session["AVALON_PROJECT"]
+ asset_name = api.Session["AVALON_ASSET"]
+
+ project_entity = io.find_one({
+ "type": "project",
+ "name": project_name
+ })
+ assert project_entity, (
+ "Project '{0}' was not found." 
+ ).format(project_name) + self.log.debug("Collected Project \"{}\"".format(project_entity)) + + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + assert asset_entity, ( + "No asset found by the name '{0}' in project '{1}'" + ).format(asset_name, project_name) + + self.log.debug("Collected Asset \"{}\"".format(asset_entity)) + + context.data["projectEntity"] = project_entity + context.data["assetEntity"] = asset_entity + + data = asset_entity['data'] + handles = int(data.get("handles") or 0) + context.data["handles"] = handles + context.data["handleStart"] = int(data.get("handleStart", handles)) + context.data["handleEnd"] = int(data.get("handleEnd", handles)) diff --git a/pype/plugins/global/publish/collect_comment.py b/pype/plugins/global/publish/collect_comment.py index 22970665a1..062142ace9 100644 --- a/pype/plugins/global/publish/collect_comment.py +++ b/pype/plugins/global/publish/collect_comment.py @@ -15,4 +15,5 @@ class CollectComment(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder def process(self, context): - context.data["comment"] = "" + comment = (context.data.get("comment") or "").strip() + context.data["comment"] = comment diff --git a/pype/plugins/global/publish/collect_datetime_data.py b/pype/plugins/global/publish/collect_datetime_data.py new file mode 100644 index 0000000000..f04f924e18 --- /dev/null +++ b/pype/plugins/global/publish/collect_datetime_data.py @@ -0,0 +1,18 @@ +"""These data *must* be collected only once during publishing process. + +Provides: + context -> datetimeData +""" + +import pyblish.api +from pypeapp import config + + +class CollectDateTimeData(pyblish.api.ContextPlugin): + order = pyblish.api.CollectorOrder + label = "Collect DateTime data" + + def process(self, context): + key = "datetimeData" + if key not in context.data: + context.data[key] = config.get_datetime_data() diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py deleted file mode 100644 index a04de4fdd7..0000000000 --- a/pype/plugins/global/publish/collect_filesequences.py +++ /dev/null @@ -1,446 +0,0 @@ -""" -Requires: - environment -> PYPE_PUBLISH_PATHS - context -> workspaceDir - -Provides: - context -> user (str) - instance -> new instance -""" - -import os -import re -import copy -import json - -import pyblish.api -from avalon import api - - -def collect(root, - regex=None, - exclude_regex=None, - frame_start=None, - frame_end=None): - """Collect sequence collections in root""" - - from avalon.vendor import clique - - files = list() - for filename in os.listdir(root): - - # Must have extension - ext = os.path.splitext(filename)[1] - if not ext: - continue - - # Only files - if not os.path.isfile(os.path.join(root, filename)): - continue - - # Include and exclude regex - if regex and not re.search(regex, filename): - continue - if exclude_regex and re.search(exclude_regex, filename): - continue - - files.append(filename) - - # Match collections - # Support filenames like: projectX_shot01_0010.tiff with this regex - pattern = r"(?P(?P0*)\d+)\.\D+\d?$" - collections, remainder = clique.assemble(files, - patterns=[pattern], - minimum_items=1) - - # Exclude any frames outside start and end frame. 
- for collection in collections: - for index in list(collection.indexes): - if frame_start is not None and index < frame_start: - collection.indexes.discard(index) - continue - if frame_end is not None and index > frame_end: - collection.indexes.discard(index) - continue - - # Keep only collections that have at least a single frame - collections = [c for c in collections if c.indexes] - - return collections, remainder - - -class CollectRenderedFrames(pyblish.api.ContextPlugin): - """Gather file sequences from working directory - - When "FILESEQUENCE" environment variable is set these paths (folders or - .json files) are parsed for image sequences. Otherwise the current - working directory is searched for file sequences. - - The json configuration may have the optional keys: - asset (str): The asset to publish to. If not provided fall back to - api.Session["AVALON_ASSET"] - subset (str): The subset to publish to. If not provided the sequence's - head (up to frame number) will be used. - frame_start (int): The start frame for the sequence - frame_end (int): The end frame for the sequence - root (str): The path to collect from (can be relative to the .json) - regex (str): A regex for the sequence filename - exclude_regex (str): A regex for filename to exclude from collection - metadata (dict): Custom metadata for instance.data["metadata"] - - """ - - order = pyblish.api.CollectorOrder - 0.0001 - targets = ["filesequence"] - label = "RenderedFrames" - - def process(self, context): - pixel_aspect = 1 - resolution_width = 1920 - resolution_height = 1080 - lut_path = None - slate_frame = None - families_data = None - subset = None - version = None - frame_start = 0 - frame_end = 0 - if os.environ.get("PYPE_PUBLISH_PATHS"): - paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) - self.log.info("Collecting paths: {}".format(paths)) - else: - cwd = context.get("workspaceDir", os.getcwd()) - paths = [cwd] - - for path in paths: - - self.log.info("Loading: {}".format(path)) - - if path.endswith(".json"): - # Search using .json configuration - with open(path, "r") as f: - try: - data = json.load(f) - except Exception as exc: - self.log.error( - "Error loading json: " - "{} - Exception: {}".format(path, exc) - ) - raise - - cwd = os.path.dirname(path) - root_override = data.get("root") - frame_start = int(data.get("frameStart")) - frame_end = int(data.get("frameEnd")) - subset = data.get("subset") - - if root_override: - if os.path.isabs(root_override): - root = root_override - else: - root = os.path.join(cwd, root_override) - else: - root = cwd - - if data.get("ftrack"): - f = data.get("ftrack") - os.environ["FTRACK_API_USER"] = f["FTRACK_API_USER"] - os.environ["FTRACK_API_KEY"] = f["FTRACK_API_KEY"] - os.environ["FTRACK_SERVER"] = f["FTRACK_SERVER"] - - metadata = data.get("metadata") - if metadata: - session = metadata.get("session") - if session: - self.log.info("setting session using metadata") - api.Session.update(session) - os.environ.update(session) - instance = metadata.get("instance") - if instance: - instance_family = instance.get("family") - pixel_aspect = instance.get("pixelAspect", 1) - resolution_width = instance.get("resolutionWidth", 1920) - resolution_height = instance.get("resolutionHeight", 1080) - lut_path = instance.get("lutPath", None) - baked_mov_path = instance.get("bakeRenderPath") - families_data = instance.get("families") - slate_frame = instance.get("slateFrame") - version = instance.get("version") - - - else: - # Search in directory - data = dict() - root = path - - 
self.log.info("Collecting: {}".format(root)) - - regex = data.get("regex") - if baked_mov_path: - regex = "^{}.*$".format(subset) - - if regex: - self.log.info("Using regex: {}".format(regex)) - - if "slate" in families_data: - frame_start -= 1 - - collections, remainder = collect( - root=root, - regex=regex, - exclude_regex=data.get("exclude_regex"), - frame_start=frame_start, - frame_end=frame_end, - ) - - self.log.info("Found collections: {}".format(collections)) - self.log.info("Found remainder: {}".format(remainder)) - - fps = data.get("fps", 25) - - if data.get("user"): - context.data["user"] = data["user"] - - if data.get("version"): - version = data.get("version") - - # Get family from the data - families = data.get("families", ["render"]) - if "render" not in families: - families.append("render") - if "ftrack" not in families: - families.append("ftrack") - if "write" in instance_family: - families.append("write") - if families_data and "slate" in families_data: - families.append("slate") - - if data.get("attachTo"): - # we need to attach found collections to existing - # subset version as review represenation. - - for attach in data.get("attachTo"): - self.log.info( - "Attaching render {}:v{}".format( - attach["subset"], attach["version"])) - instance = context.create_instance( - attach["subset"]) - instance.data.update( - { - "name": attach["subset"], - "version": attach["version"], - "family": 'review', - "families": ['review', 'ftrack'], - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height - }) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - for collection in collections: - self.log.info( - " - adding representation: {}".format( - str(collection)) - ) - ext = collection.tail.lstrip(".") - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - } - instance.data["representations"].append( - representation) - - elif subset: - # if we have subset - add all collections and known - # reminder as representations - - # take out review family if mov path - # this will make imagesequence none review - - if baked_mov_path: - self.log.info( - "Baked mov is available {}".format( - baked_mov_path)) - families.append("review") - - if session['AVALON_APP'] == "maya": - families.append("review") - - self.log.info( - "Adding representations to subset {}".format( - subset)) - - instance = context.create_instance(subset) - data = copy.deepcopy(data) - - instance.data.update( - { - "name": subset, - "family": families[0], - "families": list(families), - "subset": subset, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "slateFrame": slate_frame, - "version": version - } - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - for collection in collections: - self.log.info(" - {}".format(str(collection))) - - ext = collection.tail.lstrip(".") - - if "slate" in instance.data["families"]: - frame_start += 1 
- - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": frame_start, - "frameEnd": frame_end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"] if not baked_mov_path else [], - } - instance.data["representations"].append( - representation) - - # filter out only relevant mov in case baked available - self.log.debug("__ remainder {}".format(remainder)) - if baked_mov_path: - remainder = [r for r in remainder - if r in baked_mov_path] - self.log.debug("__ remainder {}".format(remainder)) - - # process reminders - for rem in remainder: - # add only known types to representation - if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']: - self.log.info(" . {}".format(rem)) - - if "slate" in instance.data["families"]: - frame_start += 1 - - tags = ["review"] - - if baked_mov_path: - tags.append("delete") - - representation = { - "name": rem.split(".")[-1], - "ext": "{}".format(rem.split(".")[-1]), - "files": rem, - "stagingDir": root, - "frameStart": frame_start, - "anatomy_template": "render", - "fps": fps, - "tags": tags - } - instance.data["representations"].append( - representation) - - else: - # we have no subset so we take every collection and create one - # from it - for collection in collections: - instance = context.create_instance(str(collection)) - self.log.info("Creating subset from: %s" % str(collection)) - - # Ensure each instance gets a unique reference to the data - data = copy.deepcopy(data) - - # If no subset provided, get it from collection's head - subset = data.get("subset", collection.head.rstrip("_. ")) - - # If no start or end frame provided, get it from collection - indices = list(collection.indexes) - start = data.get("frameStart", indices[0]) - end = data.get("frameEnd", indices[-1]) - - ext = list(collection)[0].split(".")[-1] - - if "review" not in families: - families.append("review") - - instance.data.update( - { - "name": str(collection), - "family": families[0], # backwards compatibility - "families": list(families), - "subset": subset, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": start, - "frameEnd": end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "version": version - } - ) - if lut_path: - instance.data.update({"lutPath": lut_path}) - - instance.append(collection) - instance.context.data["fps"] = fps - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - } - instance.data["representations"].append(representation) - - # temporary ... 
allow only beauty on ftrack - if session['AVALON_APP'] == "maya": - AOV_filter = ['beauty'] - for aov in AOV_filter: - if aov not in instance.data['subset']: - instance.data['families'].remove('review') - instance.data['families'].remove('ftrack') - representation["tags"].remove('review') - - self.log.debug( - "__ representations {}".format( - instance.data["representations"])) - self.log.debug( - "__ instance.data {}".format(instance.data)) diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py new file mode 100644 index 0000000000..06a25b7c8a --- /dev/null +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -0,0 +1,129 @@ +""" +Requires: + context -> anatomyData + context -> projectEntity + context -> assetEntity + instance -> asset + instance -> subset + instance -> family + +Optional: + instance -> version + instance -> resolutionWidth + instance -> resolutionHeight + instance -> fps + +Provides: + instance -> projectEntity + instance -> assetEntity + instance -> anatomyData + instance -> version + instance -> latestVersion +""" + +import copy +import json + +from avalon import io +import pyblish.api + + +class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): + """Fill templates with data needed for publish""" + + order = pyblish.api.CollectorOrder + 0.49 + label = "Collect instance anatomy data" + + def process(self, instance): + # get all the stuff from the database + anatomy_data = copy.deepcopy(instance.context.data["anatomyData"]) + project_entity = instance.context.data["projectEntity"] + context_asset_entity = instance.context.data["assetEntity"] + + asset_name = instance.data["asset"] + # Check if asset name is the same as what is in context + # - they may be different, e.g. 
in NukeStudio + if context_asset_entity["name"] == asset_name: + asset_entity = context_asset_entity + + else: + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + + subset_name = instance.data["subset"] + version_number = instance.data.get("version") + latest_version = None + + if asset_entity: + subset_entity = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_entity["_id"] + }) + + if subset_entity is None: + self.log.debug("Subset entity does not exist yet.") + else: + version_entity = io.find_one( + { + "type": "version", + "parent": subset_entity["_id"] + }, + sort=[("name", -1)] + ) + if version_entity: + latest_version = version_entity["name"] + + # If version is not specified for instance or context + if version_number is None: + # TODO we should be able to change default version by studio + # preferences (like start with version number `0`) + version_number = 1 + # use latest version (+1) if already any exist + if latest_version is not None: + version_number += int(latest_version) + + anatomy_updates = { + "asset": asset_name, + "family": instance.data["family"], + "subset": subset_name, + "version": version_number + } + + task_name = instance.data.get("task") + if task_name: + anatomy_updates["task"] = task_name + + # Version should not be collected since may be instance + anatomy_data.update(anatomy_updates) + + resolution_width = instance.data.get("resolutionWidth") + if resolution_width: + anatomy_data["resolution_width"] = resolution_width + + resolution_height = instance.data.get("resolutionHeight") + if resolution_height: + anatomy_data["resolution_height"] = resolution_height + + pixel_aspect = instance.data.get("pixelAspect") + if pixel_aspect: + anatomy_data["pixel_aspect"] = float("{:0.2f}".format( + float(pixel_aspect))) + + fps = instance.data.get("fps") + if fps: + anatomy_data["fps"] = float("{:0.2f}".format( + float(fps))) + + instance.data["projectEntity"] = project_entity + instance.data["assetEntity"] = asset_entity + instance.data["anatomyData"] = anatomy_data + instance.data["latestVersion"] = latest_version + # TODO should be version number set here? 
+ instance.data["version"] = version_number + + self.log.info("Instance anatomy Data collected") + self.log.debug(json.dumps(anatomy_data, indent=4)) diff --git a/pype/plugins/global/publish/collect_project_data.py b/pype/plugins/global/publish/collect_project_data.py deleted file mode 100644 index acdbc2c41f..0000000000 --- a/pype/plugins/global/publish/collect_project_data.py +++ /dev/null @@ -1,24 +0,0 @@ -""" -Requires: - None - -Provides: - context -> projectData -""" - -import pyblish.api -import pype.api as pype - - -class CollectProjectData(pyblish.api.ContextPlugin): - """Collecting project data from avalon db""" - - label = "Collect Project Data" - order = pyblish.api.CollectorOrder - 0.1 - hosts = ["nukestudio"] - - def process(self, context): - # get project data from avalon db - context.data["projectData"] = pype.get_project()["data"] - - return diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py new file mode 100644 index 0000000000..552fd49f6d --- /dev/null +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -0,0 +1,94 @@ +import os +import json + +import pyblish.api +from avalon import api + +from pypeapp import PypeLauncher + + +class CollectRenderedFiles(pyblish.api.ContextPlugin): + """ + This collector will try to find json files in provided + `PYPE_PUBLISH_DATA`. Those files _MUST_ share same context. + + """ + order = pyblish.api.CollectorOrder - 0.0001 + targets = ["filesequence"] + label = "Collect rendered frames" + + _context = None + + def _load_json(self, path): + assert os.path.isfile(path), ("path to json file doesn't exist") + data = None + with open(path, "r") as json_file: + try: + data = json.load(json_file) + except Exception as exc: + self.log.error( + "Error loading json: " + "{} - Exception: {}".format(path, exc) + ) + return data + + def _process_path(self, data): + # validate basic necessary data + data_err = "invalid json file - missing data" + required = ["asset", "user", "comment", + "job", "instances", "session", "version"] + assert all(elem in data.keys() for elem in required), data_err + + # set context by first json file + ctx = self._context.data + + ctx["asset"] = ctx.get("asset") or data.get("asset") + ctx["intent"] = ctx.get("intent") or data.get("intent") + ctx["comment"] = ctx.get("comment") or data.get("comment") + ctx["user"] = ctx.get("user") or data.get("user") + ctx["version"] = ctx.get("version") or data.get("version") + + # basic sanity check to see if we are working in same context + # if some other json file has different context, bail out. + ctx_err = "inconsistent contexts in json files - %s" + assert ctx.get("asset") == data.get("asset"), ctx_err % "asset" + assert ctx.get("intent") == data.get("intent"), ctx_err % "intent" + assert ctx.get("comment") == data.get("comment"), ctx_err % "comment" + assert ctx.get("user") == data.get("user"), ctx_err % "user" + assert ctx.get("version") == data.get("version"), ctx_err % "version" + + # ftrack credentials are passed as environment variables by Deadline + # to publish job, but Muster doesn't pass them. 
+ if data.get("ftrack") and not os.environ.get("FTRACK_API_USER"):
+ ftrack = data.get("ftrack")
+ os.environ["FTRACK_API_USER"] = ftrack["FTRACK_API_USER"]
+ os.environ["FTRACK_API_KEY"] = ftrack["FTRACK_API_KEY"]
+ os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"]
+
+ # now we can just add instances from json file and we are done
+ for instance in data.get("instances"):
+ self.log.info(" - processing instance for {}".format(
+ instance.get("subset")))
+ i = self._context.create_instance(instance.get("subset"))
+ self.log.info("remapping paths ...")
+ i.data["representations"] = [PypeLauncher().path_remapper(
+ data=r) for r in instance.get("representations")]
+ i.data.update(instance)
+
+ def process(self, context):
+ self._context = context
+
+ assert os.environ.get("PYPE_PUBLISH_DATA"), (
+ "Missing `PYPE_PUBLISH_DATA`")
+ paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep)
+
+ session_set = False
+ for path in paths:
+ data = self._load_json(path)
+ assert data, "failed to load json file"
+ if not session_set:
+ self.log.info("Setting session using data from file")
+ api.Session.update(data.get("session"))
+ os.environ.update(data.get("session"))
+ session_set = True
+ self._process_path(data)
diff --git a/pype/plugins/global/publish/collect_resources_path.py b/pype/plugins/global/publish/collect_resources_path.py new file mode 100644 index 0000000000..734d1f84e4 --- /dev/null +++ b/pype/plugins/global/publish/collect_resources_path.py @@ -0,0 +1,60 @@
+"""
+Requires:
+ context -> anatomy
+ context -> anatomyData
+
+Provides:
+ instance -> publishDir
+ instance -> resourcesDir
+"""
+
+import os
+import copy
+
+import pyblish.api
+from avalon import api
+
+
+class CollectResourcesPath(pyblish.api.InstancePlugin):
+ """Generate directory path where the files and resources will be stored"""
+
+ label = "Collect Resources Path"
+ order = pyblish.api.CollectorOrder + 0.495
+
+ def process(self, instance):
+ anatomy = instance.context.data["anatomy"]
+
+ template_data = copy.deepcopy(instance.data["anatomyData"])
+
+ # This is for cases of deprecated anatomy without `folder`
+ # TODO remove when all clients have solved this issue
+ template_data.update({
+ "frame": "FRAME_TEMP",
+ "representation": "TEMP"
+ })
+
+ anatomy_filled = anatomy.format(template_data)
+
+ if "folder" in anatomy.templates["publish"]:
+ publish_folder = anatomy_filled["publish"]["folder"]
+ else:
+ # solve deprecated situation when `folder` key is not underneath
+ # `publish` anatomy
+ project_name = api.Session["AVALON_PROJECT"]
+ self.log.warning((
+ "Deprecation warning: Anatomy does not have `folder` key set"
+ " underneath `publish` (in global or for project `{}`)." 
+ ).format(project_name)) + + file_path = anatomy_filled["publish"]["path"] + # Directory + publish_folder = os.path.dirname(file_path) + + publish_folder = os.path.normpath(publish_folder) + resources_folder = os.path.join(publish_folder, "resources") + + instance.data["publishDir"] = publish_folder + instance.data["resourcesDir"] = resources_folder + + self.log.debug("publishDir: \"{}\"".format(publish_folder)) + self.log.debug("resourcesDir: \"{}\"".format(resources_folder)) diff --git a/pype/plugins/global/publish/collect_scene_version.py b/pype/plugins/global/publish/collect_scene_version.py index 2844a695e2..02e913199b 100644 --- a/pype/plugins/global/publish/collect_scene_version.py +++ b/pype/plugins/global/publish/collect_scene_version.py @@ -21,7 +21,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): if '' in filename: return - rootVersion = pype.get_version_from_path(filename) + rootVersion = int(pype.get_version_from_path(filename)) context.data['version'] = rootVersion - + self.log.info("{}".format(type(rootVersion))) self.log.info('Scene Version: %s' % context.data.get('version')) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py deleted file mode 100644 index 383944e293..0000000000 --- a/pype/plugins/global/publish/collect_templates.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -Requires: - session -> AVALON_PROJECT - context -> anatomy (pypeapp.Anatomy) - instance -> subset - instance -> asset - instance -> family - -Provides: - instance -> template - instance -> assumedTemplateData - instance -> assumedDestination -""" - -import os - -from avalon import io, api -import pyblish.api - - -class CollectTemplates(pyblish.api.InstancePlugin): - """Fill templates with data needed for publish""" - - order = pyblish.api.CollectorOrder + 0.1 - label = "Collect and fill Templates" - hosts = ["maya", "nuke", "standalonepublisher"] - - def process(self, instance): - # get all the stuff from the database - subset_name = instance.data["subset"] - asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - - project = io.find_one( - { - "type": "project", - "name": project_name - }, - projection={"config": True, "data": True} - ) - - template = project["config"]["template"]["publish"] - anatomy = instance.context.data['anatomy'] - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - silo = asset.get('silo') - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += int(version["name"]) - - hierarchy = asset['data']['parents'] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*hierarchy) - - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "silo": silo, - "family": instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "version": version_number, - "hierarchy": hierarchy.replace("\\", "/"), - "representation": "TEMP")} - - resolution_width = 
instance.data.get("resolutionWidth") - resolution_height = instance.data.get("resolutionHeight") - fps = instance.data.get("fps") - - if resolution_width: - template_data["resolution_width"] = resolution_width - if resolution_width: - template_data["resolution_height"] = resolution_height - if resolution_width: - template_data["fps"] = fps - - instance.data["template"] = template - instance.data["assumedTemplateData"] = template_data - - # We take the parent folder of representation 'filepath' - instance.data["assumedDestination"] = os.path.dirname( - (anatomy.format(template_data))["publish"]["path"] - ) - self.log.info("Assumed Destination has been created...") - self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"])) - self.log.debug("__ template: `{}`".format(instance.data["template"])) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 8f5a4aa000..086a1fdfb2 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -4,7 +4,6 @@ import copy import pype.api import pyblish -from pypeapp import config class ExtractBurnin(pype.api.Extractor): @@ -16,7 +15,7 @@ class ExtractBurnin(pype.api.Extractor): `tags` including `burnin` """ - label = "Quicktime with burnins" + label = "Extract burnins" order = pyblish.api.ExtractorOrder + 0.03 families = ["review", "burnin"] hosts = ["nuke", "maya", "shell"] @@ -26,46 +25,38 @@ class ExtractBurnin(pype.api.Extractor): if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") - version = instance.context.data.get( - 'version', instance.data.get('version')) + context_data = instance.context.data + + version = instance.data.get( + 'version', instance.context.data.get('version')) frame_start = int(instance.data.get("frameStart") or 0) frame_end = int(instance.data.get("frameEnd") or 1) - duration = frame_end - frame_start + 1 + handle_start = instance.data.get("handleStart", + context_data.get("handleStart")) + handle_end = instance.data.get("handleEnd", + context_data.get("handleEnd")) - prep_data = { - "username": instance.context.data['user'], - "asset": os.environ['AVALON_ASSET'], - "task": os.environ['AVALON_TASK'], - "frame_start": frame_start, - "frame_end": frame_end, - "duration": duration, - "version": int(version), - "comment": instance.context.data.get("comment", ""), - "intent": instance.context.data.get("intent", "") - } + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + duration = frame_end_handle - frame_start_handle + 1 - # Add datetime data to preparation data - prep_data.update(config.get_datetime_data()) + prep_data = copy.deepcopy(instance.data["anatomyData"]) - slate_frame_start = frame_start - slate_frame_end = frame_end - slate_duration = duration - - # exception for slate workflow - if "slate" in instance.data["families"]: - slate_frame_start = frame_start - 1 - slate_frame_end = frame_end - slate_duration = slate_frame_end - slate_frame_start + 1 + if "slate.farm" in instance.data["families"]: + frame_start_handle += 1 + duration -= 1 prep_data.update({ - "slate_frame_start": slate_frame_start, - "slate_frame_end": slate_frame_end, - "slate_duration": slate_duration + "frame_start": frame_start_handle, + "frame_end": frame_end_handle, + "duration": duration, + "version": int(version), + "comment": instance.context.data.get("comment", "") }) - # Update data with template data - 
template_data = instance.data.get("assumedTemplateData") or {} - prep_data.update(template_data) + intent = instance.context.data.get("intent", {}).get("label") + if intent: + prep_data["intent"] = intent # get anatomy project anatomy = instance.context.data['anatomy'] @@ -77,27 +68,77 @@ class ExtractBurnin(pype.api.Extractor): if "burnin" not in repre.get("tags", []): continue + is_sequence = "sequence" in repre.get("tags", []) + + # no handles switch from profile tags + no_handles = "no-handles" in repre.get("tags", []) + stagingdir = repre["stagingDir"] filename = "{0}".format(repre["files"]) + if is_sequence: + filename = repre["sequence_file"] + name = "_burnin" ext = os.path.splitext(filename)[1] movieFileBurnin = filename.replace(ext, "") + name + ext + if is_sequence: + fn_splt = filename.split(".") + movieFileBurnin = ".".join( + ((fn_splt[0] + name), fn_splt[-2], fn_splt[-1])) + + self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin)) + full_movie_path = os.path.join( - os.path.normpath(stagingdir), repre["files"] - ) + os.path.normpath(stagingdir), filename) full_burnin_path = os.path.join( - os.path.normpath(stagingdir), movieFileBurnin - ) + os.path.normpath(stagingdir), movieFileBurnin) + + self.log.debug("__ full_movie_path: {}".format(full_movie_path)) self.log.debug("__ full_burnin_path: {}".format(full_burnin_path)) # create copy of prep_data for anatomy formatting _prep_data = copy.deepcopy(prep_data) _prep_data["representation"] = repre["name"] - _prep_data["anatomy"] = ( - anatomy.format_all(_prep_data).get("solved") or {} - ) + filled_anatomy = anatomy.format_all(_prep_data) + _prep_data["anatomy"] = filled_anatomy.get_solved() + + # copy frame range variables + frame_start_cp = frame_start_handle + frame_end_cp = frame_end_handle + duration_cp = duration + + if no_handles: + frame_start_cp = frame_start + frame_end_cp = frame_end + duration_cp = frame_end_cp - frame_start_cp + 1 + _prep_data.update({ + "frame_start": frame_start_cp, + "frame_end": frame_end_cp, + "duration": duration_cp, + }) + + # dealing with slates + slate_frame_start = frame_start_cp + slate_frame_end = frame_end_cp + slate_duration = duration_cp + + # exception for slate workflow + if ("slate" in instance.data["families"]): + if "slate-frame" in repre.get("tags", []): + slate_frame_start = frame_start_cp - 1 + slate_frame_end = frame_end_cp + slate_duration = duration_cp + 1 + + self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start)) + + _prep_data.update({ + "slate_frame_start": slate_frame_start, + "slate_frame_end": slate_frame_end, + "slate_duration": slate_duration + }) + burnin_data = { "input": full_movie_path.replace("\\", "/"), "codec": repre.get("codec", []), @@ -144,15 +185,35 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug("Output: {}".format(output)) repre_update = { + "anatomy_template": "render", "files": movieFileBurnin, "name": repre["name"], "tags": [x for x in repre["tags"] if x != "delete"] } + + if is_sequence: + burnin_seq_files = list() + for frame_index in range(_prep_data["duration"] + 1): + if frame_index == 0: + continue + burnin_seq_files.append(movieFileBurnin % frame_index) + repre_update.update({ + "files": burnin_seq_files + }) + instance.data["representations"][i].update(repre_update) # removing the source mov file - os.remove(full_movie_path) - self.log.debug("Removed: `{}`".format(full_movie_path)) + if is_sequence: + for frame_index in range(_prep_data["duration"] + 1): + if frame_index == 0: + continue + rm_file = 
full_movie_path % frame_index + os.remove(rm_file) + self.log.debug("Removed: `{}`".format(rm_file)) + else: + os.remove(full_movie_path) + self.log.debug("Removed: `{}`".format(full_movie_path)) # Remove any representations tagged for deletion. for repre in instance.data["representations"]: diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py index 318a6db105..ab8226f6ef 100644 --- a/pype/plugins/global/publish/extract_hierarchy_avalon.py +++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py @@ -28,29 +28,33 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): entity_type = entity_data["entity_type"] data = {} - - data["inputs"] = entity_data.get("inputs", []) data["entityType"] = entity_type # Custom attributes. for k, val in entity_data.get("custom_attributes", {}).items(): data[k] = val - # Tasks. - tasks = entity_data.get("tasks", []) - if tasks is not None or len(tasks) > 0: - data["tasks"] = tasks - parents = [] - visualParent = None - # do not store project"s id as visualParent (silo asset) - if self.project is not None: - if self.project["_id"] != parent["_id"]: - visualParent = parent["_id"] - parents.extend(parent.get("data", {}).get("parents", [])) - parents.append(parent["name"]) - data["visualParent"] = visualParent - data["parents"] = parents + if entity_type.lower() != "project": + data["inputs"] = entity_data.get("inputs", []) + # Tasks. + tasks = entity_data.get("tasks", []) + if tasks is not None or len(tasks) > 0: + data["tasks"] = tasks + parents = [] + visualParent = None + # do not store project"s id as visualParent (silo asset) + if self.project is not None: + if self.project["_id"] != parent["_id"]: + visualParent = parent["_id"] + parents.extend( + parent.get("data", {}).get("parents", []) + ) + parents.append(parent["name"]) + data["visualParent"] = visualParent + data["parents"] = parents + + update_data = True # Process project if entity_type.lower() == "project": entity = io.find_one({"type": "project"}) @@ -58,8 +62,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): assert (entity is not None), "Did not find project in DB" # get data from already existing project - for key, value in entity.get("data", {}).items(): - data[key] = value + cur_entity_data = entity.get("data") or {} + cur_entity_data.update(data) + data = cur_entity_data self.project = entity # Raise error if project or parent are not set @@ -70,16 +75,63 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): # Else process assset else: entity = io.find_one({"type": "asset", "name": name}) - # Create entity if doesn"t exist - if entity is None: - entity = self.create_avalon_asset(name, data) + if entity: + # Do not override data, only update + cur_entity_data = entity.get("data") or {} + cur_entity_data.update(data) + data = cur_entity_data + else: + # Skip updating data + update_data = False - # Update entity data with input data - io.update_many({"_id": entity["_id"]}, {"$set": {"data": data}}) + archived_entities = io.find({ + "type": "archived_asset", + "name": name + }) + unarchive_entity = None + for archived_entity in archived_entities: + archived_parents = ( + archived_entity + .get("data", {}) + .get("parents") + ) + if data["parents"] == archived_parents: + unarchive_entity = archived_entity + break + + if unarchive_entity is None: + # Create entity if doesn"t exist + entity = self.create_avalon_asset(name, data) + else: + # Unarchive if entity was archived + entity = 
self.unarchive_entity(unarchive_entity, data) + + if update_data: + # Update entity data with input data + io.update_many( + {"_id": entity["_id"]}, + {"$set": {"data": data}} + ) if "childs" in entity_data: self.import_to_avalon(entity_data["childs"], entity) + def unarchive_entity(self, entity, data): + # Unarchived asset should not use same data + new_entity = { + "_id": entity["_id"], + "schema": "avalon-core:asset-3.0", + "name": entity["name"], + "parent": self.project["_id"], + "type": "asset", + "data": data + } + io.replace_one( + {"_id": entity["_id"]}, + new_entity + ) + return new_entity + def create_avalon_asset(self, name, data): item = { "schema": "avalon-core:asset-3.0", diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 00e8a6fedf..9ad6a15dfe 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -1,33 +1,20 @@ import os import pyblish.api -import clique import pype.api +import pype.lib class ExtractJpegEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ + """Create jpg thumbnail from sequence using ffmpeg""" label = "Extract Jpeg EXR" hosts = ["shell"] order = pyblish.api.ExtractorOrder - families = ["imagesequence", "render", "write", "source"] + families = ["imagesequence", "render", "render2d", "source"] enabled = False def process(self, instance): - start = instance.data.get("frameStart") - stagingdir = os.path.normpath(instance.data.get("stagingDir")) - - collected_frames = os.listdir(stagingdir) - collections, remainder = clique.assemble(collected_frames) self.log.info("subset {}".format(instance.data['subset'])) if 'crypto' in instance.data['subset']: @@ -40,10 +27,16 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): representations_new = representations[:] for repre in representations: + tags = repre.get("tags", []) self.log.debug(repre) - if 'review' not in repre['tags']: - return + valid = 'review' in tags or "thumb-nuke" in tags + if not valid: + continue + if not isinstance(repre['files'], list): + continue + + stagingdir = os.path.normpath(repre.get("stagingDir")) input_file = repre['files'][0] # input_file = ( @@ -55,8 +48,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): filename = os.path.splitext(input_file)[0] if not filename.endswith('.'): filename += "." 
- jpegFile = filename + "jpg" - full_output_path = os.path.join(stagingdir, jpegFile) + jpeg_file = filename + "jpg" + full_output_path = os.path.join(stagingdir, jpeg_file) self.log.info("output {}".format(full_output_path)) @@ -65,9 +58,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): proj_name = os.environ.get('AVALON_PROJECT', '__default__') profile = config_data.get(proj_name, config_data['__default__']) + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + jpeg_items = [] - jpeg_items.append( - os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + jpeg_items.append(ffmpeg_path) # override file if already exists jpeg_items.append("-y") # use same input args like with mov @@ -87,9 +81,9 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): instance.data["representations"] = [] representation = { - 'name': 'jpg', + 'name': 'thumbnail', 'ext': 'jpg', - 'files': jpegFile, + 'files': jpeg_file, "stagingDir": stagingdir, "thumbnail": True, "tags": ['thumbnail'] diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index a11f681e61..c8a8510fb2 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -2,6 +2,7 @@ import os import pyblish.api import clique import pype.api +import pype.lib class ExtractReview(pyblish.api.InstancePlugin): @@ -11,7 +12,8 @@ class ExtractReview(pyblish.api.InstancePlugin): otherwise the representation is ignored. All new represetnations are created and encoded by ffmpeg following - presets found in `pype-config/presets/plugins/global/publish.json:ExtractReview:outputs`. To change the file extension + presets found in `pype-config/presets/plugins/global/ + publish.json:ExtractReview:outputs`. To change the file extension filter values use preset's attributes `ext_filter` """ @@ -22,316 +24,413 @@ class ExtractReview(pyblish.api.InstancePlugin): outputs = {} ext_filter = [] + to_width = 1920 + to_height = 1080 def process(self, instance): - to_width = 1920 - to_height = 1080 output_profiles = self.outputs or {} inst_data = instance.data - fps = inst_data.get("fps") - start_frame = inst_data.get("frameStart") - resolution_width = inst_data.get("resolutionWidth", to_width) - resolution_height = inst_data.get("resolutionHeight", to_height) + context_data = instance.context.data + fps = float(inst_data.get("fps")) + frame_start = inst_data.get("frameStart") + frame_end = inst_data.get("frameEnd") + handle_start = inst_data.get("handleStart", + context_data.get("handleStart")) + handle_end = inst_data.get("handleEnd", + context_data.get("handleEnd")) pixel_aspect = inst_data.get("pixelAspect", 1) + resolution_width = inst_data.get("resolutionWidth", self.to_width) + resolution_height = inst_data.get("resolutionHeight", self.to_height) self.log.debug("Families In: `{}`".format(inst_data["families"])) + self.log.debug("__ frame_start: {}".format(frame_start)) + self.log.debug("__ frame_end: {}".format(frame_end)) + self.log.debug("__ handle_start: {}".format(handle_start)) + self.log.debug("__ handle_end: {}".format(handle_end)) # get representation and loop them representations = inst_data["representations"] + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + # filter out mov and img sequences representations_new = representations[:] for repre in representations: - if repre['ext'] in self.ext_filter: - tags = repre.get("tags", []) - - if "thumbnail" in tags: - continue - - self.log.info("Try repre: {}".format(repre)) - - if "review" in tags: - 
staging_dir = repre["stagingDir"] - for name, profile in output_profiles.items(): - self.log.debug("Profile name: {}".format(name)) - - ext = profile.get("ext", None) - if not ext: - ext = "mov" - self.log.warning( - str("`ext` attribute not in output " - "profile. Setting to default ext: `mov`")) - - self.log.debug( - "instance.families: {}".format( - instance.data['families'])) - self.log.debug( - "profile.families: {}".format(profile['families'])) - - if any(item in instance.data['families'] for item in profile['families']): - if isinstance(repre["files"], list): - collections, remainder = clique.assemble( - repre["files"]) - - full_input_path = os.path.join( - staging_dir, collections[0].format( - '{head}{padding}{tail}') - ) - - filename = collections[0].format('{head}') - if filename.endswith('.'): - filename = filename[:-1] - else: - full_input_path = os.path.join( - staging_dir, repre["files"]) - filename = repre["files"].split(".")[0] - - repr_file = filename + "_{0}.{1}".format(name, ext) - - full_output_path = os.path.join( - staging_dir, repr_file) - - self.log.info("input {}".format(full_input_path)) - self.log.info("output {}".format(full_output_path)) - - repre_new = repre.copy() - - new_tags = [x for x in tags if x != "delete"] - p_tags = profile.get('tags', []) - self.log.info("p_tags: `{}`".format(p_tags)) - # add families - [instance.data["families"].append(t) - for t in p_tags - if t not in instance.data["families"]] - - # add to - [new_tags.append(t) for t in p_tags - if t not in new_tags] - - self.log.info("new_tags: `{}`".format(new_tags)) - - input_args = [] - - # overrides output file - input_args.append("-y") - - # preset's input data - input_args.extend(profile.get('input', [])) - - # necessary input data - # adds start arg only if image sequence - if isinstance(repre["files"], list): - input_args.append( - "-start_number {0} -framerate {1}".format( - start_frame, fps)) - - input_args.append("-i {}".format(full_input_path)) - - for audio in instance.data.get("audio", []): - offset_frames = ( - instance.data.get("startFrameReview") - - audio["offset"] - ) - offset_seconds = offset_frames / fps - - if offset_seconds > 0: - input_args.append("-ss") - else: - input_args.append("-itsoffset") - - input_args.append(str(abs(offset_seconds))) - - input_args.extend( - ["-i", audio["filename"]] - ) - - # Need to merge audio if there are more - # than 1 input. 
- if len(instance.data["audio"]) > 1: - input_args.extend( - [ - "-filter_complex", - "amerge", - "-ac", - "2" - ] - ) - - output_args = [] - codec_args = profile.get('codec', []) - output_args.extend(codec_args) - # preset's output data - output_args.extend(profile.get('output', [])) - - # defining image ratios - resolution_ratio = float(resolution_width / ( - resolution_height * pixel_aspect)) - delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) - - # get scale factor - scale_factor = to_height / ( - resolution_height * pixel_aspect) - self.log.debug(scale_factor) - - # letter_box - lb = profile.get('letter_box', 0) - if lb != 0: - ffmpet_width = to_width - ffmpet_height = to_height - if "reformat" not in p_tags: - lb /= pixel_aspect - if resolution_ratio != delivery_ratio: - ffmpet_width = resolution_width - ffmpet_height = int( - resolution_height * pixel_aspect) - else: - if resolution_ratio != delivery_ratio: - lb /= scale_factor - else: - lb /= pixel_aspect - - output_args.append(str( - "-filter:v scale={0}x{1}:flags=lanczos," - "setsar=1,drawbox=0:0:iw:" - "round((ih-(iw*(1/{2})))/2):t=fill:" - "c=black,drawbox=0:ih-round((ih-(iw*(" - "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" - "/2):t=fill:c=black").format( - ffmpet_width, ffmpet_height, lb)) - - # In case audio is longer than video. - output_args.append("-shortest") - - # output filename - output_args.append(full_output_path) - - self.log.debug( - "__ pixel_aspect: `{}`".format(pixel_aspect)) - self.log.debug( - "__ resolution_width: `{}`".format( - resolution_width)) - self.log.debug( - "__ resolution_height: `{}`".format( - resolution_height)) - - # scaling none square pixels and 1920 width - if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: - self.log.debug("lower then delivery") - width_scale = int(to_width * scale_factor) - width_half_pad = int(( - to_width - width_scale)/2) - height_scale = to_height - height_half_pad = 0 - else: - self.log.debug("heigher then delivery") - width_scale = to_width - width_half_pad = 0 - scale_factor = float(to_width) / float( - resolution_width) - self.log.debug(scale_factor) - height_scale = int( - resolution_height * scale_factor) - height_half_pad = int( - (to_height - height_scale)/2) - - self.log.debug( - "__ width_scale: `{}`".format(width_scale)) - self.log.debug( - "__ width_half_pad: `{}`".format( - width_half_pad)) - self.log.debug( - "__ height_scale: `{}`".format( - height_scale)) - self.log.debug( - "__ height_half_pad: `{}`".format( - height_half_pad)) - - scaling_arg = str( - "scale={0}x{1}:flags=lanczos," - "pad={2}:{3}:{4}:{5}:black,setsar=1" - ).format(width_scale, height_scale, - to_width, to_height, - width_half_pad, - height_half_pad - ) - - vf_back = self.add_video_filter_args( - output_args, scaling_arg) - # add it to output_args - output_args.insert(0, vf_back) - - # baking lut file application - lut_path = instance.data.get("lutPath") - if lut_path and ("bake-lut" in p_tags): - # removing Gama info as it is all baked in lut - gamma = next((g for g in input_args - if "-gamma" in g), None) - if gamma: - input_args.remove(gamma) - - # create lut argument - lut_arg = "lut3d=file='{}'".format( - lut_path.replace( - "\\", "/").replace(":/", "\\:/") - ) - lut_arg += ",colormatrix=bt601:bt709" - - vf_back = self.add_video_filter_args( - output_args, lut_arg) - # add it to output_args - output_args.insert(0, vf_back) - self.log.info("Added Lut to ffmpeg command") - self.log.debug( - "_ 
output_args: `{}`".format(output_args)) - - mov_args = [ - os.path.join( - os.environ.get( - "FFMPEG_PATH", - ""), "ffmpeg"), - " ".join(input_args), - " ".join(output_args) - ] - subprcs_cmd = " ".join(mov_args) - - # run subprocess - self.log.debug("Executing: {}".format(subprcs_cmd)) - output = pype.api.subprocess(subprcs_cmd) - self.log.debug("Output: {}".format(output)) - - # create representation data - repre_new.update({ - 'name': name, - 'ext': ext, - 'files': repr_file, - "tags": new_tags, - "outputName": name, - "codec": codec_args, - "_profile": profile, - "resolutionHeight": resolution_height, - "resolutionWidth": resolution_width, - }) - if repre_new.get('preview'): - repre_new.pop("preview") - if repre_new.get('thumbnail'): - repre_new.pop("thumbnail") - - # adding representation - self.log.debug("Adding: {}".format(repre_new)) - representations_new.append(repre_new) - else: - continue - else: + if repre['ext'] not in self.ext_filter: continue + tags = repre.get("tags", []) + + if "thumbnail" in tags: + continue + + self.log.info("Try repre: {}".format(repre)) + + if "review" not in tags: + continue + + staging_dir = repre["stagingDir"] + + # iterating preset output profiles + for name, profile in output_profiles.items(): + repre_new = repre.copy() + ext = profile.get("ext", None) + p_tags = profile.get('tags', []) + self.log.info("p_tags: `{}`".format(p_tags)) + + # adding control for presets to be sequence + # or single file + is_sequence = ("sequence" in p_tags) and (ext in ( + "png", "jpg", "jpeg")) + + # no handles switch from profile tags + no_handles = "no-handles" in p_tags + + self.log.debug("Profile name: {}".format(name)) + + if not ext: + ext = "mov" + self.log.warning( + str("`ext` attribute not in output " + "profile. Setting to default ext: `mov`")) + + self.log.debug( + "instance.families: {}".format( + instance.data['families'])) + self.log.debug( + "profile.families: {}".format(profile['families'])) + + profile_family_check = False + for _family in profile['families']: + if _family in instance.data['families']: + profile_family_check = True + break + + if not profile_family_check: + continue + + if isinstance(repre["files"], list): + collections, remainder = clique.assemble( + repre["files"]) + + full_input_path = os.path.join( + staging_dir, collections[0].format( + '{head}{padding}{tail}') + ) + + filename = collections[0].format('{head}') + if filename.endswith('.'): + filename = filename[:-1] + else: + full_input_path = os.path.join( + staging_dir, repre["files"]) + filename = repre["files"].split(".")[0] + + repr_file = filename + "_{0}.{1}".format(name, ext) + full_output_path = os.path.join( + staging_dir, repr_file) + + if is_sequence: + filename_base = filename + "_{0}".format(name) + repr_file = filename_base + ".%08d.{0}".format( + ext) + repre_new["sequence_file"] = repr_file + full_output_path = os.path.join( + staging_dir, filename_base, repr_file) + + self.log.info("input {}".format(full_input_path)) + self.log.info("output {}".format(full_output_path)) + + new_tags = [x for x in tags if x != "delete"] + + # add families + [instance.data["families"].append(t) + for t in p_tags + if t not in instance.data["families"]] + + # add to + [new_tags.append(t) for t in p_tags + if t not in new_tags] + + self.log.info("new_tags: `{}`".format(new_tags)) + + input_args = [] + output_args = [] + + # overrides output file + input_args.append("-y") + + # preset's input data + input_args.extend(profile.get('input', [])) + + # necessary input data + # adds 
start arg only if image sequence + + frame_start_handle = frame_start - handle_start + frame_end_handle = frame_end + handle_end + if isinstance(repre["files"], list): + if frame_start_handle != repre.get("detectedStart", frame_start_handle): + frame_start_handle = repre.get("detectedStart") + + # exclude handle if no handles defined + if no_handles: + frame_start_handle = frame_start + frame_end_handle = frame_end + + input_args.append( + "-start_number {0} -framerate {1}".format( + frame_start_handle, fps)) + else: + if no_handles: + start_sec = float(handle_start) / fps + input_args.append("-ss {:0.2f}".format(start_sec)) + frame_start_handle = frame_start + frame_end_handle = frame_end + + input_args.append("-i {}".format(full_input_path)) + + for audio in instance.data.get("audio", []): + offset_frames = ( + instance.data.get("frameStartFtrack") - + audio["offset"] + ) + offset_seconds = offset_frames / fps + + if offset_seconds > 0: + input_args.append("-ss") + else: + input_args.append("-itsoffset") + + input_args.append(str(abs(offset_seconds))) + + input_args.extend( + ["-i", audio["filename"]] + ) + + # Need to merge audio if there are more + # than 1 input. + if len(instance.data["audio"]) > 1: + input_args.extend( + [ + "-filter_complex", + "amerge", + "-ac", + "2" + ] + ) + + codec_args = profile.get('codec', []) + output_args.extend(codec_args) + # preset's output data + output_args.extend(profile.get('output', [])) + + # defining image ratios + resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height + delivery_ratio = float(self.to_width) / float(self.to_height) + self.log.debug( + "__ resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug( + "__ delivery_ratio: `{}`".format(delivery_ratio)) + + # get scale factor + scale_factor = float(self.to_height) / ( + resolution_height * pixel_aspect) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float( + "{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float( + "{:0.2f}".format(delivery_ratio)) + + if resolution_ratio_test < delivery_ratio_test: + scale_factor = float(self.to_width) / ( + resolution_width * pixel_aspect) + + self.log.debug("__ scale_factor: `{}`".format(scale_factor)) + + # letter_box + lb = profile.get('letter_box', 0) + if lb != 0: + ffmpeg_width = self.to_width + ffmpeg_height = self.to_height + if "reformat" not in p_tags: + lb /= pixel_aspect + if resolution_ratio_test != delivery_ratio_test: + ffmpeg_width = resolution_width + ffmpeg_height = int( + resolution_height * pixel_aspect) + else: + if resolution_ratio_test != delivery_ratio_test: + lb /= scale_factor + else: + lb /= pixel_aspect + + output_args.append(str( + "-filter:v scale={0}x{1}:flags=lanczos," + "setsar=1,drawbox=0:0:iw:" + "round((ih-(iw*(1/{2})))/2):t=fill:" + "c=black,drawbox=0:ih-round((ih-(iw*(" + "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" + "/2):t=fill:c=black").format( + ffmpeg_width, ffmpeg_height, lb)) + + # In case audio is longer than video. 
+ output_args.append("-shortest") + + if no_handles: + duration_sec = float(frame_end_handle - frame_start_handle + 1) / fps + + output_args.append("-t {:0.2f}".format(duration_sec)) + + # output filename + output_args.append(full_output_path) + + self.log.debug( + "__ pixel_aspect: `{}`".format(pixel_aspect)) + self.log.debug( + "__ resolution_width: `{}`".format( + resolution_width)) + self.log.debug( + "__ resolution_height: `{}`".format( + resolution_height)) + + # scaling none square pixels and 1920 width + if "reformat" in p_tags: + if resolution_ratio_test < delivery_ratio_test: + self.log.debug("lower then delivery") + width_scale = int(self.to_width * scale_factor) + width_half_pad = int(( + self.to_width - width_scale)/2) + height_scale = self.to_height + height_half_pad = 0 + else: + self.log.debug("heigher then delivery") + width_scale = self.to_width + width_half_pad = 0 + scale_factor = float(self.to_width) / (float( + resolution_width) * pixel_aspect) + self.log.debug( + "__ scale_factor: `{}`".format( + scale_factor)) + height_scale = int( + resolution_height * scale_factor) + height_half_pad = int( + (self.to_height - height_scale)/2) + + self.log.debug( + "__ width_scale: `{}`".format(width_scale)) + self.log.debug( + "__ width_half_pad: `{}`".format( + width_half_pad)) + self.log.debug( + "__ height_scale: `{}`".format( + height_scale)) + self.log.debug( + "__ height_half_pad: `{}`".format( + height_half_pad)) + + scaling_arg = str( + "scale={0}x{1}:flags=lanczos," + "pad={2}:{3}:{4}:{5}:black,setsar=1" + ).format(width_scale, height_scale, + self.to_width, self.to_height, + width_half_pad, + height_half_pad + ) + + vf_back = self.add_video_filter_args( + output_args, scaling_arg) + # add it to output_args + output_args.insert(0, vf_back) + + # baking lut file application + lut_path = instance.data.get("lutPath") + if lut_path and ("bake-lut" in p_tags): + # removing Gama info as it is all baked in lut + gamma = next((g for g in input_args + if "-gamma" in g), None) + if gamma: + input_args.remove(gamma) + + # create lut argument + lut_arg = "lut3d=file='{}'".format( + lut_path.replace( + "\\", "/").replace(":/", "\\:/") + ) + lut_arg += ",colormatrix=bt601:bt709" + + vf_back = self.add_video_filter_args( + output_args, lut_arg) + # add it to output_args + output_args.insert(0, vf_back) + self.log.info("Added Lut to ffmpeg command") + self.log.debug( + "_ output_args: `{}`".format(output_args)) + + if is_sequence: + stg_dir = os.path.dirname(full_output_path) + + if not os.path.exists(stg_dir): + self.log.debug( + "creating dir: {}".format(stg_dir)) + os.mkdir(stg_dir) + + mov_args = [ + ffmpeg_path, + " ".join(input_args), + " ".join(output_args) + ] + subprcs_cmd = " ".join(mov_args) + + # run subprocess + self.log.debug("Executing: {}".format(subprcs_cmd)) + output = pype.api.subprocess(subprcs_cmd) + self.log.debug("Output: {}".format(output)) + + # create representation data + repre_new.update({ + 'name': name, + 'ext': ext, + 'files': repr_file, + "tags": new_tags, + "outputName": name, + "codec": codec_args, + "_profile": profile, + "resolutionHeight": resolution_height, + "resolutionWidth": resolution_width, + "frameStartFtrack": frame_start_handle, + "frameEndFtrack": frame_end_handle + }) + if is_sequence: + repre_new.update({ + "stagingDir": stg_dir, + "files": os.listdir(stg_dir) + }) + if no_handles: + repre_new.update({ + "outputName": name + "_noHandles", + "frameStartFtrack": frame_start, + "frameEndFtrack": frame_end + }) + if repre_new.get('preview'): + 
repre_new.pop("preview") + if repre_new.get('thumbnail'): + repre_new.pop("thumbnail") + + # adding representation + self.log.debug("Adding: {}".format(repre_new)) + representations_new.append(repre_new) + for repre in representations_new: if "delete" in repre.get("tags", []): representations_new.remove(repre) + instance.data.update({ + "reviewToWidth": self.to_width, + "reviewToHeight": self.to_height + }) + self.log.debug( "new representations: {}".format(representations_new)) instance.data["representations"] = representations_new diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 9a720b77a9..8c33a0d853 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -1,5 +1,6 @@ import os import pype.api +import pype.lib import pyblish @@ -21,26 +22,38 @@ class ExtractReviewSlate(pype.api.Extractor): suffix = "_slate" slate_path = inst_data.get("slateFrame") - ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg") + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + + # values are set in ExtractReview + to_width = inst_data["reviewToWidth"] + to_height = inst_data["reviewToHeight"] - to_width = 1920 - to_height = 1080 resolution_width = inst_data.get("resolutionWidth", to_width) resolution_height = inst_data.get("resolutionHeight", to_height) pixel_aspect = inst_data.get("pixelAspect", 1) fps = inst_data.get("fps") # defining image ratios - resolution_ratio = float(resolution_width / ( - resolution_height * pixel_aspect)) + resolution_ratio = (float(resolution_width) * pixel_aspect) / resolution_height delivery_ratio = float(to_width) / float(to_height) - self.log.debug(resolution_ratio) - self.log.debug(delivery_ratio) + self.log.debug("__ resolution_ratio: `{}`".format(resolution_ratio)) + self.log.debug("__ delivery_ratio: `{}`".format(delivery_ratio)) # get scale factor - scale_factor = to_height / ( + scale_factor = float(to_height) / ( resolution_height * pixel_aspect) - self.log.debug(scale_factor) + + # shorten two decimals long float number for testing conditions + resolution_ratio_test = float( + "{:0.2f}".format(resolution_ratio)) + delivery_ratio_test = float( + "{:0.2f}".format(delivery_ratio)) + + if resolution_ratio_test < delivery_ratio_test: + scale_factor = float(to_width) / ( + resolution_width * pixel_aspect) + + self.log.debug("__ scale_factor: `{}`".format(scale_factor)) for i, repre in enumerate(inst_data["representations"]): _remove_at_end = [] @@ -94,7 +107,7 @@ class ExtractReviewSlate(pype.api.Extractor): # scaling none square pixels and 1920 width if "reformat" in p_tags: - if resolution_ratio < delivery_ratio: + if resolution_ratio_test < delivery_ratio_test: self.log.debug("lower then delivery") width_scale = int(to_width * scale_factor) width_half_pad = int(( @@ -105,7 +118,8 @@ class ExtractReviewSlate(pype.api.Extractor): self.log.debug("heigher then delivery") width_scale = to_width width_half_pad = 0 - scale_factor = float(to_width) / float(resolution_width) + scale_factor = float(to_width) / (float( + resolution_width) * pixel_aspect) self.log.debug(scale_factor) height_scale = int( resolution_height * scale_factor) diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py deleted file mode 100644 index e24bad362d..0000000000 --- a/pype/plugins/global/publish/integrate.py +++ /dev/null @@ -1,417 +0,0 @@ -import os -import logging -import shutil - -import 
errno -import pyblish.api -from avalon import api, io -from avalon.vendor import filelink - - -log = logging.getLogger(__name__) - - -class IntegrateAsset(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ - - label = "Integrate Asset" - order = pyblish.api.IntegratorOrder - families = ["assembly"] - exclude_families = ["clip"] - - def process(self, instance): - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: - return - - self.register(instance) - - self.log.info("Integrating Asset in to the database ...") - if instance.data.get('transfer', True): - self.integrate(instance) - - def register(self, instance): - # Required environment variables - PROJECT = api.Session["AVALON_PROJECT"] - ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"] - LOCATION = api.Session["AVALON_LOCATION"] - - context = instance.context - # Atomicity - # - # Guarantee atomic publishes - each asset contains - # an identical set of members. - # __ - # / o - # / \ - # | o | - # \ / - # o __/ - # - assert all(result["success"] for result in context.data["results"]), ( - "Atomicity not held, aborting.") - - # Assemble - # - # | - # v - # ---> <---- - # ^ - # | - # - stagingdir = instance.data.get("stagingDir") - assert stagingdir, ("Incomplete instance \"%s\": " - "Missing reference to staging area." % instance) - - # extra check if stagingDir actually exists and is available - - self.log.debug("Establishing staging directory @ %s" % stagingdir) - - # Ensure at least one file is set up for transfer in staging dir. - files = instance.data.get("files", []) - assert files, "Instance has no files to transfer" - assert isinstance(files, (list, tuple)), ( - "Instance 'files' must be a list, got: {0}".format(files) - ) - - project = io.find_one({"type": "project"}) - - asset = io.find_one({ - "type": "asset", - "name": ASSET, - "parent": project["_id"] - }) - - assert all([project, asset]), ("Could not find current project or " - "asset '%s'" % ASSET) - - subset = self.get_subset(asset, instance) - - # get next version - latest_version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - {"name": True}, - sort=[("name", -1)] - ) - - next_version = 1 - if latest_version is not None: - next_version += latest_version["name"] - - self.log.info("Verifying version from assumed destination") - - assumed_data = instance.data["assumedTemplateData"] - assumed_version = assumed_data["version"] - if assumed_version != next_version: - raise AttributeError("Assumed version 'v{0:03d}' does not match" - "next version in database " - "('v{1:03d}')".format(assumed_version, - next_version)) - - self.log.debug("Next version: v{0:03d}".format(next_version)) - - version_data = self.create_version_data(context, instance) - version = self.create_version(subset=subset, - version_number=next_version, - locations=[LOCATION], - data=version_data) - - self.log.debug("Creating version ...") - version_id = io.insert_one(version).inserted_id - - # Write to disk - # _ - # | | - # _| |_ - # ____\ / - # |\ \ / \ - # \ \ v \ - # \ \________. 
- # \|________| - # - root = api.registered_root() - hierarchy = "" - parents = io.find_one({ - "type": 'asset', - "name": ASSET - })['data']['parents'] - if parents and len(parents) > 0: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*parents) - - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset['silo'], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy} - - # template_publish = project["config"]["template"]["publish"] - anatomy = instance.context.data['anatomy'] - - # Find the representations to transfer amongst the files - # Each should be a single representation (as such, a single extension) - representations = [] - destination_list = [] - if 'transfers' not in instance.data: - instance.data['transfers'] = [] - - for files in instance.data["files"]: - - # Collection - # _______ - # |______|\ - # | |\| - # | || - # | || - # | || - # |_______| - # - - if isinstance(files, list): - collection = files - # Assert that each member has identical suffix - _, ext = os.path.splitext(collection[0]) - assert all(ext == os.path.splitext(name)[1] - for name in collection), ( - "Files had varying suffixes, this is a bug" - ) - - assert not any(os.path.isabs(name) for name in collection) - - template_data["representation"] = ext[1:] - - for fname in collection: - - src = os.path.join(stagingdir, fname) - anatomy_filled = anatomy.format(template_data) - dst = anatomy_filled["publish"]["path"] - - instance.data["transfers"].append([src, dst]) - template = anatomy.templates["publish"]["path"] - - else: - # Single file - # _______ - # | |\ - # | | - # | | - # | | - # |_______| - # - fname = files - assert not os.path.isabs(fname), ( - "Given file name is a full path" - ) - _, ext = os.path.splitext(fname) - - template_data["representation"] = ext[1:] - - src = os.path.join(stagingdir, fname) - anatomy_filled = anatomy.format(template_data) - dst = anatomy_filled["publish"]["path"] - - instance.data["transfers"].append([src, dst]) - template = anatomy.templates["publish"]["path"] - - representation = { - "schema": "pype:representation-2.0", - "type": "representation", - "parent": version_id, - "name": ext[1:], - "data": {'path': dst, 'template': template}, - "dependencies": instance.data.get("dependencies", "").split(), - - # Imprint shortcut to context - # for performance reasons. - "context": { - "root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - 'task': api.Session["AVALON_TASK"], - "silo": asset['silo'], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": version["name"], - "hierarchy": hierarchy, - "representation": ext[1:] - } - } - - destination_list.append(dst) - instance.data['destination_list'] = destination_list - representations.append(representation) - - self.log.info("Registering {} items".format(len(representations))) - - io.insert_many(representations) - - def integrate(self, instance): - """Move the files - - Through `instance.data["transfers"]` - - Args: - instance: the instance to integrate - """ - - transfers = instance.data.get("transfers", list()) - - for src, dest in transfers: - self.log.info("Copying file .. 
{} -> {}".format(src, dest)) - self.copy_file(src, dest) - - # Produce hardlinked copies - # Note: hardlink can only be produced between two files on the same - # server/disk and editing one of the two will edit both files at once. - # As such it is recommended to only make hardlinks between static files - # to ensure publishes remain safe and non-edited. - hardlinks = instance.data.get("hardlinks", list()) - for src, dest in hardlinks: - self.log.info("Hardlinking file .. {} -> {}".format(src, dest)) - self.hardlink_file(src, dest) - - def copy_file(self, src, dst): - """ Copy given source to destination - - Arguments: - src (str): the source file which needs to be copied - dst (str): the destination of the sourc file - Returns: - None - """ - - dirname = os.path.dirname(dst) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - raise - - shutil.copy(src, dst) - - def hardlink_file(self, src, dst): - - dirname = os.path.dirname(dst) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - raise - - filelink.create(src, dst, filelink.HARDLINK) - - def get_subset(self, asset, instance): - - subset = io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"] - }) - - if subset is None: - subset_name = instance.data["subset"] - self.log.info("Subset '%s' not found, creating.." % subset_name) - - _id = io.insert_one({ - "schema": "avalon-core:subset-2.0", - "type": "subset", - "name": subset_name, - "data": {}, - "parent": asset["_id"] - }).inserted_id - - subset = io.find_one({"_id": _id}) - - return subset - - def create_version(self, subset, version_number, locations, data=None): - """ Copy given source to destination - - Args: - subset (dict): the registered subset of the asset - version_number (int): the version number - locations (list): the currently registered locations - - Returns: - dict: collection of data to create a version - """ - # Imprint currently registered location - version_locations = [location for location in locations if - location is not None] - - return {"schema": "avalon-core:version-2.0", - "type": "version", - "parent": subset["_id"], - "name": version_number, - "locations": version_locations, - "data": data} - - def create_version_data(self, context, instance): - """Create the data collection for the version - - Args: - context: the current context - instance: the current instance being published - - Returns: - dict: the required information with instance.data as key - """ - - families = [] - current_families = instance.data.get("families", list()) - instance_family = instance.data.get("family", None) - - if instance_family is not None: - families.append(instance_family) - families += current_families - - self.log.debug("Registered root: {}".format(api.registered_root())) - # create relative source path for DB - try: - source = instance.data['source'] - except KeyError: - source = context.data["currentFile"] - - relative_path = os.path.relpath(source, api.registered_root()) - source = os.path.join("{root}", relative_path).replace("\\", "/") - - self.log.debug("Source: {}".format(source)) - version_data = {"families": families, - "time": context.data["time"], - "author": context.data["user"], - "source": source, - "comment": context.data.get("comment"), - "machine": context.data.get("machine"), - "fps": context.data.get("fps")} - - # 
Include optional data if present in - optionals = [ - "frameStart", "frameEnd", "step", "handles", "sourceHashes" - ] - for key in optionals: - if key in instance.data: - version_data[key] = instance.data[key] - - return version_data diff --git a/pype/plugins/global/publish/integrate_assumed_destination.py b/pype/plugins/global/publish/integrate_assumed_destination.py deleted file mode 100644 index d090e2711a..0000000000 --- a/pype/plugins/global/publish/integrate_assumed_destination.py +++ /dev/null @@ -1,147 +0,0 @@ -import pyblish.api -import os - -from avalon import io, api - - -class IntegrateAssumedDestination(pyblish.api.InstancePlugin): - """Generate the assumed destination path where the file will be stored""" - - label = "Integrate Assumed Destination" - order = pyblish.api.IntegratorOrder - 0.05 - families = ["clip", "projectfile", "plate"] - - def process(self, instance): - - anatomy = instance.context.data['anatomy'] - - self.create_destination_template(instance, anatomy) - - template_data = instance.data["assumedTemplateData"] - # self.log.info(anatomy.templates) - anatomy_filled = anatomy.format(template_data) - - # self.log.info(anatomy_filled) - mock_template = anatomy_filled["publish"]["path"] - - # For now assume resources end up in a "resources" folder in the - # published folder - mock_destination = os.path.join(os.path.dirname(mock_template), - "resources") - - # Clean the path - mock_destination = os.path.abspath( - os.path.normpath(mock_destination)).replace("\\", "/") - - # Define resource destination and transfers - resources = instance.data.get("resources", list()) - transfers = instance.data.get("transfers", list()) - for resource in resources: - - # Add destination to the resource - source_filename = os.path.basename( - resource["source"]).replace("\\", "/") - destination = os.path.join(mock_destination, source_filename) - - # Force forward slashes to fix issue with software unable - # to work correctly with backslashes in specific scenarios - # (e.g. escape characters in PLN-151 V-Ray UDIM) - destination = destination.replace("\\", "/") - - resource['destination'] = destination - - # Collect transfers for the individual files of the resource - # e.g. all individual files of a cache or UDIM textures. 
- files = resource['files'] - for fsrc in files: - fname = os.path.basename(fsrc) - fdest = os.path.join( - mock_destination, fname).replace("\\", "/") - transfers.append([fsrc, fdest]) - - instance.data["resources"] = resources - instance.data["transfers"] = transfers - - def create_destination_template(self, instance, anatomy): - """Create a filepath based on the current data available - - Example template: - {root}/{project}/{asset}/publish/{subset}/v{version:0>3}/ - {subset}.{representation} - Args: - instance: the instance to publish - - Returns: - file path (str) - """ - - # get all the stuff from the database - subset_name = instance.data["subset"] - self.log.info(subset_name) - asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - a_template = anatomy.templates - - project = io.find_one( - {"type": "project", "name": project_name}, - projection={"config": True, "data": True} - ) - - template = a_template['publish']['path'] - # anatomy = instance.context.data['anatomy'] - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += version["name"] - - if instance.data.get('version'): - version_number = int(instance.data.get('version')) - - padding = int(a_template['render']['padding']) - - hierarchy = asset['data']['parents'] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = "/".join(hierarchy) - - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "family": instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "frame": ('#' * padding), - "version": version_number, - "hierarchy": hierarchy, - "representation": "TEMP"} - - instance.data["assumedTemplateData"] = template_data - self.log.info(template_data) - instance.data["template"] = template diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index e577c477c3..aa214f36cb 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -2,8 +2,11 @@ import os from os.path import getsize import logging import sys +import copy import clique import errno + +from pymongo import DeleteOne, InsertOne import pyblish.api from avalon import api, io from avalon.vendor import filelink @@ -76,8 +79,15 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "source", "matchmove", "image" + "source", + "assembly", + "textures" ] exclude_families = ["clip"] + db_representation_context_keys = [ + "project", "asset", "task", "subset", "version", "representation", + "family", "hierarchy", "task", "username" + ] def process(self, instance): @@ -94,144 +104,148 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def register(self, instance): # Required environment variables - PROJECT = api.Session["AVALON_PROJECT"] - ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"] - TASK = instance.data.get("task") or 
api.Session["AVALON_TASK"] - LOCATION = api.Session["AVALON_LOCATION"] + anatomy_data = instance.data["anatomyData"] + + io.install() context = instance.context - # Atomicity - # - # Guarantee atomic publishes - each asset contains - # an identical set of members. - # __ - # / o - # / \ - # | o | - # \ / - # o __/ - # - # for result in context.data["results"]: - # if not result["success"]: - # self.log.debug(result) - # exc_type, exc_value, exc_traceback = result["error_info"] - # extracted_traceback = traceback.extract_tb(exc_traceback)[-1] - # self.log.debug( - # "Error at line {}: \"{}\"".format( - # extracted_traceback[1], result["error"] - # ) - # ) - # assert all(result["success"] for result in context.data["results"]),( - # "Atomicity not held, aborting.") - # Assemble - # - # | - # v - # ---> <---- - # ^ - # | - # + project_entity = instance.data["projectEntity"] + + context_asset_name = context.data["assetEntity"]["name"] + + asset_name = instance.data["asset"] + asset_entity = instance.data.get("assetEntity") + if not asset_entity or asset_entity["name"] != context_asset_name: + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) + assert asset_entity, ( + "No asset found by the name \"{0}\" in project \"{1}\"" + ).format(asset_name, project_entity["name"]) + + instance.data["assetEntity"] = asset_entity + + # update anatomy data with asset specific keys + # - name should already been set + hierarchy = "" + parents = asset_entity["data"]["parents"] + if parents: + hierarchy = "/".join(parents) + anatomy_data["hierarchy"] = hierarchy + + task_name = instance.data.get("task") + if task_name: + anatomy_data["task"] = task_name + stagingdir = instance.data.get("stagingDir") if not stagingdir: - self.log.info('''{} is missing reference to staging - directory Will try to get it from - representation'''.format(instance)) + self.log.info(( + "{0} is missing reference to staging directory." + " Will try to get it from representation." + ).format(instance)) - # extra check if stagingDir actually exists and is available - - self.log.debug("Establishing staging directory @ %s" % stagingdir) + else: + self.log.debug( + "Establishing staging directory @ {0}".format(stagingdir) + ) # Ensure at least one file is set up for transfer in staging dir. 
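# (The representation checks just below enforce that at least one file is
# queued for transfer.)
#
# Illustrative example of the "hierarchy" value prepared earlier in
# register(): it is joined from the asset's parents (hypothetical names
# here), and anatomy path templates can later consume it as {hierarchy}.
parents = ["shots", "sq010"]          # asset_entity["data"]["parents"]
hierarchy = "/".join(parents) if parents else ""
assert hierarchy == "shots/sq010"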
- repres = instance.data.get("representations", None) + repres = instance.data.get("representations") assert repres, "Instance has no files to transfer" assert isinstance(repres, (list, tuple)), ( - "Instance 'files' must be a list, got: {0}".format(repres) + "Instance 'files' must be a list, got: {0} {1}".format( + str(type(repres)), str(repres) + ) ) - # FIXME: io is not initialized at this point for shell host - io.install() - project = io.find_one({"type": "project"}) + subset = self.get_subset(asset_entity, instance) - asset = io.find_one({ - "type": "asset", - "name": ASSET, - "parent": project["_id"] - }) - - assert all([project, asset]), ("Could not find current project or " - "asset '%s'" % ASSET) - - subset = self.get_subset(asset, instance) - - # get next version - latest_version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - {"name": True}, - sort=[("name", -1)] - ) - - next_version = 1 - if latest_version is not None: - next_version += latest_version["name"] - - if instance.data.get('version'): - next_version = int(instance.data.get('version')) - - self.log.debug("Next version: v{0:03d}".format(next_version)) + version_number = instance.data["version"] + self.log.debug("Next version: v{}".format(version_number)) version_data = self.create_version_data(context, instance) version_data_instance = instance.data.get('versionData') - if version_data_instance: version_data.update(version_data_instance) - version = self.create_version(subset=subset, - version_number=next_version, - locations=[LOCATION], - data=version_data) + # TODO rename method from `create_version` to + # `prepare_version` or similar... + version = self.create_version( + subset=subset, + version_number=version_number, + data=version_data + ) self.log.debug("Creating version ...") + + new_repre_names_low = [_repre["name"].lower() for _repre in repres] + existing_version = io.find_one({ 'type': 'version', 'parent': subset["_id"], - 'name': next_version + 'name': version_number }) + if existing_version is None: version_id = io.insert_one(version).inserted_id else: + # Check if instance have set `append` mode which cause that + # only replicated representations are set to archive + append_repres = instance.data.get("append", False) + + # Update version data + # TODO query by _id and io.update_many({ 'type': 'version', 'parent': subset["_id"], - 'name': next_version - }, {'$set': version} - ) + 'name': version_number + }, { + '$set': version + }) version_id = existing_version['_id'] + + # Find representations of existing version and archive them + current_repres = list(io.find({ + "type": "representation", + "parent": version_id + })) + bulk_writes = [] + for repre in current_repres: + if append_repres: + # archive only duplicated representations + if repre["name"].lower() not in new_repre_names_low: + continue + # Representation must change type, + # `_id` must be stored to other key and replaced with new + # - that is because new representations should have same ID + repre_id = repre["_id"] + bulk_writes.append(DeleteOne({"_id": repre_id})) + + repre["orig_id"] = repre_id + repre["_id"] = io.ObjectId() + repre["type"] = "archived_representation" + bulk_writes.append(InsertOne(repre)) + + # bulk updates + if bulk_writes: + io._database[io.Session["AVALON_PROJECT"]].bulk_write( + bulk_writes + ) + + existing_repres = list(io.find({ + "parent": version_id, + "type": "archived_representation" + })) + instance.data['version'] = version['name'] - # Write to disk - # _ - # | | - # _| |_ - # ____\ / 
- # |\ \ / \ - # \ \ v \ - # \ \________. - # \|________| - # - root = api.registered_root() - hierarchy = "" - parents = io.find_one({ - "type": 'asset', - "name": ASSET - })['data']['parents'] - if parents and len(parents) > 0: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*parents) + intent = context.data.get("intent") + if intent is not None: + anatomy_data["intent"] = intent anatomy = instance.context.data['anatomy'] @@ -244,27 +258,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): instance.data['transfers'] = [] for idx, repre in enumerate(instance.data["representations"]): - - # Collection - # _______ - # |______|\ - # | |\| - # | || - # | || - # | || - # |_______| - # # create template data for Anatomy - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset.get('silo'), - "task": TASK, - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy} + template_data = copy.deepcopy(anatomy_data) + if intent is not None: + template_data["intent"] = intent resolution_width = repre.get("resolutionWidth") resolution_height = repre.get("resolutionHeight") @@ -282,11 +279,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): stagingdir = repre['stagingDir'] if repre.get('anatomy_template'): template_name = repre['anatomy_template'] + if repre.get("outputName"): + template_data["output"] = repre['outputName'] + template = os.path.normpath( anatomy.templates[template_name]["path"]) sequence_repre = isinstance(files, list) - + repre_context = None if sequence_repre: src_collections, remainder = clique.assemble(files) self.log.debug( @@ -309,10 +309,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_data["representation"] = repre['ext'] template_data["frame"] = src_padding_exp % i anatomy_filled = anatomy.format(template_data) - + template_filled = anatomy_filled[template_name]["path"] + if repre_context is None: + repre_context = template_filled.used_values test_dest_files.append( - os.path.normpath( - anatomy_filled[template_name]["path"]) + os.path.normpath(template_filled) ) self.log.debug( @@ -326,23 +327,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = None if repre.get("frameStart"): - frame_start_padding = len(str( - repre.get("frameEnd"))) + frame_start_padding = ( + anatomy.templates["render"]["padding"] + ) index_frame_start = int(repre.get("frameStart")) # exception for slate workflow - if "slate" in instance.data["families"]: + if index_frame_start and "slate" in instance.data["families"]: index_frame_start -= 1 dst_padding_exp = src_padding_exp dst_start_frame = None for i in src_collection.indexes: + # TODO 1.) do not count padding in each index iteration + # 2.) 
do not count dst_padding from src_padding before + # index_frame_start check src_padding = src_padding_exp % i - # for adding first frame into db - if not dst_start_frame: - dst_start_frame = src_padding - src_file_name = "{0}{1}{2}".format( src_head, src_padding, src_tail) @@ -364,6 +365,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("source: {}".format(src)) instance.data["transfers"].append([src, dst]) + # for adding first frame into db + if not dst_start_frame: + dst_start_frame = dst_padding + dst = "{0}{1}{2}".format( dst_head, dst_start_frame, @@ -387,20 +392,38 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_data["representation"] = repre['ext'] - if repre.get("outputName"): - template_data["output"] = repre['outputName'] - src = os.path.join(stagingdir, fname) anatomy_filled = anatomy.format(template_data) - dst = os.path.normpath( - anatomy_filled[template_name]["path"]).replace("..", ".") + template_filled = anatomy_filled[template_name]["path"] + repre_context = template_filled.used_values + dst = os.path.normpath(template_filled).replace("..", ".") instance.data["transfers"].append([src, dst]) repre['published_path'] = self.unc_convert(dst) self.log.debug("__ dst: {}".format(dst)) + for key in self.db_representation_context_keys: + value = template_data.get(key) + if not value: + continue + repre_context[key] = template_data[key] + + # Use previous representation's id if there are any + repre_id = None + repre_name_low = repre["name"].lower() + for _repre in existing_repres: + # NOTE should we check lowered names? + if repre_name_low == _repre["name"]: + repre_id = _repre["orig_id"] + break + + # Create new id if existing representations does not match + if repre_id is None: + repre_id = io.ObjectId() + representation = { + "_id": repre_id, "schema": "pype:representation-2.0", "type": "representation", "parent": version_id, @@ -410,26 +433,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Imprint shortcut to context # for performance reasons. 
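# (The hunk below swaps the hand-assembled context dict for repre_context,
# which is read from the filled anatomy template and then topped up with
# whitelisted keys.)
#
# Minimal sketch of that whitelisting step; the template_data values are
# hypothetical and the key list is abridged:
db_representation_context_keys = [
    "project", "asset", "task", "subset", "version",
    "representation", "family", "hierarchy", "username",
]
template_data = {
    "project": {"name": "demo", "code": "dm"},
    "asset": "sh010",
    "subset": "renderMain",
    "version": 3,
    "frame": "0001",   # filled for sequences, but not whitelisted
}
repre_context = {}
for key in db_representation_context_keys:
    value = template_data.get(key)
    if not value:
        continue
    repre_context[key] = value
# repre_context keeps only the whitelisted keys; "frame" is added to the
# stored context separately, and only for sequence representations.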
- "context": { - "root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - 'task': TASK, - "silo": asset.get('silo'), - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": version["name"], - "hierarchy": hierarchy, - "representation": repre['ext'] - } + "context": repre_context } if repre.get("outputName"): representation["context"]["output"] = repre['outputName'] if sequence_repre and repre.get("frameStart"): - representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart")) + representation['context']['frame'] = ( + dst_padding_exp % int(repre.get("frameStart")) + ) self.log.debug("__ representation: {}".format(representation)) destination_list.append(dst) @@ -438,11 +451,19 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): representations.append(representation) self.log.debug("__ representations: {}".format(representations)) + # Remove old representations if there are any (before insertion of new) + if existing_repres: + repre_ids_to_remove = [] + for repre in existing_repres: + repre_ids_to_remove.append(repre["_id"]) + io.delete_many({"_id": {"$in": repre_ids_to_remove}}) + self.log.debug("__ representations: {}".format(representations)) for rep in instance.data["representations"]: self.log.debug("__ represNAME: {}".format(rep['name'])) self.log.debug("__ represPATH: {}".format(rep['published_path'])) io.insert_many(representations) + instance.data["published_representations"] = representations # self.log.debug("Representation: {}".format(representations)) self.log.info("Registered {} items".format(len(representations))) @@ -502,7 +523,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ src = self.unc_convert(src) dst = self.unc_convert(dst) - + src = os.path.normpath(src) + dst = os.path.normpath(dst) self.log.debug("Copying file .. {} -> {}".format(src, dst)) dirname = os.path.dirname(dst) try: @@ -538,14 +560,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): filelink.create(src, dst, filelink.HARDLINK) def get_subset(self, asset, instance): + subset_name = instance.data["subset"] subset = io.find_one({ "type": "subset", "parent": asset["_id"], - "name": instance.data["subset"] + "name": subset_name }) if subset is None: - subset_name = instance.data["subset"] self.log.info("Subset '%s' not found, creating.." % subset_name) self.log.debug("families. 
%s" % instance.data.get('families')) self.log.debug( @@ -574,26 +596,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): return subset - def create_version(self, subset, version_number, locations, data=None): + def create_version(self, subset, version_number, data=None): """ Copy given source to destination Args: subset (dict): the registered subset of the asset version_number (int): the version number - locations (list): the currently registered locations Returns: dict: collection of data to create a version """ - # Imprint currently registered location - version_locations = [location for location in locations if - location is not None] return {"schema": "pype:version-3.0", "type": "version", "parent": subset["_id"], "name": version_number, - "locations": version_locations, "data": data} def create_version_data(self, context, instance): @@ -636,6 +653,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "fps": context.data.get( "fps", instance.data.get("fps"))} + intent = context.data.get("intent") + if intent is not None: + version_data["intent"] = intent + # Include optional data if present in optionals = [ "frameStart", "frameEnd", "step", "handles", diff --git a/pype/plugins/global/publish/integrate_rendered_frames.py b/pype/plugins/global/publish/integrate_rendered_frames.py deleted file mode 100644 index 5819051146..0000000000 --- a/pype/plugins/global/publish/integrate_rendered_frames.py +++ /dev/null @@ -1,423 +0,0 @@ -import os -import logging -import shutil -import clique - -import errno -import pyblish.api -from avalon import api, io - - -log = logging.getLogger(__name__) - - -class IntegrateFrames(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ - - label = "Integrate Frames" - order = pyblish.api.IntegratorOrder - families = ["imagesequence"] - - family_targets = [".frames", ".local", ".review", "imagesequence", "render", "source"] - exclude_families = ["clip"] - - def process(self, instance): - if [ef for ef in self.exclude_families - if instance.data["family"] in ef]: - return - - families = [f for f in instance.data["families"] - for search in self.family_targets - if search in f] - - if not families: - return - - self.register(instance) - - # self.log.info("Integrating Asset in to the database ...") - # self.log.info("instance.data: {}".format(instance.data)) - if instance.data.get('transfer', True): - self.integrate(instance) - - def register(self, instance): - - # Required environment variables - PROJECT = api.Session["AVALON_PROJECT"] - ASSET = instance.data.get("asset") or api.Session["AVALON_ASSET"] - LOCATION = api.Session["AVALON_LOCATION"] - - context = instance.context - # Atomicity - # - # Guarantee atomic publishes - each asset contains - # an identical set of members. - # __ - # / o - # / \ - # | o | - # \ / - # o __/ - # - assert all(result["success"] for result in context.data["results"]), ( - "Atomicity not held, aborting.") - - # Assemble - # - # | - # v - # ---> <---- - # ^ - # | - # - stagingdir = instance.data.get("stagingDir") - assert stagingdir, ("Incomplete instance \"%s\": " - "Missing reference to staging area." 
% instance) - - # extra check if stagingDir actually exists and is available - - self.log.debug("Establishing staging directory @ %s" % stagingdir) - - project = io.find_one({"type": "project"}) - - asset = io.find_one({ - "type": "asset", - "name": ASSET, - "parent": project["_id"] - }) - - assert all([project, asset]), ("Could not find current project or " - "asset '%s'" % ASSET) - - subset = self.get_subset(asset, instance) - - # get next version - latest_version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - {"name": True}, - sort=[("name", -1)] - ) - - next_version = 1 - if latest_version is not None: - next_version += latest_version["name"] - - self.log.info("Verifying version from assumed destination") - - assumed_data = instance.data["assumedTemplateData"] - assumed_version = assumed_data["version"] - if assumed_version != next_version: - raise AttributeError("Assumed version 'v{0:03d}' does not match" - "next version in database " - "('v{1:03d}')".format(assumed_version, - next_version)) - - if instance.data.get('version'): - next_version = int(instance.data.get('version')) - - self.log.debug("Next version: v{0:03d}".format(next_version)) - - version_data = self.create_version_data(context, instance) - version = self.create_version(subset=subset, - version_number=next_version, - locations=[LOCATION], - data=version_data) - - self.log.debug("Creating version ...") - version_id = io.insert_one(version).inserted_id - - # Write to disk - # _ - # | | - # _| |_ - # ____\ / - # |\ \ / \ - # \ \ v \ - # \ \________. - # \|________| - # - root = api.registered_root() - hierarchy = "" - parents = io.find_one({"type": 'asset', "name": ASSET})[ - 'data']['parents'] - if parents and len(parents) > 0: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = os.path.join(*parents) - - template_data = {"root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - "silo": asset.get('silo'), - "task": api.Session["AVALON_TASK"], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy} - - # template_publish = project["config"]["template"]["publish"] - anatomy = instance.context.data['anatomy'] - - # Find the representations to transfer amongst the files - # Each should be a single representation (as such, a single extension) - representations = [] - destination_list = [] - - if 'transfers' not in instance.data: - instance.data['transfers'] = [] - - for files in instance.data["files"]: - # Collection - # _______ - # |______|\ - # | |\| - # | || - # | || - # | || - # |_______| - # - if isinstance(files, list): - - src_collections, remainder = clique.assemble(files) - src_collection = src_collections[0] - # Assert that each member has identical suffix - src_head = src_collection.format("{head}") - src_tail = ext = src_collection.format("{tail}") - - test_dest_files = list() - for i in [1, 2]: - template_data["representation"] = src_tail[1:] - template_data["frame"] = src_collection.format( - "{padding}") % i - anatomy_filled = anatomy.format(template_data) - test_dest_files.append(anatomy_filled["render"]["path"]) - - dst_collections, remainder = clique.assemble(test_dest_files) - dst_collection = dst_collections[0] - dst_head = dst_collection.format("{head}") - dst_tail = dst_collection.format("{tail}") - - for i in src_collection.indexes: - src_padding = src_collection.format("{padding}") % i - src_file_name = "{0}{1}{2}".format( - src_head, src_padding, src_tail) - 
dst_padding = dst_collection.format("{padding}") % i - dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail) - - src = os.path.join(stagingdir, src_file_name) - instance.data["transfers"].append([src, dst]) - - else: - # Single file - # _______ - # | |\ - # | | - # | | - # | | - # |_______| - # - - template_data.pop("frame", None) - - fname = files - - self.log.info("fname: {}".format(fname)) - - assert not os.path.isabs(fname), ( - "Given file name is a full path" - ) - _, ext = os.path.splitext(fname) - - template_data["representation"] = ext[1:] - - src = os.path.join(stagingdir, fname) - - anatomy_filled = anatomy.format(template_data) - dst = anatomy_filled["render"]["path"] - - instance.data["transfers"].append([src, dst]) - - if ext[1:] not in ["jpeg", "jpg", "mov", "mp4", "wav"]: - template_data["frame"] = "#" * int(anatomy_filled["render"]["padding"]) - - anatomy_filled = anatomy.format(template_data) - path_to_save = anatomy_filled["render"]["path"] - template = anatomy.templates["render"]["path"] - - self.log.debug("path_to_save: {}".format(path_to_save)) - - representation = { - "schema": "pype:representation-2.0", - "type": "representation", - "parent": version_id, - "name": ext[1:], - "data": {'path': path_to_save, 'template': template}, - "dependencies": instance.data.get("dependencies", "").split(), - - # Imprint shortcut to context - # for performance reasons. - "context": { - "root": root, - "project": { - "name": PROJECT, - "code": project['data']['code'] - }, - "task": api.Session["AVALON_TASK"], - "silo": asset['silo'], - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": int(version["name"]), - "hierarchy": hierarchy, - "representation": ext[1:] - } - } - - destination_list.append(dst) - instance.data['destination_list'] = destination_list - representations.append(representation) - - self.log.info("Registering {} items".format(len(representations))) - io.insert_many(representations) - - def integrate(self, instance): - """Move the files - - Through `instance.data["transfers"]` - - Args: - instance: the instance to integrate - """ - - transfers = instance.data["transfers"] - - for src, dest in transfers: - src = os.path.normpath(src) - dest = os.path.normpath(dest) - if src in dest: - continue - - self.log.info("Copying file .. {} -> {}".format(src, dest)) - self.copy_file(src, dest) - - def copy_file(self, src, dst): - """ Copy given source to destination - - Arguments: - src (str): the source file which needs to be copied - dst (str): the destination of the sourc file - Returns: - None - """ - - dirname = os.path.dirname(dst) - try: - os.makedirs(dirname) - except OSError as e: - if e.errno == errno.EEXIST: - pass - else: - self.log.critical("An unexpected error occurred.") - raise - - shutil.copy(src, dst) - - def get_subset(self, asset, instance): - - subset = io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"] - }) - - if subset is None: - subset_name = instance.data["subset"] - self.log.info("Subset '%s' not found, creating.." 
% subset_name) - - _id = io.insert_one({ - "schema": "pype:subset-2.0", - "type": "subset", - "name": subset_name, - "data": {}, - "parent": asset["_id"] - }).inserted_id - - subset = io.find_one({"_id": _id}) - - return subset - - def create_version(self, subset, version_number, locations, data=None): - """ Copy given source to destination - - Args: - subset (dict): the registered subset of the asset - version_number (int): the version number - locations (list): the currently registered locations - - Returns: - dict: collection of data to create a version - """ - # Imprint currently registered location - version_locations = [location for location in locations if - location is not None] - - return {"schema": "pype:version-2.0", - "type": "version", - "parent": subset["_id"], - "name": version_number, - "locations": version_locations, - "data": data} - - def create_version_data(self, context, instance): - """Create the data collection for the version - - Args: - context: the current context - instance: the current instance being published - - Returns: - dict: the required information with instance.data as key - """ - - families = [] - current_families = instance.data.get("families", list()) - instance_family = instance.data.get("family", None) - - if instance_family is not None: - families.append(instance_family) - families += current_families - - try: - source = instance.data['source'] - except KeyError: - source = context.data["currentFile"] - - relative_path = os.path.relpath(source, api.registered_root()) - source = os.path.join("{root}", relative_path).replace("\\", "/") - - version_data = {"families": families, - "time": context.data["time"], - "author": context.data["user"], - "source": source, - "comment": context.data.get("comment")} - - # Include optional data if present in - optionals = ["frameStart", "frameEnd", "step", - "handles", "colorspace", "fps", "outputDir"] - - for key in optionals: - if key in instance.data: - version_data[key] = instance.data.get(key, None) - - return version_data diff --git a/pype/plugins/global/publish/integrate_resources_path.py b/pype/plugins/global/publish/integrate_resources_path.py new file mode 100644 index 0000000000..56dc0e5ef7 --- /dev/null +++ b/pype/plugins/global/publish/integrate_resources_path.py @@ -0,0 +1,49 @@ +import os +import pyblish.api + + +class IntegrateResourcesPath(pyblish.api.InstancePlugin): + """Generate directory path where the files and resources will be stored""" + + label = "Integrate Resources Path" + order = pyblish.api.IntegratorOrder - 0.05 + families = ["clip", "projectfile", "plate"] + + def process(self, instance): + resources = instance.data.get("resources") or [] + transfers = instance.data.get("transfers") or [] + + if not resources and not transfers: + self.log.debug( + "Instance does not have `resources` and `transfers`" + ) + return + + resources_folder = instance.data["resourcesDir"] + + # Define resource destination and transfers + for resource in resources: + # Add destination to the resource + source_filename = os.path.basename( + resource["source"]).replace("\\", "/") + destination = os.path.join(resources_folder, source_filename) + + # Force forward slashes to fix issue with software unable + # to work correctly with backslashes in specific scenarios + # (e.g. escape characters in PLN-151 V-Ray UDIM) + destination = destination.replace("\\", "/") + + resource['destination'] = destination + + # Collect transfers for the individual files of the resource + # e.g. 
all individual files of a cache or UDIM textures. + files = resource['files'] + for fsrc in files: + fname = os.path.basename(fsrc) + fdest = os.path.join( + resources_folder, fname + ).replace("\\", "/") + transfers.append([fsrc, fdest]) + + instance.data["resources"] = resources + instance.data["transfers"] = transfers diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py new file mode 100644 index 0000000000..b623fa9072 --- /dev/null +++ b/pype/plugins/global/publish/integrate_thumbnail.py @@ -0,0 +1,148 @@ +import os +import sys +import errno +import shutil +import copy + +import six +import pyblish.api +from bson.objectid import ObjectId + +from avalon import api, io + + +class IntegrateThumbnails(pyblish.api.InstancePlugin): + """Integrate Thumbnails.""" + + label = "Integrate Thumbnails" + order = pyblish.api.IntegratorOrder + 0.01 + families = ["review"] + + def process(self, instance): + + if not os.environ.get("AVALON_THUMBNAIL_ROOT"): + self.log.info("AVALON_THUMBNAIL_ROOT is not set." + " Skipping thumbnail integration.") + return + + published_repres = instance.data.get("published_representations") + if not published_repres: + self.log.debug( + "There are not published representation ids on the instance." + ) + return + + project_name = api.Session["AVALON_PROJECT"] + + anatomy = instance.context.data["anatomy"] + if "publish" not in anatomy.templates: + raise AssertionError("Anatomy does not have set publish key!") + + if "thumbnail" not in anatomy.templates["publish"]: + raise AssertionError(( + "There is not set \"thumbnail\" template for project \"{}\"" + ).format(project_name)) + + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + + io.install() + + thumb_repre = None + for repre in published_repres: + if repre["name"].lower() == "thumbnail": + thumb_repre = repre + break + + if not thumb_repre: + self.log.debug( + "There is not representation with name \"thumbnail\"" + ) + return + + version = io.find_one({"_id": thumb_repre["parent"]}) + if not version: + raise AssertionError( + "There does not exist version with id {}".format( + str(thumb_repre["parent"]) + ) + ) + + # Get full path to thumbnail file from representation + src_full_path = os.path.normpath(thumb_repre["data"]["path"]) + if not os.path.exists(src_full_path): + self.log.warning("Thumbnail file was not found. Path: {}".format( + src_full_path + )) + return + + filename, file_extension = os.path.splitext(src_full_path) + # Create id for mongo entity now to fill anatomy template + thumbnail_id = ObjectId() + + # Prepare anatomy template fill data + template_data = copy.deepcopy(thumb_repre["context"]) + template_data.update({ + "_id": str(thumbnail_id), + "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), + "ext": file_extension, + "thumbnail_type": "thumbnail" + }) + + anatomy_filled = anatomy.format(template_data) + final_path = anatomy_filled.get("publish", {}).get("thumbnail") + if not final_path: + raise AssertionError(( + "Anatomy template was not filled with entered data" + "\nTemplate: {} " + "\nData: {}" + ).format(thumbnail_template, str(template_data))) + + dst_full_path = os.path.normpath(final_path) + self.log.debug( + "Copying file .. 
{} -> {}".format(src_full_path, dst_full_path) + ) + dirname = os.path.dirname(dst_full_path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno != errno.EEXIST: + tp, value, tb = sys.exc_info() + six.reraise(tp, value, tb) + + shutil.copy(src_full_path, dst_full_path) + + # Clean template data from keys that are dynamic + template_data.pop("_id") + template_data.pop("thumbnail_root") + + thumbnail_entity = { + "_id": thumbnail_id, + "type": "thumbnail", + "schema": "pype:thumbnail-1.0", + "data": { + "template": thumbnail_template, + "template_data": template_data + } + } + # Create thumbnail entity + io.insert_one(thumbnail_entity) + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_entity)) + ) + # Set thumbnail id for version + io.update_many( + {"_id": version["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( + version["name"], str(version["_id"]) + )) + + asset_entity = instance.data["assetEntity"] + io.update_many( + {"_id": asset_entity["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + self.log.debug("Setting thumbnail for asset \"{}\" <{}>".format( + asset_entity["name"], str(version["_id"]) + )) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index c01cb379d4..dcf19ae32c 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -1,7 +1,7 @@ import os import json import re -import logging +from copy import copy from avalon import api, io from avalon.vendor import requests, clique @@ -14,16 +14,15 @@ def _get_script(): try: from pype.scripts import publish_filesequence except Exception: - raise RuntimeError("Expected module 'publish_deadline'" - "to be available") + assert False, "Expected module 'publish_deadline'to be available" module_path = publish_filesequence.__file__ if module_path.endswith(".pyc"): - module_path = module_path[:-len(".pyc")] + ".py" + module_path = module_path[: -len(".pyc")] + ".py" module_path = os.path.normpath(module_path) - mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT']) - network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH']) + mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"]) + network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"]) module_path = module_path.replace(mount_root, network_root) @@ -34,39 +33,29 @@ def _get_script(): def get_latest_version(asset_name, subset_name, family): # Get asset asset_name = io.find_one( - { - "type": "asset", - "name": asset_name - }, - projection={"name": True} + {"type": "asset", "name": asset_name}, projection={"name": True} ) subset = io.find_one( - { - "type": "subset", - "name": subset_name, - "parent": asset_name["_id"] - }, - projection={"_id": True, "name": True} + {"type": "subset", "name": subset_name, "parent": asset_name["_id"]}, + projection={"_id": True, "name": True}, ) # Check if subsets actually exists (pre-run check) assert subset, "No subsets found, please publish with `extendFrames` off" # Get version - version_projection = {"name": True, - "data.startFrame": True, - "data.endFrame": True, - "parent": True} + version_projection = { + "name": True, + "data.startFrame": True, + "data.endFrame": True, + "parent": True, + } version = io.find_one( - { - "type": "version", - "parent": subset["_id"], - "data.families": family - }, + {"type": "version", "parent": subset["_id"], 
"data.families": family}, projection=version_projection, - sort=[("name", -1)] + sort=[("name", -1)], ) assert version, "No version found, this is a bug" @@ -87,8 +76,12 @@ def get_resources(version, extension=None): directory = api.get_representation_path(representation) print("Source: ", directory) - resources = sorted([os.path.normpath(os.path.join(directory, fname)) - for fname in os.listdir(directory)]) + resources = sorted( + [ + os.path.normpath(os.path.join(directory, fname)) + for fname in os.listdir(directory) + ] + ) return resources @@ -138,8 +131,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): - publishJobState (str, Optional): "Active" or "Suspended" This defaults to "Suspended" - This requires a "frameStart" and "frameEnd" to be present in instance.data - or in context.data. + - expectedFiles (list or dict): explained bellow """ @@ -149,22 +141,39 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): hosts = ["fusion", "maya", "nuke"] - families = [ - "render.farm", - "renderlayer", - "imagesequence" - ] + families = ["render.farm", "renderlayer", "imagesequence"] + + aov_filter = {"maya": ["beauty"]} enviro_filter = [ - "PATH", - "PYTHONPATH", - "FTRACK_API_USER", - "FTRACK_API_KEY", - "FTRACK_SERVER", - "PYPE_ROOT", - "PYPE_STUDIO_PROJECTS_PATH", - "PYPE_STUDIO_PROJECTS_MOUNT" - ] + "PATH", + "PYTHONPATH", + "FTRACK_API_USER", + "FTRACK_API_KEY", + "FTRACK_SERVER", + "PYPE_ROOT", + "PYPE_METADATA_FILE", + "PYPE_STUDIO_PROJECTS_PATH", + "PYPE_STUDIO_PROJECTS_MOUNT", + ] + + # pool used to do the publishing job + deadline_pool = "" + + # regex for finding frame number in string + R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') + + # mapping of instance properties to be transfered to new instance for every + # specified family + instance_transfer = { + "slate": ["slateFrame"], + "review": ["lutPath"], + "render.farm": ["bakeScriptPath", "bakeRenderPath", + "bakeWriteNodeName", "version"] + } + + # list of family names to transfer to new family if present + families_transfer = ["render3d", "render2d", "ftrack", "slate"] def _submit_deadline_post_job(self, instance, job): """ @@ -175,8 +184,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): data = instance.data.copy() subset = data["subset"] job_name = "{batch} - {subset} [publish image sequence]".format( - batch=job["Props"]["Name"], - subset=subset + batch=job["Props"]["Name"], subset=subset ) metadata_filename = "{}_metadata.json".format(subset) @@ -184,10 +192,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): metadata_path = os.path.join(output_dir, metadata_filename) metadata_path = os.path.normpath(metadata_path) - mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT']) - network_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_PATH']) - + mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"]) + network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"] metadata_path = metadata_path.replace(mount_root, network_root) + metadata_path = os.path.normpath(metadata_path) # Generate the payload for Deadline submission payload = { @@ -195,54 +203,287 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Plugin": "Python", "BatchName": job["Props"]["Batch"], "Name": job_name, - "JobType": "Normal", "JobDependency0": job["_id"], "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), - "Priority": job["Props"]["Pri"] + "Priority": job["Props"]["Pri"], + "Pool": self.deadline_pool, + 
"OutputDirectory0": output_dir }, "PluginInfo": { "Version": "3.6", "ScriptFile": _get_script(), - "Arguments": '--paths "{}"'.format(metadata_path), - "SingleFrameOnly": "True" + "Arguments": "", + "SingleFrameOnly": "True", }, - # Mandatory for Deadline, may be empty - "AuxFiles": [] + "AuxFiles": [], } # Transfer the environment from the original job to this dependent # job so they use the same environment environment = job["Props"].get("Env", {}) - + environment["PYPE_METADATA_FILE"] = metadata_path i = 0 for index, key in enumerate(environment): - self.log.info("KEY: {}".format(key)) - self.log.info("FILTER: {}".format(self.enviro_filter)) - if key.upper() in self.enviro_filter: - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % i: "{key}={value}".format( - key=key, - value=environment[key] - ) - }) + payload["JobInfo"].update( + { + "EnvironmentKeyValue%d" + % i: "{key}={value}".format( + key=key, value=environment[key] + ) + } + ) i += 1 - # Avoid copied pools and remove secondary pool - payload["JobInfo"]["Pool"] = "none" + # remove secondary pool payload["JobInfo"].pop("SecondaryPool", None) - self.log.info("Submitting..") - self.log.info(json.dumps(payload, indent=4, sort_keys=True)) + self.log.info("Submitting Deadline job ...") + # self.log.info(json.dumps(payload, indent=4, sort_keys=True)) url = "{}/api/jobs".format(self.DEADLINE_REST_URL) response = requests.post(url, json=payload) if not response.ok: raise Exception(response.text) + def _copy_extend_frames(self, instance, representation): + """ + This will copy all existing frames from subset's latest version back + to render directory and rename them to what renderer is expecting. + + :param instance: instance to get required data from + :type instance: pyblish.plugin.Instance + """ + + import speedcopy + + self.log.info("Preparing to copy ...") + start = instance.data.get("startFrame") + end = instance.data.get("endFrame") + + # get latest version of subset + # this will stop if subset wasn't published yet + version = get_latest_version( + instance.data.get("asset"), + instance.data.get("subset"), "render") + # get its files based on extension + subset_resources = get_resources(version, representation.get("ext")) + r_col, _ = clique.assemble(subset_resources) + + # if override remove all frames we are expecting to be rendered + # so we'll copy only those missing from current render + if instance.data.get("overrideExistingFrame"): + for frame in range(start, end+1): + if frame not in r_col.indexes: + continue + r_col.indexes.remove(frame) + + # now we need to translate published names from represenation + # back. 
This is tricky, right now we'll just use same naming + # and only switch frame numbers + resource_files = [] + r_filename = os.path.basename( + representation.get("files")[0]) # first file + op = re.search(self.R_FRAME_NUMBER, r_filename) + pre = r_filename[:op.start("frame")] + post = r_filename[op.end("frame"):] + assert op is not None, "padding string wasn't found" + for frame in list(r_col): + fn = re.search(self.R_FRAME_NUMBER, frame) + # silencing linter as we need to compare to True, not to + # type + assert fn is not None, "padding string wasn't found" + # list of tuples (source, destination) + resource_files.append( + (frame, + os.path.join(representation.get("stagingDir"), + "{}{}{}".format(pre, + fn.group("frame"), + post))) + ) + + # test if destination dir exists and create it if not + output_dir = os.path.dirname(representation.get("files")[0]) + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + + # copy files + for source in resource_files: + speedcopy.copy(source[0], source[1]) + self.log.info(" > {}".format(source[1])) + + self.log.info( + "Finished copying %i files" % len(resource_files)) + + def _create_instances_for_aov(self, instance_data, exp_files): + """ + This will create new instance for every aov it can detect in expected + files list. + + :param instance_data: skeleton data for instance (those needed) later + by collector + :type instance_data: pyblish.plugin.Instance + :param exp_files: list of expected files divided by aovs + :type exp_files: list + :returns: list of instances + :rtype: list(publish.plugin.Instance) + """ + + task = os.environ["AVALON_TASK"] + subset = instance_data["subset"] + instances = [] + # go through aovs in expected files + for aov, files in exp_files[0].items(): + cols, rem = clique.assemble(files) + # we shouldn't have any reminders + if rem: + self.log.warning( + "skipping unexpected files found " + "in sequence: {}".format(rem)) + + # but we really expect only one collection, nothing else make sense + assert len(cols) == 1, "only one image sequence type is expected" + + # create subset name `familyTaskSubset_AOV` + subset_name = 'render{}{}{}{}_{}'.format( + task[0].upper(), task[1:], + subset[0].upper(), subset[1:], + aov) + + staging = os.path.dirname(list(cols[0])[0]) + + self.log.info("Creating data for: {}".format(subset_name)) + + app = os.environ.get("AVALON_APP", "") + + preview = False + if app in self.aov_filter.keys(): + if aov in self.aov_filter[app]: + preview = True + + new_instance = copy(instance_data) + new_instance["subset"] = subset_name + + ext = cols[0].tail.lstrip(".") + + # create represenation + rep = { + "name": ext, + "ext": ext, + "files": [os.path.basename(f) for f in list(cols[0])], + "frameStart": int(instance_data.get("frameStartHandle")), + "frameEnd": int(instance_data.get("frameEndHandle")), + # If expectedFile are absolute, we need only filenames + "stagingDir": staging, + "anatomy_template": "render", + "fps": new_instance.get("fps"), + "tags": ["review"] if preview else [] + } + + self._solve_families(new_instance, preview) + + new_instance["representations"] = [rep] + + # if extending frames from existing version, copy files from there + # into our destination directory + if new_instance.get("extendFrames", False): + self._copy_extend_frames(new_instance, rep) + instances.append(new_instance) + + return instances + + def _get_representations(self, instance, exp_files): + """ + This will return representations of expected files if they are not + in hierarchy of aovs. 
There should be only one sequence of files for + most cases, but if not - we create representation from each of them. + + :param instance: instance for which we are setting representations + :type instance: pyblish.plugin.Instance + :param exp_files: list of expected files + :type exp_files: list + :returns: list of representations + :rtype: list(dict) + """ + + representations = [] + cols, rem = clique.assemble(exp_files) + bake_render_path = instance.get("bakeRenderPath") + + # create representation for every collected sequence + for c in cols: + ext = c.tail.lstrip(".") + preview = False + # if filtered aov name is found in filename, toggle it for + # preview video rendering + for app in self.aov_filter: + if os.environ.get("AVALON_APP", "") == app: + for aov in self.aov_filter[app]: + if re.match( + r".+(?:\.|_)({})(?:\.|_).*".format(aov), + list(c)[0] + ): + preview = True + break + break + + if bake_render_path: + preview = False + + rep = { + "name": ext, + "ext": ext, + "files": [os.path.basename(f) for f in list(c)], + "frameStart": int(instance.get("frameStartHandle")), + "frameEnd": int(instance.get("frameEndHandle")), + # If expectedFile are absolute, we need only filenames + "stagingDir": os.path.dirname(list(c)[0]), + "anatomy_template": "render", + "fps": instance.get("fps"), + "tags": ["review", "preview"] if preview else [], + } + + representations.append(rep) + + self._solve_families(instance, preview) + + # add reminders as representations + for r in rem: + ext = r.split(".")[-1] + rep = { + "name": ext, + "ext": ext, + "files": os.path.basename(r), + "stagingDir": os.path.dirname(r), + "anatomy_template": "publish", + } + if r in bake_render_path: + rep.update({ + "fps": instance.get("fps"), + "anatomy_template": "render", + "tags": ["review", "delete"] + }) + # solve families with `preview` attributes + self._solve_families(instance, True) + representations.append(rep) + + return representations + + def _solve_families(self, instance, preview=False): + families = instance.get("families") + # if we have one representation with preview tag + # flag whole instance for review and for ftrack + if preview: + if "ftrack" not in families: + if os.environ.get("FTRACK_SERVER"): + families.append("ftrack") + if "review" not in families: + families.append("review") + instance["families"] = families + def process(self, instance): """ Detect type of renderfarm submission and create and post dependend job @@ -252,210 +493,280 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): :param instance: Instance data :type instance: dict """ - # Get a submission job data = instance.data.copy() + context = instance.context + self.context = context + + if hasattr(instance, "_log"): + data['_log'] = instance._log render_job = data.pop("deadlineSubmissionJob", None) submission_type = "deadline" - if not render_job: # No deadline job. Try Muster: musterSubmissionJob render_job = data.pop("musterSubmissionJob", None) submission_type = "muster" - if not render_job: - raise RuntimeError("Can't continue without valid Deadline " - "or Muster submission prior to this " - "plug-in.") + assert render_job, ( + "Can't continue without valid Deadline " + "or Muster submission prior to this " + "plug-in." 
+ ) if submission_type == "deadline": - self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", - "http://localhost:8082") + self.DEADLINE_REST_URL = os.environ.get( + "DEADLINE_REST_URL", "http://localhost:8082" + ) assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" self._submit_deadline_post_job(instance, render_job) asset = data.get("asset") or api.Session["AVALON_ASSET"] - subset = data["subset"] + subset = data.get("subset") - # Get start/end frame from instance, if not available get from context - context = instance.context start = instance.data.get("frameStart") if start is None: start = context.data["frameStart"] + end = instance.data.get("frameEnd") if end is None: end = context.data["frameEnd"] - # Add in regex for sequence filename - # This assumes the output files start with subset name and ends with - # a file extension. The "ext" key includes the dot with the extension. - if "ext" in instance.data: - ext = r"\." + re.escape(instance.data["ext"]) - else: - ext = r"\.\D+" + handle_start = instance.data.get("handleStart") + if handle_start is None: + handle_start = context.data["handleStart"] - regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset), - ext=ext) + handle_end = instance.data.get("handleEnd") + if handle_end is None: + handle_end = context.data["handleEnd"] + + fps = instance.data.get("fps") + if fps is None: + fps = context.data["fps"] + + if data.get("extendFrames", False): + start, end = self._extend_frames( + asset, + subset, + start, + end, + data["overrideExistingFrame"]) try: - source = data['source'] + source = data["source"] except KeyError: source = context.data["currentFile"] - source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), - api.registered_root()) - + source = source.replace( + os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root() + ) relative_path = os.path.relpath(source, api.registered_root()) source = os.path.join("{root}", relative_path).replace("\\", "/") - # find subsets and version to attach render to - attach_to = instance.data.get("attachTo") - attach_subset_versions = [] - if attach_to: - for subset in attach_to: - for instance in context: - if instance.data["subset"] != subset["subset"]: - continue - attach_subset_versions.append( - {"version": instance.data["version"], - "subset": subset["subset"], - "family": subset["family"]}) + families = ["render"] - # Write metadata for publish job - metadata = { + instance_skeleton_data = { + "family": "render", + "subset": subset, + "families": families, + "asset": asset, + "frameStart": start, + "frameEnd": end, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStartHandle": start - handle_start, + "frameEndHandle": end + handle_end, + "fps": fps, + "source": source, + "extendFrames": data.get("extendFrames"), + "overrideExistingFrame": data.get("overrideExistingFrame"), + "pixelAspect": data.get("pixelAspect", 1), + "resolutionWidth": data.get("resolutionWidth", 1920), + "resolutionHeight": data.get("resolutionHeight", 1080), + } + + # transfer specific families from original instance to new render + for item in self.families_transfer: + if item in instance.data.get("families", []): + instance_skeleton_data["families"] += [item] + + # transfer specific properties from original instance based on + # mapping dictionary `instance_transfer` + for key, values in self.instance_transfer.items(): + if key in instance.data.get("families", []): + for v in values: + instance_skeleton_data[v] = instance.data.get(v) + + # look into instance data 
if representations are not having any + # which are having tag `publish_on_farm` and include them + for r in instance.data.get("representations", []): + if "publish_on_farm" in r.get("tags"): + # create representations attribute of not there + if "representations" not in instance_skeleton_data.keys(): + instance_skeleton_data["representations"] = [] + + instance_skeleton_data["representations"].append(r) + + instances = None + assert data.get("expectedFiles"), ("Submission from old Pype version" + " - missing expectedFiles") + + """ + if content of `expectedFiles` are dictionaries, we will handle + it as list of AOVs, creating instance from every one of them. + + Example: + -------- + + expectedFiles = [ + { + "beauty": [ + "foo_v01.0001.exr", + "foo_v01.0002.exr" + ], + + "Z": [ + "boo_v01.0001.exr", + "boo_v01.0002.exr" + ] + } + ] + + This will create instances for `beauty` and `Z` subset + adding those files to their respective representations. + + If we've got only list of files, we collect all filesequences. + More then one doesn't probably make sense, but we'll handle it + like creating one instance with multiple representations. + + Example: + -------- + + expectedFiles = [ + "foo_v01.0001.exr", + "foo_v01.0002.exr", + "xxx_v01.0001.exr", + "xxx_v01.0002.exr" + ] + + This will result in one instance with two representations: + `foo` and `xxx` + """ + + self.log.info(data.get("expectedFiles")) + + if isinstance(data.get("expectedFiles")[0], dict): + # we cannot attach AOVs to other subsets as we consider every + # AOV subset of its own. + + if len(data.get("attachTo")) > 0: + assert len(data.get("expectedFiles")[0].keys()) == 1, ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported") + + # create instances for every AOV we found in expected files. 
+ # note: this is done for every AOV and every render camere (if + # there are multiple renderable cameras in scene) + instances = self._create_instances_for_aov( + instance_skeleton_data, + data.get("expectedFiles")) + self.log.info("got {} instance{}".format( + len(instances), + "s" if len(instances) > 1 else "")) + + else: + representations = self._get_representations( + instance_skeleton_data, + data.get("expectedFiles") + ) + + if "representations" not in instance_skeleton_data.keys(): + instance_skeleton_data["representations"] = [] + + # add representation + instance_skeleton_data["representations"] += representations + instances = [instance_skeleton_data] + + # if we are attaching to other subsets, create copy of existing + # instances, change data to match thats subset and replace + # existing instances with modified data + if instance.data.get("attachTo"): + self.log.info("Attaching render to subset:") + new_instances = [] + for at in instance.data.get("attachTo"): + for i in instances: + new_i = copy(i) + new_i["version"] = at.get("version") + new_i["subset"] = at.get("subset") + new_i["append"] = True + new_i["families"].append(at.get("family")) + new_instances.append(new_i) + self.log.info(" - {} / v{}".format( + at.get("subset"), at.get("version"))) + instances = new_instances + + # publish job file + publish_job = { "asset": asset, - "regex": regex, "frameStart": start, "frameEnd": end, "fps": context.data.get("fps", None), - "families": ["render"], "source": source, "user": context.data["user"], - "version": context.data["version"], + "version": context.data["version"], # this is workfile version "intent": context.data.get("intent"), "comment": context.data.get("comment"), - # Optional metadata (for debugging) - "metadata": { - "instance": data, - "job": render_job, - "session": api.Session.copy() - } + "job": render_job, + "session": api.Session.copy(), + "instances": instances } - if api.Session["AVALON_APP"] == "nuke": - metadata['subset'] = subset - + # pass Ftrack credentials in case of Muster if submission_type == "muster": ftrack = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"), - "FTRACK_SERVER": os.environ.get("FTRACK_SERVER") + "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"), } - metadata.update({"ftrack": ftrack}) + publish_job.update({"ftrack": ftrack}) # Ensure output dir exists output_dir = instance.data["outputDir"] if not os.path.isdir(output_dir): os.makedirs(output_dir) - if data.get("extendFrames", False): - - family = "render" - override = data["overrideExistingFrame"] - - # override = data.get("overrideExistingFrame", False) - out_file = render_job.get("OutFile") - if not out_file: - raise RuntimeError("OutFile not found in render job!") - - extension = os.path.splitext(out_file[0])[1] - _ext = extension[1:] - - # Frame comparison - prev_start = None - prev_end = None - resource_range = range(int(start), int(end)+1) - - # Gather all the subset files (one subset per render pass!) 
- subset_names = [data["subset"]] - subset_names.extend(data.get("renderPasses", [])) - resources = [] - for subset_name in subset_names: - version = get_latest_version(asset_name=data["asset"], - subset_name=subset_name, - family=family) - - # Set prev start / end frames for comparison - if not prev_start and not prev_end: - prev_start = version["data"]["frameStart"] - prev_end = version["data"]["frameEnd"] - - subset_resources = get_resources(version, _ext) - resource_files = get_resource_files(subset_resources, - resource_range, - override) - - resources.extend(resource_files) - - updated_start = min(start, prev_start) - updated_end = max(end, prev_end) - - # Update metadata and instance start / end frame - self.log.info("Updating start / end frame : " - "{} - {}".format(updated_start, updated_end)) - - # TODO : Improve logic to get new frame range for the - # publish job (publish_filesequence.py) - # The current approach is not following Pyblish logic - # which is based - # on Collect / Validate / Extract. - - # ---- Collect Plugins --- - # Collect Extend Frames - Only run if extendFrames is toggled - # # # Store in instance: - # # # Previous rendered files per subset based on frames - # # # --> Add to instance.data[resources] - # # # Update publish frame range - - # ---- Validate Plugins --- - # Validate Extend Frames - # # # Check if instance has the requirements to extend frames - # There might have been some things which can be added to the list - # Please do so when fixing this. - - # Start frame - metadata["frameStart"] = updated_start - metadata["metadata"]["instance"]["frameStart"] = updated_start - - # End frame - metadata["frameEnd"] = updated_end - metadata["metadata"]["instance"]["frameEnd"] = updated_end - metadata_filename = "{}_metadata.json".format(subset) metadata_path = os.path.join(output_dir, metadata_filename) - # convert log messages if they are `LogRecord` to their - # string format to allow serializing as JSON later on. 
- rendered_logs = [] - for log in metadata["metadata"]["instance"].get("_log", []): - if isinstance(log, logging.LogRecord): - rendered_logs.append(log.getMessage()) - else: - rendered_logs.append(log) - - metadata["metadata"]["instance"]["_log"] = rendered_logs + self.log.info("Writing json file: {}".format(metadata_path)) with open(metadata_path, "w") as f: - json.dump(metadata, f, indent=4, sort_keys=True) + json.dump(publish_job, f, indent=4, sort_keys=True) - # Copy files from previous render if extendFrame is True - if data.get("extendFrames", False): + def _extend_frames(self, asset, subset, start, end, override): + """ + This will get latest version of asset and update frame range based + on minimum and maximuma values + """ - self.log.info("Preparing to copy ..") - import shutil + # Frame comparison + prev_start = None + prev_end = None - dest_path = data["outputDir"] - for source in resources: - src_file = os.path.basename(source) - dest = os.path.join(dest_path, src_file) - shutil.copy(source, dest) + version = get_latest_version( + asset_name=asset, + subset_name=subset, + family='render' + ) - self.log.info("Finished copying %i files" % len(resources)) + # Set prev start / end frames for comparison + if not prev_start and not prev_end: + prev_start = version["data"]["frameStart"] + prev_end = version["data"]["frameEnd"] + + updated_start = min(start, prev_start) + updated_end = max(end, prev_end) + + self.log.info( + "Updating start / end frame : " + "{} - {}".format(updated_start, updated_end) + ) + + return updated_start, updated_end diff --git a/pype/plugins/global/publish/validate_custom_ftrack_attributes.py b/pype/plugins/global/publish/validate_custom_ftrack_attributes.py index 2386b359e4..1e8b239b33 100644 --- a/pype/plugins/global/publish/validate_custom_ftrack_attributes.py +++ b/pype/plugins/global/publish/validate_custom_ftrack_attributes.py @@ -47,7 +47,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin): host = pyblish.api.current_host() to_check = context.data["presets"].get( - host, {}).get("ftrack_attributes") + host, {}).get("ftrack_custom_attributes") if not to_check: self.log.warning("ftrack_attributes preset not found") return diff --git a/pype/plugins/global/publish/validate_ffmpeg_installed.py b/pype/plugins/global/publish/validate_ffmpeg_installed.py index df7c330e95..f6738e6de1 100644 --- a/pype/plugins/global/publish/validate_ffmpeg_installed.py +++ b/pype/plugins/global/publish/validate_ffmpeg_installed.py @@ -1,13 +1,14 @@ import pyblish.api import os import subprocess +import pype.lib try: import os.errno as errno except ImportError: import errno -class ValidateFfmpegInstallef(pyblish.api.Validator): +class ValidateFFmpegInstalled(pyblish.api.Validator): """Validate availability of ffmpeg tool in PATH""" order = pyblish.api.ValidatorOrder @@ -27,10 +28,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator): return True def process(self, instance): - self.log.info("ffmpeg path: `{}`".format( - os.environ.get("FFMPEG_PATH", ""))) - if self.is_tool( - os.path.join( - os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False: + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + self.log.info("ffmpeg path: `{}`".format(ffmpeg_path)) + if self.is_tool(ffmpeg_path) is False: self.log.error("ffmpeg not found in PATH") raise RuntimeError('ffmpeg not installed.') diff --git a/pype/plugins/global/publish/validate_templates.py b/pype/plugins/global/publish/validate_templates.py deleted file mode 100644 index f24f6b1a2e..0000000000 --- 
a/pype/plugins/global/publish/validate_templates.py +++ /dev/null @@ -1,43 +0,0 @@ -import pyblish.api -import os - - -class ValidateTemplates(pyblish.api.ContextPlugin): - """Check if all templates were filled""" - - label = "Validate Templates" - order = pyblish.api.ValidatorOrder - 0.1 - hosts = ["maya", "houdini", "nuke"] - - def process(self, context): - - anatomy = context.data["anatomy"] - if not anatomy: - raise RuntimeError("Did not find anatomy") - else: - data = { - "root": os.environ["PYPE_STUDIO_PROJECTS_PATH"], - "project": {"name": "D001_projectsx", - "code": "prjX"}, - "ext": "exr", - "version": 3, - "task": "animation", - "asset": "sh001", - "app": "maya", - "hierarchy": "ep101/sq01/sh010"} - - anatomy_filled = anatomy.format(data) - self.log.info(anatomy_filled) - - data = {"root": os.environ["PYPE_STUDIO_PROJECTS_PATH"], - "project": {"name": "D001_projectsy", - "code": "prjY"}, - "ext": "abc", - "version": 1, - "task": "lookdev", - "asset": "bob", - "app": "maya", - "hierarchy": "ep101/sq01/bob"} - - anatomy_filled = context.data["anatomy"].format(data) - self.log.info(anatomy_filled["work"]["folder"]) diff --git a/pype/plugins/global/publish/validate_version.py b/pype/plugins/global/publish/validate_version.py new file mode 100644 index 0000000000..4585e5a008 --- /dev/null +++ b/pype/plugins/global/publish/validate_version.py @@ -0,0 +1,25 @@ +import pyblish.api + + +class ValidateVersion(pyblish.api.InstancePlugin): + """Validate instance version. + + Pype is not allowing overwiting previously published versions. + """ + + order = pyblish.api.ValidatorOrder + + label = "Validate Version" + hosts = ["nuke", "maya", "blender"] + + def process(self, instance): + version = instance.data.get("version") + latest_version = instance.data.get("latestVersion") + + if latest_version is not None: + msg = ("Version `{0}` that you are" + " trying to publish, already" + " exists in the" + " database.").format( + version, latest_version) + assert (int(version) > int(latest_version)), msg diff --git a/pype/plugins/maya/create/create_renderglobals.py b/pype/plugins/maya/create/create_render.py similarity index 51% rename from pype/plugins/maya/create/create_renderglobals.py rename to pype/plugins/maya/create/create_render.py index 7c71bfbc36..080c6bd55d 100644 --- a/pype/plugins/maya/create/create_renderglobals.py +++ b/pype/plugins/maya/create/create_render.py @@ -2,43 +2,108 @@ import os import json import appdirs import requests + from maya import cmds +import maya.app.renderSetup.model.renderSetup as renderSetup + import pype.maya.lib as lib import avalon.maya -class CreateRenderGlobals(avalon.maya.Creator): +class CreateRender(avalon.maya.Creator): + """Create render layer for export""" - label = "Render Globals" - family = "renderglobals" - icon = "gears" - defaults = ['Main'] + label = "Render" + family = "rendering" + icon = "eye" + defaults = ["Main"] _token = None _user = None _password = None + # renderSetup instance + _rs = None + + _image_prefix_nodes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'defaultRenderGlobals.imageFilePrefix', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' + } + + _image_prefixes = { + 'mentalray': 'maya///_', + 'vray': '"maya///', + 'arnold': 'maya///_', + 'renderman': 'maya///_', + 'redshift': 'maya///_' + } + def __init__(self, *args, **kwargs): - super(CreateRenderGlobals, self).__init__(*args, **kwargs) + 
super(CreateRender, self).__init__(*args, **kwargs) - # We won't be publishing this one - self.data["id"] = "avalon.renderglobals" + def process(self): + exists = cmds.ls(self.name) + if exists: + return cmds.warning("%s already exists." % exists[0]) + use_selection = self.options.get("useSelection") + with lib.undo_chunk(): + self._create_render_settings() + instance = super(CreateRender, self).process() + cmds.setAttr("{}.machineList".format(instance), lock=True) + self._rs = renderSetup.instance() + layers = self._rs.getRenderLayers() + if use_selection: + print(">>> processing existing layers") + sets = [] + for layer in layers: + print(" - creating set for {}".format(layer.name())) + render_set = cmds.sets(n="LAYER_{}".format(layer.name())) + sets.append(render_set) + cmds.sets(sets, forceElement=instance) + + # if no render layers are present, create default one with + # asterix selector + if not layers: + rl = self._rs.createRenderLayer('Main') + cl = rl.createCollection("defaultCollection") + cl.getSelector().setPattern('*') + + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + + cmds.setAttr(self._image_prefix_nodes[renderer], + self._image_prefixes[renderer], + type="string") + + def _create_render_settings(self): # get pools pools = [] - deadline_url = os.environ.get('DEADLINE_REST_URL', None) - muster_url = os.environ.get('MUSTER_REST_URL', None) + deadline_url = os.environ.get("DEADLINE_REST_URL", None) + muster_url = os.environ.get("MUSTER_REST_URL", None) if deadline_url and muster_url: - self.log.error("Both Deadline and Muster are enabled. " - "Cannot support both.") + self.log.error( + "Both Deadline and Muster are enabled. " "Cannot support both." 
+ ) raise RuntimeError("Both Deadline and Muster are enabled") if deadline_url is None: self.log.warning("Deadline REST API url not found.") else: argument = "{}/api/pools?NamesOnly=true".format(deadline_url) - response = self._requests_get(argument) + try: + response = self._requests_get(argument) + except requests.exceptions.ConnectionError as e: + msg = 'Cannot connect to deadline web service' + self.log.error(msg) + raise RuntimeError('{} - {}'.format(msg, e)) if not response.ok: self.log.warning("No pools retrieved") else: @@ -57,8 +122,8 @@ class CreateRenderGlobals(avalon.maya.Creator): try: pools = self._get_muster_pools() except requests.exceptions.HTTPError as e: - if e.startswith('401'): - self.log.warning('access token expired') + if e.startswith("401"): + self.log.warning("access token expired") self._show_login() raise RuntimeError("Access token expired") except requests.exceptions.ConnectionError: @@ -66,20 +131,15 @@ class CreateRenderGlobals(avalon.maya.Creator): raise RuntimeError("Cannot connect to {}".format(muster_url)) pool_names = [] for pool in pools: - self.log.info(" - pool: {}".format(pool['name'])) - pool_names.append(pool['name']) + self.log.info(" - pool: {}".format(pool["name"])) + pool_names.append(pool["name"]) self.data["primaryPool"] = pool_names - # We don't need subset or asset attributes - # self.data.pop("subset", None) - # self.data.pop("asset", None) - # self.data.pop("active", None) - self.data["suspendPublishJob"] = False self.data["extendFrames"] = False self.data["overrideExistingFrame"] = True - self.data["useLegacyRenderLayers"] = True + # self.data["useLegacyRenderLayers"] = True self.data["priority"] = 50 self.data["framesPerTask"] = 1 self.data["whitelist"] = False @@ -88,20 +148,6 @@ class CreateRenderGlobals(avalon.maya.Creator): self.options = {"useSelection": False} # Force no content - def process(self): - - exists = cmds.ls(self.name) - assert len(exists) <= 1, ( - "More than one renderglobal exists, this is a bug" - ) - - if exists: - return cmds.warning("%s already exists." % exists[0]) - - with lib.undo_chunk(): - super(CreateRenderGlobals, self).process() - cmds.setAttr("{}.machineList".format(self.name), lock=True) - def _load_credentials(self): """ Load Muster credentials from file and set `MUSTER_USER`, @@ -111,14 +157,12 @@ class CreateRenderGlobals(avalon.maya.Creator): Show login dialog if access token is invalid or missing. 
""" - app_dir = os.path.normpath( - appdirs.user_data_dir('pype-app', 'pype') - ) - file_name = 'muster_cred.json' + app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype")) + file_name = "muster_cred.json" fpath = os.path.join(app_dir, file_name) - file = open(fpath, 'r') + file = open(fpath, "r") muster_json = json.load(file) - self._token = muster_json.get('token', None) + self._token = muster_json.get("token", None) if not self._token: self._show_login() raise RuntimeError("Invalid access token for Muster") @@ -131,26 +175,25 @@ class CreateRenderGlobals(avalon.maya.Creator): """ Get render pools from muster """ - params = { - 'authToken': self._token - } - api_entry = '/api/pools/list' - response = self._requests_get( - self.MUSTER_REST_URL + api_entry, params=params) + params = {"authToken": self._token} + api_entry = "/api/pools/list" + response = self._requests_get(self.MUSTER_REST_URL + api_entry, + params=params) if response.status_code != 200: if response.status_code == 401: - self.log.warning('Authentication token expired.') + self.log.warning("Authentication token expired.") self._show_login() else: self.log.error( - 'Cannot get pools from Muster: {}'.format( - response.status_code)) - raise Exception('Cannot get pools from Muster') + ("Cannot get pools from " + "Muster: {}").format(response.status_code) + ) + raise Exception("Cannot get pools from Muster") try: - pools = response.json()['ResponseData']['pools'] + pools = response.json()["ResponseData"]["pools"] except ValueError as e: - self.log.error('Invalid response from Muster server {}'.format(e)) - raise Exception('Invalid response from Muster server') + self.log.error("Invalid response from Muster server {}".format(e)) + raise Exception("Invalid response from Muster server") return pools @@ -162,8 +205,8 @@ class CreateRenderGlobals(avalon.maya.Creator): self.log.debug(api_url) login_response = self._requests_post(api_url, timeout=1) if login_response.status_code != 200: - self.log.error('Cannot show login form to Muster') - raise Exception('Cannot show login form to Muster') + self.log.error("Cannot show login form to Muster") + raise Exception("Cannot show login form to Muster") def _requests_post(self, *args, **kwargs): """ Wrapper for requests, disabling SSL certificate validation if @@ -175,8 +218,10 @@ class CreateRenderGlobals(avalon.maya.Creator): WARNING: disabling SSL certificate validation is defeating one line of defense SSL is providing and it is not recommended. """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + if "verify" not in kwargs: + kwargs["verify"] = ( + False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True + ) # noqa return requests.post(*args, **kwargs) def _requests_get(self, *args, **kwargs): @@ -189,6 +234,8 @@ class CreateRenderGlobals(avalon.maya.Creator): WARNING: disabling SSL certificate validation is defeating one line of defense SSL is providing and it is not recommended. 
""" - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + if "verify" not in kwargs: + kwargs["verify"] = ( + False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True + ) # noqa return requests.get(*args, **kwargs) diff --git a/pype/plugins/maya/load/actions.py b/pype/plugins/maya/load/actions.py index 9f6a5c4d34..77d18b0ee3 100644 --- a/pype/plugins/maya/load/actions.py +++ b/pype/plugins/maya/load/actions.py @@ -140,9 +140,9 @@ class ImportMayaLoader(api.Loader): message = "Are you sure you want import this" state = QtWidgets.QMessageBox.warning(None, - "Are you sure?", - message, - buttons=buttons, - defaultButton=accept) + "Are you sure?", + message, + buttons=buttons, + defaultButton=accept) return state == accept diff --git a/pype/plugins/maya/load/load_camera.py b/pype/plugins/maya/load/load_camera.py deleted file mode 100644 index e9bf265b98..0000000000 --- a/pype/plugins/maya/load/load_camera.py +++ /dev/null @@ -1,62 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - -class CameraLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader of Alembic for the pype.camera family""" - - families = ["camera"] - label = "Reference camera" - representations = ["abc", "ma"] - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - # Get family type from the context - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "camera" - - cmds.loadPlugin("AbcImport.mll", quiet=True) - groupName = "{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - sharedReferenceFile=False, - groupReference=True, - groupName="{}:{}".format(namespace, name), - reference=True, - returnNewNodes=True) - - cameras = cmds.ls(nodes, type="camera") - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - # Check the Maya version, lockTransform has been introduced since - # Maya 2016.5 Ext 2 - version = int(cmds.about(version=True)) - if version >= 2016: - for camera in cameras: - cmds.camera(camera, edit=True, lockTransform=True) - else: - self.log.warning("This version of Maya does not support locking of" - " transforms of cameras.") - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_fbx.py b/pype/plugins/maya/load/load_fbx.py deleted file mode 100644 index 14df300c3c..0000000000 --- a/pype/plugins/maya/load/load_fbx.py +++ /dev/null @@ -1,54 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - -class FBXLoader(pype.maya.plugin.ReferenceLoader): - """Load the FBX""" - - families = ["fbx"] - representations = ["fbx"] - - label = "Reference FBX" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "fbx" - - # Ensure FBX plug-in is loaded - cmds.loadPlugin("fbxmaya", quiet=True) - - with maya.maintained_selection(): - nodes = cmds.file(self.fname, - namespace=namespace, - 
reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) - - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_mayaascii.py b/pype/plugins/maya/load/load_mayaascii.py deleted file mode 100644 index b9a5de2782..0000000000 --- a/pype/plugins/maya/load/load_mayaascii.py +++ /dev/null @@ -1,68 +0,0 @@ -import pype.maya.plugin -from pypeapp import config -import os - - -class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader): - """Load the model""" - - families = ["mayaAscii", - "setdress", - "layout"] - representations = ["ma"] - - label = "Reference Maya Ascii" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "model" - - with maya.maintained_selection(): - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) - - self[:] = nodes - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - cmds.setAttr(groupName + ".displayHandle", 1) - # get bounding box - bbox = cmds.exactWorldBoundingBox(groupName) - # get pivot position on world space - pivot = cmds.xform(groupName, q=True, sp=True, ws=True) - # center of bounding box - cx = (bbox[0] + bbox[3]) / 2 - cy = (bbox[1] + bbox[4]) / 2 - cz = (bbox[2] + bbox[5]) / 2 - # add pivot position to calculate offset - cx = cx + pivot[0] - cy = cy + pivot[1] - cz = cz + pivot[2] - # set selection handle offset to center of bounding box - cmds.setAttr(groupName + ".selectHandleX", cx) - cmds.setAttr(groupName + ".selectHandleY", cy) - cmds.setAttr(groupName + ".selectHandleZ", cz) - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index 376fcc2c01..797933300c 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -1,4 +1,6 @@ import pype.maya.plugin +from avalon import api, maya +from maya import cmds import os from pypeapp import config @@ -6,8 +8,15 @@ from pypeapp import config class ReferenceLoader(pype.maya.plugin.ReferenceLoader): """Load the model""" - families = ["model", "pointcache", "animation"] - representations = ["ma", "abc"] + families = ["model", + "pointcache", + "animation", + "mayaAscii", + "setdress", + "layout", + "camera", + "rig"] + representations = ["ma", "abc", "fbx"] tool_names = ["loader"] label = "Reference" @@ -15,7 +24,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): icon = "code-fork" color = "orange" - def 
process_reference(self, context, name, namespace, data): + def process_reference(self, context, name, namespace, options): import maya.cmds as cmds from avalon import maya import pymel.core as pm @@ -37,27 +46,29 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): reference=True, returnNewNodes=True) - namespace = cmds.referenceQuery(nodes[0], namespace=True) + # namespace = cmds.referenceQuery(nodes[0], namespace=True) shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) + + current_namespace = pm.namespaceInfo(currentNamespace=True) + + if current_namespace != ":": + groupName = current_namespace + ":" + groupName groupNode = pm.PyNode(groupName) roots = set() - print(nodes) for node in newNodes: try: roots.add(pm.PyNode(node).getAllParents()[-2]) - except: + except: # noqa: E722 pass for root in roots: root.setParent(world=True) - groupNode.root().zeroTransformPivots() + groupNode.zeroTransformPivots() for root in roots: root.setParent(groupNode) @@ -90,23 +101,41 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) + if family == "rig": + self._post_process_rig(name, namespace, context, options) + else: + if "translate" in options: + cmds.setAttr(groupName + ".t", *options["translate"]) + return newNodes def switch(self, container, representation): self.update(container, representation) + def _post_process_rig(self, name, namespace, context, options): -# for backwards compatibility -class AbcLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["pointcache", "animation"] - representations = ["abc"] - tool_names = [] + output = next((node for node in self if + node.endswith("out_SET")), None) + controls = next((node for node in self if + node.endswith("controls_SET")), None) + assert output, "No out_SET in rig, this is a bug." + assert controls, "No controls_SET in rig, this is a bug." -# for backwards compatibility -class ModelLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["model", "pointcache"] - representations = ["abc"] - tool_names = [] + # Find the roots amongst the loaded nodes + roots = cmds.ls(self[:], assemblies=True, long=True) + assert roots, "No root nodes in rig, this is a bug." + + asset = api.Session["AVALON_ASSET"] + dependency = str(context["representation"]["_id"]) + + self.log.info("Creating subset: {}".format(namespace)) + + # Create the animation instance + with maya.maintained_selection(): + cmds.select([output, controls] + roots, noExpand=True) + api.create(name=namespace, + asset=asset, + family="animation", + options={"useSelection": True}, + data={"dependencies": dependency}) diff --git a/pype/plugins/maya/load/load_rig.py b/pype/plugins/maya/load/load_rig.py deleted file mode 100644 index fc6e666ac6..0000000000 --- a/pype/plugins/maya/load/load_rig.py +++ /dev/null @@ -1,95 +0,0 @@ -from maya import cmds - -import pype.maya.plugin -from avalon import api, maya -import os -from pypeapp import config - - -class RigLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader for rigs - - This automatically creates an instance for animators upon load. 
- - """ - - families = ["rig"] - representations = ["ma"] - - label = "Reference rig" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "rig" - - groupName = "{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName=groupName) - - cmds.xform(groupName, pivots=(0, 0, 0)) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) - - newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) - - # Store for post-process - self[:] = newNodes - if data.get("post_process", True): - self._post_process(name, namespace, context, data) - - return newNodes - - def _post_process(self, name, namespace, context, data): - - # TODO(marcus): We are hardcoding the name "out_SET" here. - # Better register this keyword, so that it can be used - # elsewhere, such as in the Integrator plug-in, - # without duplication. - - output = next((node for node in self if - node.endswith("out_SET")), None) - controls = next((node for node in self if - node.endswith("controls_SET")), None) - - assert output, "No out_SET in rig, this is a bug." - assert controls, "No controls_SET in rig, this is a bug." - - # Find the roots amongst the loaded nodes - roots = cmds.ls(self[:], assemblies=True, long=True) - assert roots, "No root nodes in rig, this is a bug." 
- - asset = api.Session["AVALON_ASSET"] - dependency = str(context["representation"]["_id"]) - - # Create the animation instance - with maya.maintained_selection(): - cmds.select([output, controls] + roots, noExpand=True) - api.create(name=namespace, - asset=asset, - family="animation", - options={"useSelection": True}, - data={"dependencies": dependency}) - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_vrayproxy.py b/pype/plugins/maya/load/load_vrayproxy.py index 9b07dc7e30..35d93676a0 100644 --- a/pype/plugins/maya/load/load_vrayproxy.py +++ b/pype/plugins/maya/load/load_vrayproxy.py @@ -117,7 +117,7 @@ class VRayProxyLoader(api.Loader): vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name)) mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True, - name="{}_VRMM".format(name)) + name="{}_VRMM".format(name)) vray_mat_sg = cmds.sets(name="{}_VRSG".format(name), empty=True, renderable=True, diff --git a/pype/plugins/maya/publish/collect_instances.py b/pype/plugins/maya/publish/collect_instances.py index 39d7bcd86d..5af717ba4d 100644 --- a/pype/plugins/maya/publish/collect_instances.py +++ b/pype/plugins/maya/publish/collect_instances.py @@ -103,16 +103,22 @@ class CollectInstances(pyblish.api.ContextPlugin): # Store the exact members of the object set instance.data["setMembers"] = members - # Define nice label name = cmds.ls(objset, long=False)[0] # use short name label = "{0} ({1})".format(name, data["asset"]) + if "handles" in data: + data["handleStart"] = data["handles"] + data["handleEnd"] = data["handles"] + # Append start frame and end frame to label if present if "frameStart" and "frameEnd" in data: - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) + data["frameStartHandle"] = data["frameStart"] - data["handleStart"] + data["frameEndHandle"] = data["frameEnd"] + data["handleEnd"] + + label += " [{0}-{1}]".format(int(data["frameStartHandle"]), + int(data["frameEndHandle"])) instance.data["label"] = label @@ -122,7 +128,6 @@ class CollectInstances(pyblish.api.ContextPlugin): # user interface interested in visualising it. self.log.info("Found: \"%s\" " % instance.data["name"]) self.log.debug("DATA: \"%s\" " % instance.data) - def sort_by_family(instance): """Sort by family""" diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py new file mode 100644 index 0000000000..be3878e6bd --- /dev/null +++ b/pype/plugins/maya/publish/collect_render.py @@ -0,0 +1,911 @@ +""" +This collector will go through render layers in maya and prepare all data +needed to create instances and their representations for submition and +publishing on farm. 
+ +Requires: + instance -> families + instance -> setMembers + + context -> currentFile + context -> workspaceDir + context -> user + + session -> AVALON_ASSET + +Optional: + +Provides: + instance -> label + instance -> subset + instance -> attachTo + instance -> setMembers + instance -> publish + instance -> frameStart + instance -> frameEnd + instance -> byFrameStep + instance -> renderer + instance -> family + instance -> families + instance -> asset + instance -> time + instance -> author + instance -> source + instance -> expectedFiles + instance -> resolutionWidth + instance -> resolutionHeight + instance -> pixelAspect +""" + +import re +import os +import types +import six +from abc import ABCMeta, abstractmethod + +from maya import cmds +import maya.app.renderSetup.model.renderSetup as renderSetup + +import pyblish.api + +from avalon import maya, api +import pype.maya.lib as lib + + +R_SINGLE_FRAME = re.compile(r'^(-?)\d+$') +R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$') +R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+') +R_LAYER_TOKEN = re.compile( + r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE) +R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE) +R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE) +R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE) +# to remove unused renderman tokens +R_CLEAN_FRAME_TOKEN = re.compile(r'\.?<f\d+>\.?', re.IGNORECASE) +R_CLEAN_EXT_TOKEN = re.compile(r'\.?<ext>\.?', re.IGNORECASE) + +R_SUBSTITUTE_LAYER_TOKEN = re.compile( + r'%l|<layer>|<renderlayer>', re.IGNORECASE) +R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE) +R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE) + +RENDERER_NAMES = { + 'mentalray': 'MentalRay', + 'vray': 'V-Ray', + 'arnold': 'Arnold', + 'renderman': 'Renderman', + 'redshift': 'Redshift' +} + +# not sure about the renderman image prefix +ImagePrefixes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'rmanGlobals.imageFileFormat', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' +} + + +class CollectMayaRender(pyblish.api.ContextPlugin): + """Gather all publishable render layers from renderSetup""" + + order = pyblish.api.CollectorOrder + 0.01 + hosts = ["maya"] + label = "Collect Render Layers" + + def process(self, context): + render_instance = None + for instance in context: + if 'rendering' in instance.data['families']: + render_instance = instance + render_instance.data["remove"] = True + + # make sure workfile instance publishing is enabled + if 'workfile' in instance.data['families']: + instance.data["publish"] = True + + if not render_instance: + self.log.info("No render instance found, skipping render " + "layer collection.") + return + + render_globals = render_instance + collected_render_layers = render_instance.data['setMembers'] + filepath = context.data["currentFile"].replace("\\", "/") + asset = api.Session["AVALON_ASSET"] + workspace = context.data["workspaceDir"] + + self._rs = renderSetup.instance() + maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()} + + self.maya_layers = maya_render_layers + + for layer in collected_render_layers: + # every layer in set should start with `LAYER_` prefix + try: + expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1) + except IndexError: + msg = ("Invalid layer name in set [ {} ]".format(layer)) + self.log.warning(msg) + continue + + self.log.info("processing %s" % layer) + # check if layer is part of
renderSetup + if expected_layer_name not in maya_render_layers: + msg = ("Render layer [ {} ] is not in " + "Render Setup".format(expected_layer_name)) + self.log.warning(msg) + continue + + # check if layer is renderable + if not maya_render_layers[expected_layer_name].isRenderable(): + msg = ("Render layer [ {} ] is not " + "renderable".format(expected_layer_name)) + self.log.warning(msg) + continue + + # test if there are sets (subsets) to attach render to + sets = cmds.sets(layer, query=True) or [] + attachTo = [] + if sets: + for s in sets: + attachTo.append({ + "version": None, # we need integrator to get version + "subset": s, + "family": cmds.getAttr("{}.family".format(s)) + }) + self.log.info(" -> attach render to: {}".format(s)) + + layer_name = "rs_{}".format(expected_layer_name) + + # collect all frames we are expecting to be rendered + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + + # return all expected files for all cameras and aovs in given + # frame range + exp_files = ExpectedFiles().get(renderer, layer_name) + assert exp_files, ("no file names were generated, this is bug") + + # if we want to attach render to subset, check if we have AOV's + # in expectedFiles. If so, raise error as we cannot attach AOV + # (considered to be subset on its own) to another subset + if attachTo: + assert len(exp_files[0].keys()) == 1, ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported") + + # append full path + full_exp_files = [] + aov_dict = {} + + # we either get AOVs or just list of files. List of files can + # mean two things - there are no AOVs enabled or multipass EXR + # is produced. In either case we treat those as `beauty`. 
+ if isinstance(exp_files[0], dict): + for aov, files in exp_files[0].items(): + full_paths = [] + for ef in files: + full_path = os.path.join(workspace, "renders", ef) + full_path = full_path.replace("\\", "/") + full_paths.append(full_path) + aov_dict[aov] = full_paths + else: + full_paths = [] + for ef in exp_files: + full_path = os.path.join(workspace, "renders", ef) + full_path = full_path.replace("\\", "/") + full_paths.append(full_path) + aov_dict["beauty"] = full_paths + + full_exp_files.append(aov_dict) + self.log.info(full_exp_files) + self.log.info("collecting layer: {}".format(layer_name)) + # Get layer specific settings, might be overrides + data = { + "subset": expected_layer_name, + "attachTo": attachTo, + "setMembers": layer_name, + "publish": True, + "frameStart": int(context.data["assetEntity"]['data']['frameStart']), + "frameEnd": int(context.data["assetEntity"]['data']['frameEnd']), + "frameStartHandle": int(self.get_render_attribute("startFrame", + layer=layer_name)), + "frameEndHandle": int(self.get_render_attribute("endFrame", + layer=layer_name)), + "byFrameStep": int( + self.get_render_attribute("byFrameStep", + layer=layer_name)), + "renderer": self.get_render_attribute("currentRenderer", + layer=layer_name), + "handleStart": int(context.data["assetEntity"]['data']['handleStart']), + "handleEnd": int(context.data["assetEntity"]['data']['handleEnd']), + + # instance subset + "family": "renderlayer", + "families": ["renderlayer"], + "asset": asset, + "time": api.time(), + "author": context.data["user"], + + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": filepath, + "expectedFiles": full_exp_files, + "resolutionWidth": cmds.getAttr("defaultResolution.width"), + "resolutionHeight": cmds.getAttr("defaultResolution.height"), + "pixelAspect": cmds.getAttr("defaultResolution.pixelAspect") + } + + # Apply each user defined attribute as data + for attr in cmds.listAttr(layer, userDefined=True) or list(): + try: + value = cmds.getAttr("{}.{}".format(layer, attr)) + except Exception: + # Some attributes cannot be read directly, + # such as mesh and color attributes. These + # are considered non-essential to this + # particular publishing pipeline. + value = None + + data[attr] = value + + # Include (optional) global settings + # Get global overrides and translate to Deadline values + overrides = self.parse_options(str(render_globals)) + data.update(**overrides) + + # Define nice label + label = "{0} ({1})".format(expected_layer_name, data["asset"]) + label += " [{0}-{1}]".format(int(data["frameStartHandle"]), + int(data["frameEndHandle"])) + + instance = context.create_instance(expected_layer_name) + instance.data["label"] = label + instance.data.update(data) + pass + + def parse_options(self, render_globals): + """Get all overrides with a value, skip those without + + Here's the kicker. These globals override defaults in the submission + integrator, but an empty value means no overriding is made. + Otherwise, Frames would override the default frames set under globals. 
+ + Args: + render_globals (str): collection of render globals + + Returns: + dict: only overrides with values + """ + + attributes = maya.read(render_globals) + + options = {"renderGlobals": {}} + options["renderGlobals"]["Priority"] = attributes["priority"] + + # Check for specific pools + pool_a, pool_b = self._discover_pools(attributes) + options["renderGlobals"].update({"Pool": pool_a}) + if pool_b: + options["renderGlobals"].update({"SecondaryPool": pool_b}) + + # Machine list + machine_list = attributes["machineList"] + if machine_list: + key = "Whitelist" if attributes["whitelist"] else "Blacklist" + options['renderGlobals'][key] = machine_list + + # Suspend publish job + state = "Suspended" if attributes["suspendPublishJob"] else "Active" + options["publishJobState"] = state + + chunksize = attributes.get("framesPerTask", 1) + options["renderGlobals"]["ChunkSize"] = chunksize + + # Override frames should be False if extendFrames is False. This is + # to ensure it doesn't go off doing crazy unpredictable things + override_frames = False + extend_frames = attributes.get("extendFrames", False) + if extend_frames: + override_frames = attributes.get("overrideExistingFrame", False) + + options["extendFrames"] = extend_frames + options["overrideExistingFrame"] = override_frames + + maya_render_plugin = "MayaBatch" + if not attributes.get("useMayaBatch", True): + maya_render_plugin = "MayaCmd" + + options["mayaRenderPlugin"] = maya_render_plugin + + return options + + def _discover_pools(self, attributes): + + pool_a = None + pool_b = None + + # Check for specific pools + pool_b = [] + if "primaryPool" in attributes: + pool_a = attributes["primaryPool"] + if "secondaryPool" in attributes: + pool_b = attributes["secondaryPool"] + + else: + # Backwards compatibility + pool_str = attributes.get("pools", None) + if pool_str: + pool_a, pool_b = pool_str.split(";") + + # Ensure empty entry token is caught + if pool_b == "-": + pool_b = None + + return pool_a, pool_b + + def _get_overrides(self, layer): + rset = self.maya_layers[layer].renderSettingsCollectionInstance() + return rset.getOverrides() + + def get_render_attribute(self, attr, layer): + return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), + layer=layer) + + +class ExpectedFiles: + + def get(self, renderer, layer): + if renderer.lower() == 'arnold': + return ExpectedFilesArnold(layer).get_files() + elif renderer.lower() == 'vray': + return ExpectedFilesVray(layer).get_files() + elif renderer.lower() == 'redshift': + return ExpectedFilesRedshift(layer).get_files() + elif renderer.lower() == 'mentalray': + return ExpectedFilesMentalray(layer).get_files() + elif renderer.lower() == 'renderman': + return ExpectedFilesRenderman(layer).get_files() + else: + raise UnsupportedRendererException( + "unsupported {}".format(renderer)) + + +@six.add_metaclass(ABCMeta) +class AExpectedFiles: + renderer = None + layer = None + + def __init__(self, layer): + self.layer = layer + + @abstractmethod + def get_aovs(self): + pass + + def get_renderer_prefix(self): + try: + file_prefix = cmds.getAttr(ImagePrefixes[self.renderer]) + except KeyError: + raise UnsupportedRendererException( + "Unsupported renderer {}".format(self.renderer)) + return file_prefix + + def _get_layer_data(self): + # ______________________________________________ + # ____________________/ ____________________________________________/ + # 1 - get scene name /__________________/ + # ____________________/ + scene_dir, scene_basename = os.path.split(cmds.file(q=True, 
loc=True)) + scene_name, _ = os.path.splitext(scene_basename) + + # ______________________________________________ + # ____________________/ ____________________________________________/ + # 2 - detect renderer /__________________/ + # ____________________/ + renderer = self.renderer + + # ________________________________________________ + # __________________/ ______________________________________________/ + # 3 - image prefix /__________________/ + # __________________/ + file_prefix = self.get_renderer_prefix() + + if not file_prefix: + raise RuntimeError("Image prefix not set") + + default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey') + + # ________________________________________________ + # __________________/ ______________________________________________/ + # 4 - get renderable cameras_____________/ + # __________________/ + + # if we have token in prefix path we'll expect output for + # every renderable camera in layer. + + renderable_cameras = self.get_renderable_cameras() + # ________________________________________________ + # __________________/ ______________________________________________/ + # 5 - get AOVs /____________________/ + # __________________/ + + enabled_aovs = self.get_aovs() + + layer_name = self.layer + if self.layer.startswith("rs_"): + layer_name = self.layer[3:] + start_frame = int(self.get_render_attribute('startFrame')) + end_frame = int(self.get_render_attribute('endFrame')) + frame_step = int(self.get_render_attribute('byFrameStep')) + padding = int(self.get_render_attribute('extensionPadding')) + + scene_data = { + "frameStart": start_frame, + "frameEnd": end_frame, + "frameStep": frame_step, + "padding": padding, + "cameras": renderable_cameras, + "sceneName": scene_name, + "layerName": layer_name, + "renderer": renderer, + "defaultExt": default_ext, + "filePrefix": file_prefix, + "enabledAOVs": enabled_aovs + } + return scene_data + + def _generate_single_file_sequence(self, layer_data): + expected_files = [] + file_prefix = layer_data["filePrefix"] + for cam in layer_data["cameras"]: + mappings = ( + (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]), + (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]), + (R_SUBSTITUTE_CAMERA_TOKEN, cam), + # this is required to remove unfilled aov token, for example + # in Redshift + (R_REMOVE_AOV_TOKEN, ""), + (R_CLEAN_FRAME_TOKEN, ""), + (R_CLEAN_EXT_TOKEN, "") + ) + + for regex, value in mappings: + file_prefix = re.sub(regex, value, file_prefix) + + for frame in range( + int(layer_data["frameStart"]), + int(layer_data["frameEnd"]) + 1, + int(layer_data["frameStep"])): + expected_files.append( + '{}.{}.{}'.format(file_prefix, + str(frame).rjust( + layer_data["padding"], "0"), + layer_data["defaultExt"])) + return expected_files + + def _generate_aov_file_sequences(self, layer_data): + expected_files = [] + aov_file_list = {} + file_prefix = layer_data["filePrefix"] + for aov in layer_data["enabledAOVs"]: + for cam in layer_data["cameras"]: + + mappings = ( + (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]), + (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]), + (R_SUBSTITUTE_CAMERA_TOKEN, cam), + (R_SUBSTITUTE_AOV_TOKEN, aov[0]), + (R_CLEAN_FRAME_TOKEN, ""), + (R_CLEAN_EXT_TOKEN, "") + ) + + for regex, value in mappings: + file_prefix = re.sub(regex, value, file_prefix) + + aov_files = [] + for frame in range( + int(layer_data["frameStart"]), + int(layer_data["frameEnd"]) + 1, + int(layer_data["frameStep"])): + aov_files.append( + '{}.{}.{}'.format( + file_prefix, + 
str(frame).rjust(layer_data["padding"], "0"), + aov[1])) + + # if we have more then one renderable camera, append + # camera name to AOV to allow per camera AOVs. + aov_name = aov[0] + if len(layer_data["cameras"]) > 1: + aov_name = "{}_{}".format(aov[0], cam) + + aov_file_list[aov_name] = aov_files + file_prefix = layer_data["filePrefix"] + + expected_files.append(aov_file_list) + return expected_files + + def get_files(self): + """ + This method will return list of expected files. + + It will translate render token strings ('', etc.) to + their values. This task is tricky as every renderer deals with this + differently. It depends on `get_aovs()` abstract method implemented + for every supported renderer. + """ + layer_data = self._get_layer_data() + + expected_files = [] + if layer_data.get("enabledAOVs"): + expected_files = self._generate_aov_file_sequences(layer_data) + else: + expected_files = self._generate_single_file_sequence(layer_data) + + return expected_files + + def get_renderable_cameras(self): + cam_parents = [cmds.listRelatives(x, ap=True)[-1] + for x in cmds.ls(cameras=True)] + + renderable_cameras = [] + for cam in cam_parents: + renderable = False + if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))): + renderable = True + + for override in self.get_layer_overrides( + '{}.renderable'.format(cam), self.layer): + renderable = self.maya_is_true(override) + + if renderable: + renderable_cameras.append(cam) + return renderable_cameras + + def maya_is_true(self, attr_val): + """ + Whether a Maya attr evaluates to True. + When querying an attribute value from an ambiguous object the + Maya API will return a list of values, which need to be properly + handled to evaluate properly. + """ + if isinstance(attr_val, types.BooleanType): + return attr_val + elif isinstance(attr_val, (types.ListType, types.GeneratorType)): + return any(attr_val) + else: + return bool(attr_val) + + def get_layer_overrides(self, attr, layer): + connections = cmds.listConnections(attr, plugs=True) + if connections: + for connection in connections: + if connection: + node_name = connection.split('.')[0] + if cmds.nodeType(node_name) == 'renderLayer': + attr_name = '%s.value' % '.'.join( + connection.split('.')[:-1]) + if node_name == layer: + yield cmds.getAttr(attr_name) + + def get_render_attribute(self, attr): + return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), + layer=self.layer) + + +class ExpectedFilesArnold(AExpectedFiles): + + # Arnold AOV driver extension mapping + # Is there a better way? + aiDriverExtension = { + 'jpeg': 'jpg', + 'exr': 'exr', + 'deepexr': 'exr', + 'png': 'png', + 'tiff': 'tif', + 'mtoa_shaders': 'ass', # TODO: research what those last two should be + 'maya': '' + } + + def __init__(self, layer): + super(ExpectedFilesArnold, self).__init__(layer) + self.renderer = 'arnold' + + def get_aovs(self): + enabled_aovs = [] + try: + if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode') + and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')): + # AOVs are merged in mutli-channel file + return enabled_aovs + except ValueError: + # this occurs when Render Setting windows was not opened yet. In + # such case there are no Arnold options created so query for AOVs + # will fail. We terminate here as there are no AOVs specified then. + # This state will most probably fail later on some Validator + # anyway. + return enabled_aovs + + # AOVs are set to be rendered separately. We should expect + # token in path. 
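Before the per-renderer listing continues below, a minimal sketch of the token translation that the `get_files()` docstring above describes. The prefix and the token spellings (`<Scene>`, `<RenderLayer>`, `<RenderPass>`) are assumptions standing in for the `R_SUBSTITUTE_*` patterns, whose definitions are not visible in this part of the diff.

import re

prefix = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"  # hypothetical image prefix
mappings = (
    (re.compile(r"<scene>", re.IGNORECASE), "shot010_v001"),
    (re.compile(r"<renderlayer>|<layer>", re.IGNORECASE), "mainLayer"),
    (re.compile(r"<renderpass>|<aov>", re.IGNORECASE), "diffuse"),
)
for regex, value in mappings:
    prefix = regex.sub(value, prefix)

# one padded file name per frame, as in _generate_single_file_sequence()
expected = ["{}.{}.exr".format(prefix, str(f).rjust(4, "0")) for f in range(1, 4)]
# -> ['maya/shot010_v001/mainLayer/mainLayer_diffuse.0001.exr', ...]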
+ + ai_aovs = [n for n in cmds.ls(type='aiAOV')] + + for aov in ai_aovs: + enabled = self.maya_is_true( + cmds.getAttr('{}.enabled'.format(aov))) + ai_driver = cmds.listConnections( + '{}.outputs'.format(aov))[0] + ai_translator = cmds.getAttr( + '{}.aiTranslator'.format(ai_driver)) + try: + aov_ext = self.aiDriverExtension[ai_translator] + except KeyError: + msg = ('Unrecognized Arnold ' + 'driver format for AOV - {}').format( + cmds.getAttr('{}.name'.format(aov)) + ) + raise AOVError(msg) + + for override in self.get_layer_overrides( + '{}.enabled'.format(aov), self.layer): + enabled = self.maya_is_true(override) + if enabled: + # If the RGBA aov is selected, Arnold will translate it to `beauty` + aov_name = cmds.getAttr('%s.name' % aov) + if aov_name == 'RGBA': + aov_name = 'beauty' + enabled_aovs.append( + ( + aov_name, + aov_ext + ) + ) + # Append 'beauty' as this is Arnold's + # default. If the token is specified and no AOVs are + # defined, this will be used. + enabled_aovs.append( + ( + u'beauty', + cmds.getAttr('defaultRenderGlobals.imfPluginKey') + ) + ) + return enabled_aovs + + +class ExpectedFilesVray(AExpectedFiles): + + # V-ray file extension mapping + # 5 - exr + # 6 - multichannel exr + # 13 - deep exr + + def __init__(self, layer): + super(ExpectedFilesVray, self).__init__(layer) + self.renderer = 'vray' + + def get_renderer_prefix(self): + prefix = super(ExpectedFilesVray, self).get_renderer_prefix() + prefix = "{}_".format(prefix) + return prefix + + def get_files(self): + expected_files = super(ExpectedFilesVray, self).get_files() + + # we need to add one sequence for plain beauty if AOVs are enabled, + # as V-Ray outputs beauty without 'beauty' in the filename. + + layer_data = self._get_layer_data() + if layer_data.get("enabledAOVs"): + expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501 + + return expected_files + + def get_aovs(self): + enabled_aovs = [] + + try: + # really? do we set it in vray just by selecting multichannel exr? + if cmds.getAttr( + "vraySettings.imageFormatStr") == "exr (multichannel)": + # AOVs are merged in a multi-channel file + return enabled_aovs + except ValueError: + # this occurs when the Render Settings window was not opened yet. + # In such case there are no V-Ray settings created, so querying + # for AOVs will fail. We terminate here as there are no AOVs + # specified then. This state will most probably fail later on + # some Validator anyway.
+ return enabled_aovs + + default_ext = cmds.getAttr('vraySettings.imageFormatStr') + if default_ext == "exr (multichannel)" or default_ext == "exr (deep)": + default_ext = "exr" + + vr_aovs = [n for n in cmds.ls( + type=["VRayRenderElement", "VRayRenderElementSet"])] + + # todo: find out how to detect multichannel exr for vray + for aov in vr_aovs: + enabled = self.maya_is_true( + cmds.getAttr('{}.enabled'.format(aov))) + for override in self.get_layer_overrides( + '{}.enabled'.format(aov), 'rs_{}'.format(self.layer)): + enabled = self.maya_is_true(override) + + if enabled: + # todo: find how vray set format for AOVs + enabled_aovs.append( + ( + self._get_vray_aov_name(aov), + default_ext) + ) + return enabled_aovs + + def _get_vray_aov_name(self, node): + + # Get render element pass type + vray_node_attr = next(attr for attr in cmds.listAttr(node) + if attr.startswith("vray_name")) + pass_type = vray_node_attr.rsplit("_", 1)[-1] + + # Support V-Ray extratex explicit name (if set by user) + if pass_type == "extratex": + explicit_attr = "{}.vray_explicit_name_extratex".format(node) + explicit_name = cmds.getAttr(explicit_attr) + if explicit_name: + return explicit_name + + # Node type is in the attribute name but we need to check if value + # of the attribute as it can be changed + return cmds.getAttr("{}.{}".format(node, vray_node_attr)) + + +class ExpectedFilesRedshift(AExpectedFiles): + + # mapping redshift extension dropdown values to strings + ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg'] + + def __init__(self, layer): + super(ExpectedFilesRedshift, self).__init__(layer) + self.renderer = 'redshift' + + def get_renderer_prefix(self): + prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix() + prefix = "{}_".format(prefix) + return prefix + + def get_files(self): + expected_files = super(ExpectedFilesRedshift, self).get_files() + + # we need to add one sequence for plain beauty if AOVs are enabled. + # as redshift output beauty without 'beauty' in filename. + + layer_data = self._get_layer_data() + if layer_data.get("enabledAOVs"): + expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501 + + return expected_files + + def get_aovs(self): + enabled_aovs = [] + + try: + if self.maya_is_true( + cmds.getAttr("redshiftOptions.exrForceMultilayer")): + # AOVs are merged in mutli-channel file + return enabled_aovs + except ValueError: + # this occurs when Render Setting windows was not opened yet. In + # such case there are no Arnold options created so query for AOVs + # will fail. We terminate here as there are no AOVs specified then. + # This state will most probably fail later on some Validator + # anyway. 
+ return enabled_aovs + + default_ext = self.ext_mapping[ + cmds.getAttr('redshiftOptions.imageFormat') + ] + rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')] + + # todo: find out how to detect multichannel exr for redshift + for aov in rs_aovs: + enabled = self.maya_is_true( + cmds.getAttr('{}.enabled'.format(aov))) + for override in self.get_layer_overrides( + '{}.enabled'.format(aov), self.layer): + enabled = self.maya_is_true(override) + + if enabled: + enabled_aovs.append( + ( + cmds.getAttr('%s.name' % aov), + default_ext + ) + ) + + return enabled_aovs + + +class ExpectedFilesRenderman(AExpectedFiles): + + def __init__(self, layer): + super(ExpectedFilesRenderman, self).__init__(layer) + self.renderer = 'renderman' + + def get_aovs(self): + enabled_aovs = [] + + default_ext = "exr" + displays = cmds.listConnections("rmanGlobals.displays") + for aov in displays: + aov_name = str(aov) + if aov_name == "rmanDefaultDisplay": + aov_name = "beauty" + + enabled = self.maya_is_true( + cmds.getAttr("{}.enable".format(aov))) + for override in self.get_layer_overrides( + '{}.enable'.format(aov), self.layer): + enabled = self.maya_is_true(override) + + if enabled: + enabled_aovs.append( + ( + aov_name, + default_ext + ) + ) + + return enabled_aovs + + def get_files(self): + """ + In renderman we hack it with prepending path. This path would + normally be translated from `rmanGlobals.imageOutputDir`. We skip + this and harcode prepend path we expect. There is no place for user + to mess around with this settings anyway and it is enforced in + render settings validator. + """ + layer_data = self._get_layer_data() + new_aovs = {} + + expected_files = super(ExpectedFilesRenderman, self).get_files() + # we always get beauty + for aov, files in expected_files[0].items(): + new_files = [] + for file in files: + new_file = "{}/{}/{}".format(layer_data["sceneName"], + layer_data["layerName"], + file) + new_files.append(new_file) + new_aovs[aov] = new_files + + return [new_aovs] + + +class ExpectedFilesMentalray(AExpectedFiles): + + def __init__(self, layer): + raise UnimplementedRendererException('Mentalray not implemented') + + def get_aovs(self): + return [] + + +class AOVError(Exception): + pass + + +class UnsupportedRendererException(Exception): + pass + + +class UnimplementedRendererException(Exception): + pass diff --git a/pype/plugins/maya/publish/collect_renderable_camera.py b/pype/plugins/maya/publish/collect_renderable_camera.py index 6b1732c3cb..13b847cee4 100644 --- a/pype/plugins/maya/publish/collect_renderable_camera.py +++ b/pype/plugins/maya/publish/collect_renderable_camera.py @@ -17,7 +17,7 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin): def process(self, instance): layer = instance.data["setMembers"] - + self.log.info("layer: {}".format(layer)) cameras = cmds.ls(type="camera", long=True) renderable = [c for c in cameras if lib.get_attr_in_layer("%s.renderable" % c, layer=layer)] diff --git a/pype/plugins/maya/publish/collect_renderlayers.py b/pype/plugins/maya/publish/collect_renderlayers.py deleted file mode 100644 index 73a4d237ab..0000000000 --- a/pype/plugins/maya/publish/collect_renderlayers.py +++ /dev/null @@ -1,201 +0,0 @@ -from maya import cmds - -import pyblish.api - -from avalon import maya, api -import pype.maya.lib as lib - - -class CollectMayaRenderlayers(pyblish.api.ContextPlugin): - """Gather instances by active render layers""" - - order = pyblish.api.CollectorOrder + 0.01 - hosts = ["maya"] - label = "Render Layers" - - def process(self, context): - - 
asset = api.Session["AVALON_ASSET"] - filepath = context.data["currentFile"].replace("\\", "/") - - # Get render globals node - try: - render_globals = cmds.ls("renderglobalsMain")[0] - for instance in context: - self.log.debug(instance.name) - if instance.data['family'] == 'workfile': - instance.data['publish'] = True - except IndexError: - self.log.info("Skipping renderlayer collection, no " - "renderGlobalsDefault found..") - return - # Get all valid renderlayers - # This is how Maya populates the renderlayer display - rlm_attribute = "renderLayerManager.renderLayerId" - connected_layers = cmds.listConnections(rlm_attribute) or [] - valid_layers = set(connected_layers) - - # Get all renderlayers and check their state - renderlayers = [i for i in cmds.ls(type="renderLayer") if - cmds.getAttr("{}.renderable".format(i)) and not - cmds.referenceQuery(i, isNodeReferenced=True)] - - # Sort by displayOrder - def sort_by_display_order(layer): - return cmds.getAttr("%s.displayOrder" % layer) - - renderlayers = sorted(renderlayers, key=sort_by_display_order) - - for layer in renderlayers: - - # Check if layer is in valid (linked) layers - if layer not in valid_layers: - self.log.warning("%s is invalid, skipping" % layer) - continue - - if layer.endswith("defaultRenderLayer"): - continue - else: - # Remove Maya render setup prefix `rs_` - layername = layer.split("rs_", 1)[-1] - - # Get layer specific settings, might be overrides - data = { - "subset": layername, - "setMembers": layer, - "publish": True, - "frameStart": self.get_render_attribute("startFrame", - layer=layer), - "frameEnd": self.get_render_attribute("endFrame", - layer=layer), - "byFrameStep": self.get_render_attribute("byFrameStep", - layer=layer), - "renderer": self.get_render_attribute("currentRenderer", - layer=layer), - - # instance subset - "family": "Render Layers", - "families": ["renderlayer"], - "asset": asset, - "time": api.time(), - "author": context.data["user"], - - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath - } - - # Apply each user defined attribute as data - for attr in cmds.listAttr(layer, userDefined=True) or list(): - try: - value = cmds.getAttr("{}.{}".format(layer, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. - value = None - - data[attr] = value - - # Include (optional) global settings - # TODO(marcus): Take into account layer overrides - # Get global overrides and translate to Deadline values - overrides = self.parse_options(render_globals) - data.update(**overrides) - - # Define nice label - label = "{0} ({1})".format(layername, data["asset"]) - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) - - instance = context.create_instance(layername) - instance.data["label"] = label - instance.data.update(data) - - def get_render_attribute(self, attr, layer): - return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), - layer=layer) - - def parse_options(self, render_globals): - """Get all overrides with a value, skip those without - - Here's the kicker. These globals override defaults in the submission - integrator, but an empty value means no overriding is made. - Otherwise, Frames would override the default frames set under globals. 
- - Args: - render_globals (str): collection of render globals - - Returns: - dict: only overrides with values - """ - - attributes = maya.read(render_globals) - - options = {"renderGlobals": {}} - options["renderGlobals"]["Priority"] = attributes["priority"] - - # Check for specific pools - pool_a, pool_b = self._discover_pools(attributes) - options["renderGlobals"].update({"Pool": pool_a}) - if pool_b: - options["renderGlobals"].update({"SecondaryPool": pool_b}) - - legacy = attributes["useLegacyRenderLayers"] - options["renderGlobals"]["UseLegacyRenderLayers"] = legacy - - # Machine list - machine_list = attributes["machineList"] - if machine_list: - key = "Whitelist" if attributes["whitelist"] else "Blacklist" - options['renderGlobals'][key] = machine_list - - # Suspend publish job - state = "Suspended" if attributes["suspendPublishJob"] else "Active" - options["publishJobState"] = state - - chunksize = attributes.get("framesPerTask", 1) - options["renderGlobals"]["ChunkSize"] = chunksize - - # Override frames should be False if extendFrames is False. This is - # to ensure it doesn't go off doing crazy unpredictable things - override_frames = False - extend_frames = attributes.get("extendFrames", False) - if extend_frames: - override_frames = attributes.get("overrideExistingFrame", False) - - options["extendFrames"] = extend_frames - options["overrideExistingFrame"] = override_frames - - maya_render_plugin = "MayaBatch" - if not attributes.get("useMayaBatch", True): - maya_render_plugin = "MayaCmd" - - options["mayaRenderPlugin"] = maya_render_plugin - - return options - - def _discover_pools(self, attributes): - - pool_a = None - pool_b = None - - # Check for specific pools - pool_b = [] - if "primaryPool" in attributes: - pool_a = attributes["primaryPool"] - if "secondaryPool" in attributes: - pool_b = attributes["secondaryPool"] - - else: - # Backwards compatibility - pool_str = attributes.get("pools", None) - if pool_str: - pool_a, pool_b = pool_str.split(";") - - # Ensure empty entry token is caught - if pool_b == "-": - pool_b = None - - return pool_a, pool_b diff --git a/pype/plugins/maya/publish/collect_review.py b/pype/plugins/maya/publish/collect_review.py index 18eee78a9c..9b6027b98d 100644 --- a/pype/plugins/maya/publish/collect_review.py +++ b/pype/plugins/maya/publish/collect_review.py @@ -54,8 +54,10 @@ class CollectReview(pyblish.api.InstancePlugin): self.log.debug('adding review family to {}'.format(reviewable_subset)) data['review_camera'] = camera # data["publish"] = False - data['startFrameReview'] = instance.data["frameStart"] - data['endFrameReview'] = instance.data["frameEnd"] + data['frameStartFtrack'] = instance.data["frameStartHandle"] + data['frameEndFtrack'] = instance.data["frameEndHandle"] + data['frameStartHandle'] = instance.data["frameStartHandle"] + data['frameEndHandle'] = instance.data["frameEndHandle"] data["frameStart"] = instance.data["frameStart"] data["frameEnd"] = instance.data["frameEnd"] data['handles'] = instance.data['handles'] @@ -69,8 +71,8 @@ class CollectReview(pyblish.api.InstancePlugin): else: instance.data['subset'] = task + 'Review' instance.data['review_camera'] = camera - instance.data['startFrameReview'] = instance.data["frameStart"] - instance.data['endFrameReview'] = instance.data["frameEnd"] + instance.data['frameStartFtrack'] = instance.data["frameStartHandle"] + instance.data['frameEndFtrack'] = instance.data["frameEndHandle"] # make ftrack publishable instance.data["families"] = ['ftrack'] diff --git 
a/pype/plugins/maya/publish/collect_scene.py b/pype/plugins/maya/publish/collect_scene.py index f2fbb4d623..089019f2d3 100644 --- a/pype/plugins/maya/publish/collect_scene.py +++ b/pype/plugins/maya/publish/collect_scene.py @@ -35,7 +35,7 @@ class CollectMayaScene(pyblish.api.ContextPlugin): "subset": subset, "asset": os.getenv("AVALON_ASSET", None), "label": subset, - "publish": False, + "publish": True, "family": 'workfile', "families": ['workfile'], "setMembers": [current_file] diff --git a/pype/plugins/maya/publish/determine_future_version.py b/pype/plugins/maya/publish/determine_future_version.py new file mode 100644 index 0000000000..afa249aca2 --- /dev/null +++ b/pype/plugins/maya/publish/determine_future_version.py @@ -0,0 +1,28 @@ +import pyblish + +class DetermineFutureVersion(pyblish.api.InstancePlugin): + """ + This will determine version of subset if we want render to be attached to. + """ + label = "Determine Subset Version" + order = pyblish.api.IntegratorOrder + hosts = ["maya"] + families = ["renderlayer"] + + def process(self, instance): + context = instance.context + attach_to_subsets = [s["subset"] for s in instance.data['attachTo']] + + if not attach_to_subsets: + return + + for i in context: + if i.data["subset"] in attach_to_subsets: + # # this will get corresponding subset in attachTo list + # # so we can set version there + sub = next(item for item in instance.data['attachTo'] if item["subset"] == i.data["subset"]) # noqa: E501 + + sub["version"] = i.data.get("version", 1) + self.log.info("render will be attached to {} v{}".format( + sub["subset"], sub["version"] + )) diff --git a/pype/plugins/maya/publish/extract_assembly.py b/pype/plugins/maya/publish/extract_assembly.py index 26b16a73c4..c12d57e836 100644 --- a/pype/plugins/maya/publish/extract_assembly.py +++ b/pype/plugins/maya/publish/extract_assembly.py @@ -22,11 +22,11 @@ class ExtractAssembly(pype.api.Extractor): def process(self, instance): - parent_dir = self.staging_dir(instance) + staging_dir = self.staging_dir(instance) hierarchy_filename = "{}.abc".format(instance.name) - hierarchy_path = os.path.join(parent_dir, hierarchy_filename) + hierarchy_path = os.path.join(staging_dir, hierarchy_filename) json_filename = "{}.json".format(instance.name) - json_path = os.path.join(parent_dir, json_filename) + json_path = os.path.join(staging_dir, json_filename) self.log.info("Dumping scene data for debugging ..") with open(json_path, "w") as filepath: @@ -46,8 +46,24 @@ class ExtractAssembly(pype.api.Extractor): "uvWrite": True, "selection": True}) - instance.data["files"] = [json_filename, hierarchy_filename] + if "representations" not in instance.data: + instance.data["representations"] = [] + representation_abc = { + 'name': 'abc', + 'ext': 'abc', + 'files': hierarchy_filename, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation_abc) + + representation_json = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation_json) # Remove data instance.data.pop("scenedata", None) diff --git a/pype/plugins/maya/publish/extract_camera_mayaAscii.py b/pype/plugins/maya/publish/extract_camera_mayaAscii.py index 30f686f6f5..ef80ed4ad4 100644 --- a/pype/plugins/maya/publish/extract_camera_mayaAscii.py +++ b/pype/plugins/maya/publish/extract_camera_mayaAscii.py @@ -94,11 +94,6 @@ class ExtractCameraMayaAscii(pype.api.Extractor): step = instance.data.get("step", 1.0) bake_to_worldspace = 
instance.data("bakeToWorldSpace", True) - # TODO: Implement a bake to non-world space - # Currently it will always bake the resulting camera to world-space - # and it does not allow to include the parent hierarchy, even though - # with `bakeToWorldSpace` set to False it should include its - # hierarchy to be correct with the family implementation. if not bake_to_worldspace: self.log.warning("Camera (Maya Ascii) export only supports world" "space baked camera extractions. The disabled " @@ -113,7 +108,7 @@ class ExtractCameraMayaAscii(pype.api.Extractor): framerange[1] + handles] # validate required settings - assert len(cameras) == 1, "Not a single camera found in extraction" + assert len(cameras) == 1, "Single camera must be found in extraction" assert isinstance(step, float), "Step must be a float value" camera = cameras[0] transform = cmds.listRelatives(camera, parent=True, fullPath=True) @@ -124,21 +119,24 @@ class ExtractCameraMayaAscii(pype.api.Extractor): path = os.path.join(dir_path, filename) # Perform extraction - self.log.info("Performing camera bakes for: {0}".format(transform)) with avalon.maya.maintained_selection(): with lib.evaluation("off"): with avalon.maya.suspended_refresh(): - baked = lib.bake_to_world_space( - transform, - frame_range=range_with_handles, - step=step - ) - baked_shapes = cmds.ls(baked, - type="camera", - dag=True, - shapes=True, - long=True) - + if bake_to_worldspace: + self.log.info( + "Performing camera bakes: {}".format(transform)) + baked = lib.bake_to_world_space( + transform, + frame_range=range_with_handles, + step=step + ) + baked_shapes = cmds.ls(baked, + type="camera", + dag=True, + shapes=True, + long=True) + else: + baked_shapes = cameras # Fix PLN-178: Don't allow background color to be non-black for cam in baked_shapes: attrs = {"backgroundColorR": 0.0, @@ -164,7 +162,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor): expressions=False) # Delete the baked hierarchy - cmds.delete(baked) + if bake_to_worldspace: + cmds.delete(baked) massage_ma_file(path) diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py index fa6ecd72c3..58196433aa 100644 --- a/pype/plugins/maya/publish/extract_look.py +++ b/pype/plugins/maya/publish/extract_look.py @@ -1,6 +1,7 @@ import os import sys import json +import copy import tempfile import contextlib import subprocess @@ -330,10 +331,9 @@ class ExtractLook(pype.api.Extractor): maya_path)) def resource_destination(self, instance, filepath, do_maketx): - anatomy = instance.context.data["anatomy"] - self.create_destination_template(instance, anatomy) + resources_dir = instance.data["resourcesDir"] # Compute destination location basename, ext = os.path.splitext(os.path.basename(filepath)) @@ -343,7 +343,7 @@ class ExtractLook(pype.api.Extractor): ext = ".tx" return os.path.join( - instance.data["assumedDestination"], "resources", basename + ext + resources_dir, basename + ext ) def _process_texture(self, filepath, do_maketx, staging, linearise, force): @@ -407,97 +407,3 @@ class ExtractLook(pype.api.Extractor): return converted, COPY, texture_hash return filepath, COPY, texture_hash - - def create_destination_template(self, instance, anatomy): - """Create a filepath based on the current data available - - Example template: - {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/ - {subset}.{representation} - Args: - instance: the instance to publish - - Returns: - file path (str) - """ - - # get all the stuff from the database - subset_name = 
instance.data["subset"] - self.log.info(subset_name) - asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - a_template = anatomy.templates - - project = io.find_one( - { - "type": "project", - "name": project_name - }, - projection={"config": True, "data": True} - ) - - template = a_template["publish"]["path"] - # anatomy = instance.context.data['anatomy'] - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - assert asset, ("No asset found by the name '{}' " - "in project '{}'").format(asset_name, project_name) - silo = asset.get("silo") - - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] - }) - - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) - - # if there is a subset there ought to be version - if version is not None: - version_number += version["name"] - - if instance.data.get("version"): - version_number = int(instance.data.get("version")) - - padding = int(a_template["render"]["padding"]) - - hierarchy = asset["data"]["parents"] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = "/".join(hierarchy) - - template_data = { - "root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, "code": project["data"]["code"]}, - "silo": silo, - "family": instance.data["family"], - "asset": asset_name, - "subset": subset_name, - "frame": ("#" * padding), - "version": version_number, - "hierarchy": hierarchy, - "representation": "TEMP", - } - - instance.data["assumedTemplateData"] = template_data - self.log.info(template_data) - instance.data["template"] = template - # We take the parent folder of representation 'filepath' - instance.data["assumedDestination"] = os.path.dirname( - anatomy.format(template_data)["publish"]["path"] - ) diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py index 1031955260..29d6b78051 100644 --- a/pype/plugins/maya/publish/extract_quicktime.py +++ b/pype/plugins/maya/publish/extract_quicktime.py @@ -1,16 +1,14 @@ import os +import glob import contextlib -import capture_gui import clique +import capture # import pype.maya.lib as lib import pype.api # from maya import cmds, mel import pymel.core as pm -# import ffmpeg -# # from pype.scripts import otio_burnin -# reload(ffmpeg) # TODO: move codec settings to presets @@ -35,17 +33,13 @@ class ExtractQuicktime(pype.api.Extractor): # if start and end frames cannot be determined, get them # from Maya timeline - start = instance.data.get("startFrameReview") - end = instance.data.get("endFrameReview") + start = instance.data.get("frameStartFtrack") + end = instance.data.get("frameEndFtrack") if start is None: start = cmds.playbackOptions(query=True, animationStartTime=True) if end is None: end = cmds.playbackOptions(query=True, animationEndTime=True) self.log.info("start: {}, end: {}".format(start, end)) - handles = instance.data.get("handles", 0) - if handles: - start -= handles - end += handles # get cameras camera = instance.data['review_camera'] @@ -93,7 +87,18 @@ class ExtractQuicktime(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + 
# viewer opening call to allow a signal to trigger between playblast + # and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) self.log.info("file list {}".format(playblast)) @@ -119,6 +124,46 @@ class ExtractQuicktime(pype.api.Extractor): } instance.data["representations"].append(representation) + def _fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. + if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + + @contextlib.contextmanager def maintained_time(): diff --git a/pype/plugins/maya/publish/extract_thumbnail.py b/pype/plugins/maya/publish/extract_thumbnail.py index dc8044cf19..8377af1ac0 100644 --- a/pype/plugins/maya/publish/extract_thumbnail.py +++ b/pype/plugins/maya/publish/extract_thumbnail.py @@ -1,31 +1,14 @@ import os import contextlib -import time -import sys +import glob -import capture_gui -import clique +import capture import pype.maya.lib as lib import pype.api from maya import cmds import pymel.core as pm -# import ffmpeg -# reload(ffmpeg) - -import avalon.maya - -# import maya_utils as mu - -# from tweakHUD import master -# from tweakHUD import draft_hud as dHUD -# from tweakHUD import ftrackStrings as fStrings - -# -# def soundOffsetFunc(oSF, SF, H): -# tmOff = (oSF - H) - SF -# return tmOff class ExtractThumbnail(pype.api.Extractor): @@ -47,39 +30,8 @@ class ExtractThumbnail(pype.api.Extractor): end = cmds.currentTime(query=True) self.log.info("start: {}, end: {}".format(start, end)) - members = instance.data['setMembers'] camera = instance.data['review_camera'] - # project_code = ftrack_data['Project']['code'] - # task_type = ftrack_data['Task']['type'] - # - # # load Preset - # studio_repos = os.path.abspath(os.environ.get('studio_repos')) - # shot_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '_' + asset + '.json')) - # - # task_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '.json')) - # - # project_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '.json')) - # - # default_preset_path = 
os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # 'default.json') - # - # if os.path.isfile(shot_preset_path): - # preset_to_use = shot_preset_path - # elif os.path.isfile(task_preset_path): - # preset_to_use = task_preset_path - # elif os.path.isfile(project_preset_path): - # preset_to_use = project_preset_path - # else: - # preset_to_use = default_preset_path - capture_preset = "" capture_preset = instance.context.data['presets']['maya']['capture'] try: @@ -126,7 +78,18 @@ class ExtractThumbnail(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + # viewer opening call to allow a signal to trigger between + # playblast and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) @@ -144,6 +107,45 @@ class ExtractThumbnail(pype.api.Extractor): } instance.data["representations"].append(representation) + def _fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. 
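As an aside, a tiny standalone illustration of the workaround described in the docstring above; all paths are hypothetical and the snippet only shows the kind of glob match the code below performs.

import glob
import os

reported = "/tmp/playblasts/capture.####.png"  # what playblast may report back
# on disk the frames are e.g. /tmp/playblasts/capture.0001.png ... capture.0050.png
head, tail = reported.split(".", 1)
candidates = glob.glob("{}.*.{}".format(head, tail.rsplit(".", 1)[-1]))
newest = max(candidates, key=os.path.getmtime) if candidates else None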
+ if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + @contextlib.contextmanager def maintained_time(): diff --git a/pype/plugins/maya/publish/extract_yeti_rig.py b/pype/plugins/maya/publish/extract_yeti_rig.py index 892bc0bea6..70a509564f 100644 --- a/pype/plugins/maya/publish/extract_yeti_rig.py +++ b/pype/plugins/maya/publish/extract_yeti_rig.py @@ -110,15 +110,7 @@ class ExtractYetiRig(pype.api.Extractor): self.log.info("Writing metadata file") - # Create assumed destination folder for imageSearchPath - assumed_temp_data = instance.data["assumedTemplateData"] - template = instance.data["template"] - template_formatted = template.format(**assumed_temp_data) - - destination_folder = os.path.dirname(template_formatted) - - image_search_path = os.path.join(destination_folder, "resources") - image_search_path = os.path.normpath(image_search_path) + image_search_path = resources_dir = instance.data["resourcesDir"] settings = instance.data.get("rigsettings", None) if settings: diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 55c04e9c41..7547f34ba1 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -1,6 +1,7 @@ import os import json import getpass +import clique from maya import cmds @@ -117,6 +118,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): else: optional = True + use_published = True + def process(self, instance): DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", @@ -125,21 +128,66 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): context = instance.context workspace = context.data["workspaceDir"] + anatomy = context.data['anatomy'] filepath = None + if self.use_published: + for i in context: + if "workfile" in i.data["families"]: + assert i.data["publish"] is True, ( + "Workfile (scene) must be published along") + template_data = i.data.get("anatomyData") + rep = i.data.get("representations")[0].get("name") + template_data["representation"] = rep + template_data["ext"] = rep + template_data["comment"] = None + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled["publish"]["path"] + filepath = os.path.normpath(template_filled) + self.log.info("Using published scene for render {}".format( + filepath)) + + # now we need to switch scene in expected files + # because token will now point to published + # scene file and that might differ from current one + new_scene = os.path.splitext( + os.path.basename(filepath))[0] + orig_scene = os.path.splitext( + os.path.basename(context.data["currentFile"]))[0] + exp = instance.data.get("expectedFiles") + + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + new_exp = {} + for aov, files in exp[0].items(): + replaced_files = [] + for f in files: + replaced_files.append( + f.replace(orig_scene, new_scene) + ) + new_exp[aov] = replaced_files + instance.data["expectedFiles"] = [new_exp] + else: + new_exp = [] + for f in exp: 
+ new_exp.append( + f.replace(orig_scene, new_scene) + ) + instance.data["expectedFiles"] = [new_exp] + self.log.info("Scene name was switched {} -> {}".format( + orig_scene, new_scene + )) + allInstances = [] for result in context.data["results"]: if (result["instance"] is not None and result["instance"] not in allInstances): allInstances.append(result["instance"]) - for inst in allInstances: - print(inst) - if inst.data['family'] == 'scene': - filepath = inst.data['destination_list'][0] - + # fallback if nothing was set if not filepath: + self.log.warning("Falling back to workfile") filepath = context.data["currentFile"] self.log.debug(filepath) @@ -150,8 +198,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): dirname = os.path.join(workspace, "renders") renderlayer = instance.data['setMembers'] # rs_beauty renderlayer_name = instance.data['subset'] # beauty - renderlayer_globals = instance.data["renderGlobals"] - legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] + # renderlayer_globals = instance.data["renderGlobals"] + # legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] deadline_user = context.data.get("deadlineUser", getpass.getuser()) jobname = "%s - %s" % (filename, instance.name) @@ -186,8 +234,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"), "Frames": "{start}-{end}x{step}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]), + start=int(instance.data["frameStartHandle"]), + end=int(instance.data["frameEndHandle"]), step=int(instance.data["byFrameStep"]), ), @@ -195,7 +243,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Optional, enable double-click to preview rendered # frames from Deadline Monitor - "OutputFilename0": output_filename_0.replace("\\", "/"), + "OutputDirectory0": os.path.dirname(output_filename_0), + "OutputFilename0": output_filename_0.replace("\\", "/") }, "PluginInfo": { # Input @@ -211,9 +260,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Only render layers are considered renderable in this pipeline "UsingRenderLayers": True, - # Use legacy Render Layer system - "UseLegacyRenderLayers": legacy_layers, - # Render only this layer "RenderLayer": renderlayer, @@ -228,80 +274,39 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AuxFiles": [] } - # Include critical environment variables with submission - keys = [ - # This will trigger `userSetup.py` on the slave - # such that proper initialisation happens the same - # way as it does on a local machine. - # TODO(marcus): This won't work if the slaves don't - # have accesss to these paths, such as if slaves are - # running Linux and the submitter is on Windows. 
- "PYTHONPATH", - "PATH", + exp = instance.data.get("expectedFiles") - "MTOA_EXTENSIONS_PATH", - "MTOA_EXTENSIONS", - "DYLD_LIBRARY_PATH", - "MAYA_RENDER_DESC_PATH", - "MAYA_MODULE_PATH", - "ARNOLD_PLUGIN_PATH", - "AVALON_SCHEMA", + OutputFilenames = {} + expIndex = 0 + + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + for aov, files in exp[0].items(): + col = clique.assemble(files)[0][0] + outputFile = col.format('{head}{padding}{tail}') + payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile + OutputFilenames[expIndex] = outputFile + expIndex += 1 + else: + col = clique.assemble(files)[0][0] + outputFile = col.format('{head}{padding}{tail}') + payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile + # OutputFilenames[expIndex] = outputFile + + + # We need those to pass them to pype for it to set correct context + keys = [ "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", - "PYBLISHPLUGINPATH", - - # todo: This is a temporary fix for yeti variables - "PEREGRINEL_LICENSE", - "SOLIDANGLE_LICENSE", - "ARNOLD_LICENSE" - "MAYA_MODULE_PATH", - "TOOL_ENV" + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "PYPE_USERNAME" ] + environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **api.Session) - # self.log.debug("enviro: {}".format(pprint(environment))) - for path in os.environ: - if path.lower().startswith('pype_'): - environment[path] = os.environ[path] - - environment["PATH"] = os.environ["PATH"] - # self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS'])) - clean_environment = {} - for key in environment: - clean_path = "" - self.log.debug("key: {}".format(key)) - self.log.debug("value: {}".format(environment[key])) - to_process = str(environment[key]) - if key == "PYPE_STUDIO_CORE_MOUNT": - clean_path = to_process - elif "://" in to_process: - clean_path = to_process - elif os.pathsep not in str(to_process): - try: - path = to_process - path.decode('UTF-8', 'strict') - clean_path = os.path.normpath(path) - except UnicodeDecodeError: - print('path contains non UTF characters') - else: - for path in to_process.split(os.pathsep): - try: - path.decode('UTF-8', 'strict') - clean_path += os.path.normpath(path) + os.pathsep - except UnicodeDecodeError: - print('path contains non UTF characters') - - if key == "PYTHONPATH": - clean_path = clean_path.replace('python2', 'python3') - clean_path = clean_path.replace( - os.path.normpath( - environment['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - environment['PYPE_STUDIO_CORE_PATH'])) # noqa - clean_environment[key] = clean_path - - environment = clean_environment payload["JobInfo"].update({ "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -319,7 +324,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.preflight_check(instance) - self.log.info("Submitting..") + self.log.info("Submitting ...") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # E.g. 
http://192.168.0.1:8082/api/jobs @@ -335,7 +340,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): def preflight_check(self, instance): """Ensure the startFrame, endFrame and byFrameStep are integers""" - for key in ("frameStart", "frameEnd", "byFrameStep"): + for key in ("frameStartHandle", "frameEndHandle", "byFrameStep"): value = instance.data[key] if int(value) == value: diff --git a/pype/plugins/maya/publish/validate_ass_relative_paths.py b/pype/plugins/maya/publish/validate_ass_relative_paths.py new file mode 100644 index 0000000000..b0fd12a550 --- /dev/null +++ b/pype/plugins/maya/publish/validate_ass_relative_paths.py @@ -0,0 +1,97 @@ +import os +import types + +import maya.cmds as cmds + +import pyblish.api +import pype.api +import pype.maya.action + + +class ValidateAssRelativePaths(pyblish.api.InstancePlugin): + """Ensure exporting ass file has set relative texture paths""" + + order = pype.api.ValidateContentsOrder + hosts = ['maya'] + families = ['ass'] + label = "ASS has relative texture paths" + actions = [pype.api.RepairAction] + + def process(self, instance): + # we cannot ask this until user open render settings as + # `defaultArnoldRenderOptions` doesn't exists + try: + relative_texture = cmds.getAttr( + "defaultArnoldRenderOptions.absolute_texture_paths") + relative_procedural = cmds.getAttr( + "defaultArnoldRenderOptions.absolute_procedural_paths") + texture_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.tspath" + ) + procedural_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.pspath" + ) + except ValueError: + assert False, ("Can not validate, render setting were not opened " + "yet so Arnold setting cannot be validate") + + scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True)) + scene_name, _ = os.path.splitext(scene_basename) + project_root = "{}{}{}".format( + os.environ.get("AVALON_PROJECTS"), + os.path.sep, + os.environ.get("AVALON_PROJECT") + ) + assert self.maya_is_true(relative_texture) is not True, \ + ("Texture path is set to be absolute") + assert self.maya_is_true(relative_procedural) is not True, \ + ("Procedural path is set to be absolute") + + texture_search_path = texture_search_path.replace("\\", "/") + procedural_search_path = procedural_search_path.replace("\\", "/") + project_root = project_root.replace("\\", "/") + + assert project_root in texture_search_path, \ + ("Project root is not in texture_search_path") + assert project_root in procedural_search_path, \ + ("Project root is not in procedural_search_path") + + @classmethod + def repair(cls, instance): + texture_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.tspath" + ) + procedural_search_path = cmds.getAttr( + "defaultArnoldRenderOptions.pspath" + ) + + project_root = "{}{}{}".format( + os.environ.get("AVALON_PROJECTS"), + os.path.sep, + os.environ.get("AVALON_PROJECT"), + ).replace("\\", "/") + + cmds.setAttr("defaultArnoldRenderOptions.tspath", + project_root + os.pathsep + texture_search_path, + type="string") + cmds.setAttr("defaultArnoldRenderOptions.pspath", + project_root + os.pathsep + procedural_search_path, + type="string") + cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths", + False) + cmds.setAttr("defaultArnoldRenderOptions.absolute_texture_paths", + False) + + def maya_is_true(self, attr_val): + """ + Whether a Maya attr evaluates to True. + When querying an attribute value from an ambiguous object the + Maya API will return a list of values, which need to be properly + handled to evaluate properly. 
+ """ + if isinstance(attr_val, types.BooleanType): + return attr_val + elif isinstance(attr_val, (types.ListType, types.GeneratorType)): + return any(attr_val) + else: + return bool(attr_val) diff --git a/pype/plugins/maya/publish/validate_render_single_camera.py b/pype/plugins/maya/publish/validate_render_single_camera.py index b8561a69c9..51c5f64c86 100644 --- a/pype/plugins/maya/publish/validate_render_single_camera.py +++ b/pype/plugins/maya/publish/validate_render_single_camera.py @@ -1,17 +1,26 @@ +import re + import pyblish.api import pype.api import pype.maya.action +from maya import cmds + + +ImagePrefixes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'defaultRenderGlobals.imageFilePrefix', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' +} + class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): - """Only one camera may be renderable in a layer. - - Currently the pipeline supports only a single camera per layer. - This is because when multiple cameras are rendered the output files - automatically get different names because the render token - is not in the output path. As such the output files conflict with how - our pipeline expects the output. + """Validate renderable camera count for layer and token. + Pipeline is supporting multiple renderable cameras per layer, but image + prefix must contain token. """ order = pype.api.ValidateContentsOrder @@ -21,6 +30,8 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): "vrayscene"] actions = [pype.maya.action.SelectInvalidAction] + R_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) + def process(self, instance): """Process all the cameras in the instance""" invalid = self.get_invalid(instance) @@ -31,8 +42,17 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): def get_invalid(cls, instance): cameras = instance.data.get("cameras", []) + renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + file_prefix = cmds.getAttr(ImagePrefixes[renderer]) if len(cameras) > 1: + if re.search(cls.R_CAMERA_TOKEN, file_prefix): + # if there is token in prefix and we have more then + # 1 camera, all is ok. + return cls.log.error("Multiple renderable cameras found for %s: %s " % (instance.data["setMembers"], cameras)) return [instance.data["setMembers"]] + cameras diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py index 7bf44710e2..c98f0f8cdc 100644 --- a/pype/plugins/maya/publish/validate_rendersettings.py +++ b/pype/plugins/maya/publish/validate_rendersettings.py @@ -1,4 +1,5 @@ import os +import re from maya import cmds, mel import pymel.core as pm @@ -11,9 +12,13 @@ import pype.maya.lib as lib class ValidateRenderSettings(pyblish.api.InstancePlugin): """Validates the global render settings - * File Name Prefix must be as followed: - * vray: maya/// - * default: maya///_ + * File Name Prefix must start with: `maya/` + all other token are customizable but sane values are: + + `maya///_` + + token is supported also, usefull for multiple renderable + cameras per render layer. 
* Frame Padding must be: * default: 4 @@ -35,16 +40,47 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): families = ["renderlayer"] actions = [pype.api.RepairAction] + ImagePrefixes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'rmanGlobals.imageFileFormat', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' + } + + ImagePrefixTokens = { + + 'arnold': 'maya///_', + 'redshift': 'maya///', + 'vray': 'maya///', + 'renderman': '_..' + } + + # WARNING: There is bug? in renderman, translating token + # to something left behind mayas default image prefix. So instead + # `SceneName_v01` it translates to: + # `SceneName_v01//` that means + # for example: + # `SceneName_v01/Main/Main_`. Possible solution is to define + # custom token like to point to determined scene name. + RendermanDirPrefix = "/renders/maya//" + + R_AOV_TOKEN = re.compile( + r'%a||', re.IGNORECASE) + R_LAYER_TOKEN = re.compile( + r'%l||', re.IGNORECASE) + R_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) + R_SCENE_TOKEN = re.compile(r'%s|', re.IGNORECASE) + DEFAULT_PADDING = 4 - RENDERER_PREFIX = {"vray": "maya///"} + VRAY_PREFIX = "maya///" DEFAULT_PREFIX = "maya///_" def process(self, instance): invalid = self.get_invalid(instance) - if invalid: - raise ValueError("Invalid render settings found for '%s'!" - % instance.name) + assert invalid is False, ("Invalid render settings " + "found for '{}'!".format(instance.name)) @classmethod def get_invalid(cls, instance): @@ -53,10 +89,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): renderer = instance.data['renderer'] layer = instance.data['setMembers'] + cameras = instance.data.get("cameras", []) # Get the node attributes for current renderer attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default']) - prefix = lib.get_attr_in_layer("{node}.{prefix}".format(**attrs), + prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer], layer=layer) padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs), layer=layer) @@ -68,12 +105,63 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cls.log.error("Animation needs to be enabled. 
Use the same " "frame for start and end to render single frame") - fname_prefix = cls.get_prefix(renderer) - - if prefix != fname_prefix: + if not prefix.lower().startswith("maya/"): invalid = True - cls.log.error("Wrong file name prefix: %s (expected: %s)" - % (prefix, fname_prefix)) + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't start with: 'maya/'".format(prefix)) + + if not re.search(cls.R_LAYER_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' or " + "'' token".format(prefix)) + + if len(cameras) > 1: + if not re.search(cls.R_CAMERA_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' token".format(prefix)) + + # renderer specific checks + if renderer == "vray": + # no vray checks implemented yet + pass + elif renderer == "redshift": + # no redshift check implemented yet + pass + elif renderer == "renderman": + file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat") + dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir") + + if file_prefix.lower() != cls.ImagePrefixTokens[renderer].lower(): + invalid = True + cls.log.error("Wrong image prefix [ {} ]".format(file_prefix)) + + if dir_prefix.lower() != cls.RendermanDirPrefix.lower(): + invalid = True + cls.log.error("Wrong directory prefix [ {} ]".format( + dir_prefix)) + + else: + multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs") + if multichannel: + if re.search(cls.R_AOV_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "You can't use '' token " + "with merge AOVs turned on".format(prefix)) + else: + if not re.search(cls.R_AOV_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' or " + "token".format(prefix)) + + # prefix check + if prefix.lower() != cls.ImagePrefixTokens[renderer].lower(): + cls.log.warning("warning: prefix differs from " + "recommended {}".format( + cls.ImagePrefixTokens[renderer])) if padding != cls.DEFAULT_PADDING: invalid = True @@ -82,21 +170,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): return invalid - @classmethod - def get_prefix(cls, renderer): - prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX) - # maya.cmds and pymel.core return only default project directory and - # not the current one but only default. - output_path = os.path.join( - mel.eval("workspace -q -rd;"), pm.workspace.fileRules["images"] - ) - # Workfile paths can be configured to have host name in file path. - # In this case we want to avoid duplicate folder names. 
- if "maya" in output_path.lower(): - prefix = prefix.replace("maya/", "") - - return prefix - @classmethod def repair(cls, instance): @@ -108,14 +181,23 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): render_attrs = lib.RENDER_ATTRS.get(renderer, default) # Repair prefix - node = render_attrs["node"] - prefix_attr = render_attrs["prefix"] + if renderer != "renderman": + node = render_attrs["node"] + prefix_attr = render_attrs["prefix"] - fname_prefix = cls.get_prefix(renderer) - cmds.setAttr("{}.{}".format(node, prefix_attr), - fname_prefix, type="string") + fname_prefix = cls.ImagePrefixTokens[renderer] + cmds.setAttr("{}.{}".format(node, prefix_attr), + fname_prefix, type="string") - # Repair padding - padding_attr = render_attrs["padding"] - cmds.setAttr("{}.{}".format(node, padding_attr), - cls.DEFAULT_PADDING) + # Repair padding + padding_attr = render_attrs["padding"] + cmds.setAttr("{}.{}".format(node, padding_attr), + cls.DEFAULT_PADDING) + else: + # renderman handles stuff differently + cmds.setAttr("rmanGlobals.imageFileFormat", + cls.ImagePrefixTokens[renderer], + type="string") + cmds.setAttr("rmanGlobals.imageOutputDir", + cls.RendermanDirPrefix, + type="string") diff --git a/pype/plugins/nuke/_load_unused/extract_write_next_render.py b/pype/plugins/nuke/_load_unused/extract_write_next_render.py deleted file mode 100644 index 40bfe59ec2..0000000000 --- a/pype/plugins/nuke/_load_unused/extract_write_next_render.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api - - -class WriteToRender(pyblish.api.InstancePlugin): - """Swith Render knob on write instance to on, - so next time publish will be set to render - """ - - order = pyblish.api.ExtractorOrder + 0.1 - label = "Write to render next" - optional = True - hosts = ["nuke", "nukeassist"] - families = ["write"] - - def process(self, instance): - return - if [f for f in instance.data["families"] - if ".frames" in f]: - instance[0]["render"].setValue(True) - self.log.info("Swith write node render to `on`") - else: - # swith to - instance[0]["render"].setValue(False) - self.log.info("Swith write node render to `Off`") diff --git a/pype/plugins/nuke/_load_unused/load_alembic b/pype/plugins/nuke/_load_unused/load_alembic deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_load_unused/load_backdrop b/pype/plugins/nuke/_load_unused/load_backdrop deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_load_unused/load_camera_abc b/pype/plugins/nuke/_load_unused/load_camera_abc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_load_unused/load_camera_nk b/pype/plugins/nuke/_load_unused/load_camera_nk deleted file mode 100644 index 8b13789179..0000000000 --- a/pype/plugins/nuke/_load_unused/load_camera_nk +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pype/plugins/nuke/_load_unused/load_still b/pype/plugins/nuke/_load_unused/load_still deleted file mode 100644 index c2aa061c5a..0000000000 --- a/pype/plugins/nuke/_load_unused/load_still +++ /dev/null @@ -1 +0,0 @@ -# usually used for mattepainting diff --git a/pype/plugins/nuke/_publish_unused/collect_active_viewer.py b/pype/plugins/nuke/_publish_unused/collect_active_viewer.py deleted file mode 100644 index 5a6cc02b88..0000000000 --- a/pype/plugins/nuke/_publish_unused/collect_active_viewer.py +++ /dev/null @@ -1,14 +0,0 @@ -import pyblish.api -import nuke - - -class CollectActiveViewer(pyblish.api.ContextPlugin): - """Collect any active viewer from nodes - """ - - order = 
pyblish.api.CollectorOrder + 0.3 - label = "Collect Active Viewer" - hosts = ["nuke"] - - def process(self, context): - context.data["ActiveViewer"] = nuke.activeViewer() diff --git a/pype/plugins/nuke/_publish_unused/collect_render_target.py b/pype/plugins/nuke/_publish_unused/collect_render_target.py deleted file mode 100644 index 6c04414f69..0000000000 --- a/pype/plugins/nuke/_publish_unused/collect_render_target.py +++ /dev/null @@ -1,46 +0,0 @@ -import pyblish.api - - -@pyblish.api.log -class CollectRenderTarget(pyblish.api.InstancePlugin): - """Collect families for all instances""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Render Target" - hosts = ["nuke", "nukeassist"] - families = ['write'] - - def process(self, instance): - - node = instance[0] - - self.log.info('processing {}'.format(node)) - - families = [] - if instance.data.get('families'): - families += instance.data['families'] - - # set for ftrack to accept - # instance.data["families"] = ["ftrack"] - - if node["render"].value(): - # dealing with local/farm rendering - if node["render_farm"].value(): - families.append("render.farm") - else: - families.append("render.local") - else: - families.append("render.frames") - # to ignore staging dir op in integrate - instance.data['transfer'] = False - - families.append('ftrack') - - instance.data["families"] = families - - # Sort/grouped by family (preserving local index) - instance.context[:] = sorted(instance.context, key=self.sort_by_family) - - def sort_by_family(self, instance): - """Sort by family""" - return instance.data.get("families", instance.data.get("family")) diff --git a/pype/plugins/nuke/_publish_unused/extract_frames.py b/pype/plugins/nuke/_publish_unused/extract_frames.py deleted file mode 100644 index b75f893802..0000000000 --- a/pype/plugins/nuke/_publish_unused/extract_frames.py +++ /dev/null @@ -1,22 +0,0 @@ -import pyblish - - -class ExtractFramesToIntegrate(pyblish.api.InstancePlugin): - """Extract rendered frames for integrator - """ - - order = pyblish.api.ExtractorOrder - label = "Extract rendered frames" - hosts = ["nuke"] - families = ["render"] - - def process(self, instance\ - return - - # staging_dir = instance.data.get('stagingDir', None) - # output_dir = instance.data.get('outputDir', None) - # - # if not staging_dir: - # staging_dir = output_dir - # instance.data['stagingDir'] = staging_dir - # # instance.data['transfer'] = False diff --git a/pype/plugins/nuke/_publish_unused/extract_nuke_write.py b/pype/plugins/nuke/_publish_unused/extract_nuke_write.py deleted file mode 100644 index 155b5cf56d..0000000000 --- a/pype/plugins/nuke/_publish_unused/extract_nuke_write.py +++ /dev/null @@ -1,116 +0,0 @@ -import os - -import nuke -import pyblish.api - - -class Extract(pyblish.api.InstancePlugin): - """Super class for write and writegeo extractors.""" - - order = pyblish.api.ExtractorOrder - optional = True - label = "Extract Nuke [super]" - hosts = ["nuke"] - match = pyblish.api.Subset - - # targets = ["process.local"] - - def execute(self, instance): - # Get frame range - node = instance[0] - first_frame = nuke.root()["first_frame"].value() - last_frame = nuke.root()["last_frame"].value() - - if node["use_limit"].value(): - first_frame = node["first"].value() - last_frame = node["last"].value() - - # Render frames - nuke.execute(node.name(), int(first_frame), int(last_frame)) - - -class ExtractNukeWrite(Extract): - """ Extract output from write nodes. 
""" - - families = ["write", "local"] - label = "Extract Write" - - def process(self, instance): - - self.execute(instance) - - # Validate output - for filename in list(instance.data["collection"]): - if not os.path.exists(filename): - instance.data["collection"].remove(filename) - self.log.warning("\"{0}\" didn't render.".format(filename)) - - -class ExtractNukeCache(Extract): - - label = "Cache" - families = ["cache", "local"] - - def process(self, instance): - - self.execute(instance) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg - - -class ExtractNukeCamera(Extract): - - label = "Camera" - families = ["camera", "local"] - - def process(self, instance): - - node = instance[0] - node["writeGeometries"].setValue(False) - node["writePointClouds"].setValue(False) - node["writeAxes"].setValue(False) - - file_path = node["file"].getValue() - node["file"].setValue(instance.data["output_path"]) - - self.execute(instance) - - node["writeGeometries"].setValue(True) - node["writePointClouds"].setValue(True) - node["writeAxes"].setValue(True) - - node["file"].setValue(file_path) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg - - -class ExtractNukeGeometry(Extract): - - label = "Geometry" - families = ["geometry", "local"] - - def process(self, instance): - - node = instance[0] - node["writeCameras"].setValue(False) - node["writePointClouds"].setValue(False) - node["writeAxes"].setValue(False) - - file_path = node["file"].getValue() - node["file"].setValue(instance.data["output_path"]) - - self.execute(instance) - - node["writeCameras"].setValue(True) - node["writePointClouds"].setValue(True) - node["writeAxes"].setValue(True) - - node["file"].setValue(file_path) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg diff --git a/pype/plugins/nuke/_publish_unused/extract_script.py b/pype/plugins/nuke/_publish_unused/extract_script.py deleted file mode 100644 index 7d55ea0da4..0000000000 --- a/pype/plugins/nuke/_publish_unused/extract_script.py +++ /dev/null @@ -1,40 +0,0 @@ - -import pyblish.api -import os -import pype -import shutil - - -class ExtractScript(pype.api.Extractor): - """Publish script - """ - label = 'Extract Script' - order = pyblish.api.ExtractorOrder - 0.05 - optional = True - hosts = ['nuke'] - families = ["workfile"] - - def process(self, instance): - self.log.debug("instance extracting: {}".format(instance.data)) - current_script = instance.context.data["currentFile"] - - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}".format(instance.data["name"]) - path = os.path.join(stagingdir, filename) - - self.log.info("Performing extraction..") - shutil.copy(current_script, path) - - if "representations" not in instance.data: - instance.data["representations"] = list() - - representation = { - 'name': 'nk', - 'ext': '.nk', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py b/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py deleted file mode 100644 index e05c42ae50..0000000000 --- 
a/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py +++ /dev/null @@ -1,27 +0,0 @@ -import pyblish.api -import shutil -import os - - -class CopyStagingDir(pyblish.api.InstancePlugin): - """Copy data rendered into temp local directory - """ - - order = pyblish.api.IntegratorOrder - 2 - label = "Copy data from temp dir" - hosts = ["nuke", "nukeassist"] - families = ["render.local"] - - def process(self, instance): - temp_dir = instance.data.get("stagingDir") - output_dir = instance.data.get("outputDir") - - # copy data to correct dir - if not os.path.exists(output_dir): - os.makedirs(output_dir) - self.log.info("output dir has been created") - - for f in os.listdir(temp_dir): - self.log.info("copy file to correct destination: {}".format(f)) - shutil.copy(os.path.join(temp_dir, os.path.basename(f)), - os.path.join(output_dir, os.path.basename(f))) diff --git a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py b/pype/plugins/nuke/_publish_unused/publish_image_sequences.py deleted file mode 100644 index 34634dcc6b..0000000000 --- a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py +++ /dev/null @@ -1,98 +0,0 @@ -import re -import os -import json -import subprocess - -import pyblish.api - -from pype.action import get_errored_plugins_from_data - - -def _get_script(): - """Get path to the image sequence script""" - - # todo: use a more elegant way to get the python script - - try: - from pype.fusion.scripts import publish_filesequence - except Exception: - raise RuntimeError("Expected module 'publish_imagesequence'" - "to be available") - - module_path = publish_filesequence.__file__ - if module_path.endswith(".pyc"): - module_path = module_path[:-len(".pyc")] + ".py" - - return module_path - - -class PublishImageSequence(pyblish.api.InstancePlugin): - """Publish the generated local image sequences.""" - - order = pyblish.api.IntegratorOrder - label = "Publish Rendered Image Sequence(s)" - hosts = ["fusion"] - families = ["saver.renderlocal"] - - def process(self, instance): - - # Skip this plug-in if the ExtractImageSequence failed - errored_plugins = get_errored_plugins_from_data(instance.context) - if any(plugin.__name__ == "FusionRenderLocal" for plugin in - errored_plugins): - raise RuntimeError("Fusion local render failed, " - "publishing images skipped.") - - subset = instance.data["subset"] - ext = instance.data["ext"] - - # Regex to match resulting renders - regex = "^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset), - ext=re.escape(ext)) - - # The instance has most of the information already stored - metadata = { - "regex": regex, - "frameStart": instance.context.data["frameStart"], - "frameEnd": instance.context.data["frameEnd"], - "families": ["imagesequence"], - } - - # Write metadata and store the path in the instance - output_directory = instance.data["outputDir"] - path = os.path.join(output_directory, - "{}_metadata.json".format(subset)) - with open(path, "w") as f: - json.dump(metadata, f) - - assert os.path.isfile(path), ("Stored path is not a file for %s" - % instance.data["name"]) - - # Suppress any subprocess console - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - startupinfo.wShowWindow = subprocess.SW_HIDE - - process = subprocess.Popen(["python", _get_script(), - "--paths", path], - bufsize=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - startupinfo=startupinfo) - - while True: - output = process.stdout.readline() - # Break when there is no output or a return code has been 
given - if output == '' and process.poll() is not None: - process.stdout.close() - break - if output: - line = output.strip() - if line.startswith("ERROR"): - self.log.error(line) - else: - self.log.info(line) - - if process.returncode != 0: - raise RuntimeError("Process quit with non-zero " - "return code: {}".format(process.returncode)) diff --git a/pype/plugins/nuke/_publish_unused/submit_deadline.py b/pype/plugins/nuke/_publish_unused/submit_deadline.py deleted file mode 100644 index 8b86189425..0000000000 --- a/pype/plugins/nuke/_publish_unused/submit_deadline.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -import json -import getpass - -from avalon import api -from avalon.vendor import requests - -import pyblish.api - - -class NukeSubmitDeadline(pyblish.api.InstancePlugin): - # TODO: rewrite docstring to nuke - """Submit current Comp to Deadline - - Renders are submitted to a Deadline Web Service as - supplied via the environment variable DEADLINE_REST_URL - - """ - - label = "Submit to Deadline" - order = pyblish.api.IntegratorOrder - hosts = ["nuke"] - families = ["write", "render.deadline"] - - def process(self, instance): - - context = instance.context - - key = "__hasRun{}".format(self.__class__.__name__) - if context.data.get(key, False): - return - else: - context.data[key] = True - - DEADLINE_REST_URL = api.Session.get("DEADLINE_REST_URL", - "http://localhost:8082") - assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" - - # Collect all saver instances in context that are to be rendered - write_instances = [] - for instance in context[:]: - if not self.families[0] in instance.data.get("families"): - # Allow only saver family instances - continue - - if not instance.data.get("publish", True): - # Skip inactive instances - continue - self.log.debug(instance.data["name"]) - write_instances.append(instance) - - if not write_instances: - raise RuntimeError("No instances found for Deadline submittion") - - hostVersion = int(context.data["hostVersion"]) - filepath = context.data["currentFile"] - filename = os.path.basename(filepath) - comment = context.data.get("comment", "") - deadline_user = context.data.get("deadlineUser", getpass.getuser()) - - # Documentation for keys available at: - # https://docs.thinkboxsoftware.com - # /products/deadline/8.0/1_User%20Manual/manual - # /manual-submission.html#job-info-file-options - payload = { - "JobInfo": { - # Top-level group name - "BatchName": filename, - - # Job name, as seen in Monitor - "Name": filename, - - # User, as seen in Monitor - "UserName": deadline_user, - - # Use a default submission pool for Nuke - "Pool": "nuke", - - "Plugin": "Nuke", - "Frames": "{start}-{end}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]) - ), - - "Comment": comment, - }, - "PluginInfo": { - # Input - "FlowFile": filepath, - - # Mandatory for Deadline - "Version": str(hostVersion), - - # Render in high quality - "HighQuality": True, - - # Whether saver output should be checked after rendering - # is complete - "CheckOutput": True, - - # Proxy: higher numbers smaller images for faster test renders - # 1 = no proxy quality - "Proxy": 1, - }, - - # Mandatory for Deadline, may be empty - "AuxFiles": [] - } - - # Enable going to rendered frames from Deadline Monitor - for index, instance in enumerate(write_instances): - path = instance.data["path"] - folder, filename = os.path.split(path) - payload["JobInfo"]["OutputDirectory%d" % index] = folder - payload["JobInfo"]["OutputFilename%d" % index] = filename - - # 
Include critical variables with submission - keys = [ - # TODO: This won't work if the slaves don't have accesss to - # these paths, such as if slaves are running Linux and the - # submitter is on Windows. - "PYTHONPATH", - "NUKE_PATH" - # "OFX_PLUGIN_PATH", - ] - environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) - - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) - - self.log.info("Submitting..") - self.log.info(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(DEADLINE_REST_URL) - response = requests.post(url, json=payload) - if not response.ok: - raise Exception(response.text) - - # Store the response for dependent job submission plug-ins - for instance in write_instances: - instance.data["deadlineSubmissionJob"] = response.json() diff --git a/pype/plugins/nuke/_publish_unused/test_instances.py b/pype/plugins/nuke/_publish_unused/test_instances.py deleted file mode 100644 index e3fcc4b8f1..0000000000 --- a/pype/plugins/nuke/_publish_unused/test_instances.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api - - -class IncrementTestPlugin(pyblish.api.ContextPlugin): - """Increment current script version.""" - - order = pyblish.api.CollectorOrder + 0.5 - label = "Test Plugin" - hosts = ['nuke'] - - def process(self, context): - instances = context[:] - - prerender_check = list() - families_check = list() - for instance in instances: - if ("prerender" in str(instance)): - prerender_check.append(instance) - if instance.data.get("families", None): - families_check.append(True) - - if len(prerender_check) != len(families_check): - self.log.info(prerender_check) - self.log.info(families_check) diff --git a/pype/plugins/nuke/_publish_unused/validate_active_viewer.py b/pype/plugins/nuke/_publish_unused/validate_active_viewer.py deleted file mode 100644 index 618a7f1502..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_active_viewer.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api -import nuke - - -class ValidateActiveViewer(pyblish.api.ContextPlugin): - """Validate presentse of the active viewer from nodes - """ - - order = pyblish.api.ValidatorOrder - label = "Validate Active Viewer" - hosts = ["nuke"] - - def process(self, context): - viewer_process_node = context.data.get("ViewerProcess") - - assert viewer_process_node, ( - "Missing active viewer process! Please click on output write node and push key number 1-9" - ) - active_viewer = context.data["ActiveViewer"] - active_input = active_viewer.activeInput() - - assert active_input is not None, ( - "Missing active viewer input! 
Please click on output write node and push key number 1-9" - ) diff --git a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py deleted file mode 100644 index 441658297d..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py +++ /dev/null @@ -1,68 +0,0 @@ -import nuke -import os -import pyblish.api -from avalon import io -# TODO: add repair function - - -@pyblish.api.log -class ValidateSettingsNuke(pyblish.api.Validator): - """ Validates settings """ - - families = ['scene'] - hosts = ['nuke'] - optional = True - label = 'Settings' - - def process(self, instance): - - asset = io.find_one({"name": os.environ['AVALON_ASSET']}) - try: - avalon_resolution = asset["data"].get("resolution", '') - avalon_pixel_aspect = asset["data"].get("pixelAspect", '') - avalon_fps = asset["data"].get("fps", '') - avalon_first = asset["data"].get("frameStart", '') - avalon_last = asset["data"].get("frameEnd", '') - avalon_crop = asset["data"].get("crop", '') - except KeyError: - print( - "No resolution information found for \"{0}\".".format( - asset["name"] - ) - ) - return - - # validating first frame - local_first = nuke.root()['first_frame'].value() - msg = 'First frame is incorrect.' - msg += '\n\nLocal first: %s' % local_first - msg += '\n\nOnline first: %s' % avalon_first - assert local_first == avalon_first, msg - - # validating last frame - local_last = nuke.root()['last_frame'].value() - msg = 'Last frame is incorrect.' - msg += '\n\nLocal last: %s' % local_last - msg += '\n\nOnline last: %s' % avalon_last - assert local_last == avalon_last, msg - - # validating fps - local_fps = nuke.root()['fps'].value() - msg = 'FPS is incorrect.' - msg += '\n\nLocal fps: %s' % local_fps - msg += '\n\nOnline fps: %s' % avalon_fps - assert local_fps == avalon_fps, msg - - # validating resolution width - local_width = nuke.root().format().width() - msg = 'Width is incorrect.' - msg += '\n\nLocal width: %s' % local_width - msg += '\n\nOnline width: %s' % avalon_resolution[0] - assert local_width == avalon_resolution[0], msg - - # validating resolution width - local_height = nuke.root().format().height() - msg = 'Height is incorrect.' - msg += '\n\nLocal height: %s' % local_height - msg += '\n\nOnline height: %s' % avalon_resolution[1] - assert local_height == avalon_resolution[1], msg diff --git a/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py b/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py deleted file mode 100644 index a82fb16f31..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py +++ /dev/null @@ -1,33 +0,0 @@ -import nuke - -import pyblish.api - - -class RepairNukeProxyModeAction(pyblish.api.Action): - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - - nuke.root()["proxy"].setValue(0) - - -class ValidateNukeProxyMode(pyblish.api.ContextPlugin): - """Validates against having proxy mode on.""" - - order = pyblish.api.ValidatorOrder - optional = True - label = "Proxy Mode" - actions = [RepairNukeProxyModeAction] - hosts = ["nuke", "nukeassist"] - # targets = ["default", "process"] - - def process(self, context): - - msg = ( - "Proxy mode is not supported. Please disable Proxy Mode in the " - "Project settings." 
- ) - assert not nuke.root()["proxy"].getValue(), msg diff --git a/pype/plugins/nuke/_publish_unused/validate_version_match.py b/pype/plugins/nuke/_publish_unused/validate_version_match.py deleted file mode 100644 index 1358d9a7b3..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_version_match.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import pyblish.api -import pype.utils - - - -@pyblish.api.log -class RepairNukeWriteNodeVersionAction(pyblish.api.Action): - label = "Repair" - on = "failed" - icon = "wrench" - - def process(self, context, plugin): - import pype.nuke.lib as nukelib - instances = pype.utils.filter_instances(context, plugin) - - for instance in instances: - node = instance[0] - render_path = nukelib.get_render_path(node) - self.log.info("render_path: {}".format(render_path)) - node['file'].setValue(render_path.replace("\\", "/")) - - -class ValidateVersionMatch(pyblish.api.InstancePlugin): - """Checks if write version matches workfile version""" - - label = "Validate Version Match" - order = pyblish.api.ValidatorOrder - actions = [RepairNukeWriteNodeVersionAction] - hosts = ["nuke"] - families = ['write'] - - def process(self, instance): - - assert instance.data['version'] == instance.context.data['version'], "\ - Version in write doesn't match version of the workfile" diff --git a/pype/plugins/nuke/_publish_unused/validate_write_families.py b/pype/plugins/nuke/_publish_unused/validate_write_families.py deleted file mode 100644 index 73f710867d..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_write_families.py +++ /dev/null @@ -1,59 +0,0 @@ - -import pyblish.api -import pype.api -import pype.nuke.actions - - -class RepairWriteFamiliesAction(pyblish.api.Action): - label = "Fix Write's render attributes" - on = "failed" - icon = "wrench" - - def process(self, instance, plugin): - self.log.info("instance {}".format(instance)) - instance["render"].setValue(True) - self.log.info("Rendering toggled ON") - - -@pyblish.api.log -class ValidateWriteFamilies(pyblish.api.InstancePlugin): - """ Validates write families. """ - - order = pyblish.api.ValidatorOrder - label = "Valitade writes families" - hosts = ["nuke"] - families = ["write"] - actions = [pype.nuke.actions.SelectInvalidAction, pype.api.RepairAction] - - @staticmethod - def get_invalid(self, instance): - if not [f for f in instance.data["families"] - if ".frames" in f]: - return - - if not instance.data.get('files'): - return (instance) - - def process(self, instance): - self.log.debug('instance.data["files"]: {}'.format(instance.data['files'])) - - invalid = self.get_invalid(self, instance) - - if invalid: - raise ValueError(str("`{}`: Switch `Render` on! 
" - "> {}".format(__name__, invalid))) - - # if any(".frames" in f for f in instance.data["families"]): - # if not instance.data["files"]: - # raise ValueError("instance {} is set to publish frames\ - # but no files were collected, render the frames first or\ - # check 'render' checkbox onthe no to 'ON'".format(instance))) - # - # - # self.log.info("Checked correct writes families") - - @classmethod - def repair(cls, instance): - cls.log.info("instance {}".format(instance)) - instance[0]["render"].setValue(True) - cls.log.info("Rendering toggled ON") diff --git a/pype/plugins/nuke/create/create_backdrop.py b/pype/plugins/nuke/create/create_backdrop.py index 767e92b592..8609117a0d 100644 --- a/pype/plugins/nuke/create/create_backdrop.py +++ b/pype/plugins/nuke/create/create_backdrop.py @@ -2,6 +2,7 @@ from avalon.nuke.pipeline import Creator from avalon.nuke import lib as anlib import nuke + class CreateBackdrop(Creator): """Add Publishable Backdrop""" @@ -35,8 +36,10 @@ class CreateBackdrop(Creator): return instance else: - nuke.message("Please select nodes you " - "wish to add to a container") + msg = str("Please select nodes you " + "wish to add to a container") + self.log.error(msg) + nuke.message(msg) return else: bckd_node = autoBackdrop() diff --git a/pype/plugins/nuke/create/create_gizmo.py b/pype/plugins/nuke/create/create_gizmo.py index 41229862e3..ca199b8800 100644 --- a/pype/plugins/nuke/create/create_gizmo.py +++ b/pype/plugins/nuke/create/create_gizmo.py @@ -36,8 +36,10 @@ class CreateGizmo(Creator): node["tile_color"].setValue(int(self.node_color, 16)) return anlib.imprint(node, self.data) else: - nuke.message("Please select a group node " - "you wish to publish as the gizmo") + msg = ("Please select a group node " + "you wish to publish as the gizmo") + self.log.error(msg) + nuke.message(msg) if len(nodes) >= 2: anlib.select_nodes(nodes) @@ -58,8 +60,10 @@ class CreateGizmo(Creator): return anlib.imprint(gizmo_node, self.data) else: - nuke.message("Please select nodes you " - "wish to add to the gizmo") + msg = ("Please select nodes you " + "wish to add to the gizmo") + self.log.error(msg) + nuke.message(msg) return else: with anlib.maintained_selection(): diff --git a/pype/plugins/nuke/create/create_read.py b/pype/plugins/nuke/create/create_read.py index 1aa7e68746..70db580a7e 100644 --- a/pype/plugins/nuke/create/create_read.py +++ b/pype/plugins/nuke/create/create_read.py @@ -34,7 +34,9 @@ class CrateRead(avalon.nuke.Creator): nodes = self.nodes if not nodes or len(nodes) == 0: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) else: count_reads = 0 for node in nodes: @@ -46,7 +48,9 @@ class CrateRead(avalon.nuke.Creator): count_reads += 1 if count_reads < 1: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) return def change_read_node(self, name, node, data): diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py index a85408cab3..74e450f267 100644 --- a/pype/plugins/nuke/create/create_write.py +++ b/pype/plugins/nuke/create/create_write.py @@ -41,9 +41,11 @@ class CreateWriteRender(plugin.PypeCreator): if (self.options or {}).get("useSelection"): nodes = self.nodes - assert len(nodes) < 2, self.log.error( - "Select only one node. The node you want to connect to, " - "or tick off `Use selection`") + if not (len(nodes) < 2): + msg = ("Select only one node. 
The node you want to connect to, " + "or tick off `Use selection`") + log.error(msg) + nuke.message(msg) selected_node = nodes[0] inputs = [selected_node] @@ -134,7 +136,11 @@ class CreateWritePrerender(plugin.PypeCreator): if (self.options or {}).get("useSelection"): nodes = self.nodes - assert len(nodes) < 2, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`") + if not (len(nodes) < 2): + msg = ("Select only one node. The node you want to connect to, " + "or tick off `Use selection`") + self.log.error(msg) + nuke.message(msg) selected_node = nodes[0] inputs = [selected_node] diff --git a/pype/plugins/nuke/load/load_backdrop.py b/pype/plugins/nuke/load/load_backdrop.py index 7f58d4e9ec..04cff311d1 100644 --- a/pype/plugins/nuke/load/load_backdrop.py +++ b/pype/plugins/nuke/load/load_backdrop.py @@ -240,74 +240,6 @@ class LoadBackdropNodes(api.Loader): return update_container(GN, data_imprint) - def connect_active_viewer(self, group_node): - """ - Finds Active viewer and - place the node under it, also adds - name of group into Input Process of the viewer - - Arguments: - group_node (nuke node): nuke group node object - - """ - group_node_name = group_node["name"].value() - - viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()] - if len(viewer) > 0: - viewer = viewer[0] - else: - self.log.error("Please create Viewer node before you " - "run this action again") - return None - - # get coordinates of Viewer1 - xpos = viewer["xpos"].value() - ypos = viewer["ypos"].value() - - ypos += 150 - - viewer["ypos"].setValue(ypos) - - # set coordinates to group node - group_node["xpos"].setValue(xpos) - group_node["ypos"].setValue(ypos + 50) - - # add group node name to Viewer Input Process - viewer["input_process_node"].setValue(group_node_name) - - # put backdrop under - pnlib.create_backdrop(label="Input Process", layer=2, - nodes=[viewer, group_node], color="0x7c7faaff") - - return True - - def get_item(self, data, trackIndex, subTrackIndex): - return {key: val for key, val in data.items() - if subTrackIndex == val["subTrackIndex"] - if trackIndex == val["trackIndex"]} - - def byteify(self, input): - """ - Converts unicode strings to strings - It goes trought all dictionary - - Arguments: - input (dict/str): input - - Returns: - dict: with fixed values and keys - - """ - - if isinstance(input, dict): - return {self.byteify(key): self.byteify(value) - for key, value in input.iteritems()} - elif isinstance(input, list): - return [self.byteify(element) for element in input] - elif isinstance(input, unicode): - return input.encode('utf-8') - else: - return input def switch(self, container, representation): self.update(container, representation) diff --git a/pype/plugins/nuke/load/load_gizmo_ip.py b/pype/plugins/nuke/load/load_gizmo_ip.py index 0d78c14214..5fecbc4c5c 100644 --- a/pype/plugins/nuke/load/load_gizmo_ip.py +++ b/pype/plugins/nuke/load/load_gizmo_ip.py @@ -176,8 +176,10 @@ class LoadGizmoInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you " - "run this action again") + msg = str("Please create Viewer node before you " + "run this action again") + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py index 5f09adb05f..41cc6c1a43 100644 --- a/pype/plugins/nuke/load/load_luts_ip.py +++ b/pype/plugins/nuke/load/load_luts_ip.py @@ -276,7 
+276,10 @@ class LoadLutsInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you run this action again") + msg = str("Please create Viewer node before you " + "run this action again") + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git a/pype/plugins/nuke/load/load_matchmove.py b/pype/plugins/nuke/load/load_matchmove.py index 6a674368fb..60d5dc026f 100644 --- a/pype/plugins/nuke/load/load_matchmove.py +++ b/pype/plugins/nuke/load/load_matchmove.py @@ -1,4 +1,5 @@ from avalon import api +import nuke class MatchmoveLoader(api.Loader): @@ -19,6 +20,8 @@ class MatchmoveLoader(api.Loader): exec(open(self.fname).read()) else: - self.log.error("Unsupported script type") + msg = "Unsupported script type" + self.log.error(msg) + nuke.message(msg) return True diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index e598839405..88e65156cb 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -1,11 +1,10 @@ +import re +import nuke import contextlib from avalon import api, io - -import nuke - -from pype.api import Logger -log = Logger().get_logger(__name__, "nuke") +from pype.nuke import presets +from pypeapp import config @contextlib.contextmanager @@ -24,7 +23,7 @@ def preserve_trim(node): offset_frame = None if node['frame_mode'].value() == "start at": start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": + if node['frame_mode'].value() == "offset": offset_frame = node['frame'].value() try: @@ -33,14 +32,14 @@ def preserve_trim(node): if start_at_frame: node['frame_mode'].setValue("start at") node['frame'].setValue(str(script_start)) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) if offset_frame: node['frame_mode'].setValue("offset") node['frame'].setValue(str((script_start + offset_frame))) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) def loader_shift(node, frame, relative=True): @@ -69,11 +68,37 @@ def loader_shift(node, frame, relative=True): return int(script_start) +def add_review_presets_config(): + returning = { + "families": list(), + "representations": list() + } + review_presets = config.get_presets()["plugins"]["global"]["publish"].get( + "ExtractReview", {}) + + outputs = review_presets.get("outputs", {}) + # + for output, properities in outputs.items(): + returning["representations"].append(output) + returning["families"] += properities.get("families", []) + + return returning + + class LoadMov(api.Loader): """Load mov file into Nuke""" + presets = add_review_presets_config() + families = [ + "source", + "plate", + "render", + "review"] + presets["families"] - families = ["write", "source", "plate", "render", "review"] - representations = ["wipmov", "h264", "mov", "preview", "review", "mp4"] + representations = [ + "mov", + "preview", + "review", + "mp4"] + presets["representations"] label = "Load mov" order = -10 @@ -85,47 +110,52 @@ class LoadMov(api.Loader): containerise, viewer_update_and_undo_stop ) - version = context['version'] version_data = version.get("data", {}) + repr_id = context["representation"]["_id"] - orig_first = version_data.get("frameStart", None) - orig_last = version_data.get("frameEnd", None) + orig_first = version_data.get("frameStart") + orig_last = 
version_data.get("frameEnd") diff = orig_first - 1 - # set first to 1 + first = orig_first - diff last = orig_last - diff - handles = version_data.get("handles", None) - handle_start = version_data.get("handleStart", None) - handle_end = version_data.get("handleEnd", None) + + handle_start = version_data.get("handleStart", 0) + handle_end = version_data.get("handleEnd", 0) + + colorspace = version_data.get("colorspace") repr_cont = context["representation"]["context"] - # fix handle start and end if none are available - if not handle_start and not handle_end: - handle_start = handles - handle_end = handles + self.log.debug( + "Representation id `{}` ".format(repr_id)) + context["representation"]["_id"] # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range - offset_frame = orig_first + handle_start + offset_frame = orig_first - handle_start # Fallback to asset name when namespace is None if namespace is None: namespace = context['asset']['name'] - file = self.fname.replace("\\", "/") - log.info("file: {}\n".format(self.fname)) + file = self.fname + + if not file: + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") read_name = "Read_{0}_{1}_{2}".format( repr_cont["asset"], repr_cont["subset"], repr_cont["representation"]) - # Create the Loader with the filename path set with viewer_update_and_undo_stop(): - # TODO: it might be universal read to img/geo/camera read_node = nuke.createNode( "Read", "name {}".format(read_name) @@ -139,7 +169,23 @@ class LoadMov(api.Loader): read_node["last"].setValue(last) read_node["frame_mode"].setValue("start at") read_node["frame"].setValue(str(offset_frame)) - # add additional metadata from the version to imprint to Avalon knob + + if colorspace: + read_node["colorspace"].setValue(str(colorspace)) + + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + read_node["colorspace"].setValue(str(preset_clrsp)) + + # add additional metadata from the version to imprint Avalon knob add_keys = [ "frameStart", "frameEnd", "handles", "source", "author", "fps", "version", "handleStart", "handleEnd" @@ -147,7 +193,7 @@ class LoadMov(api.Loader): data_imprint = {} for key in add_keys: - if key is 'version': + if key == 'version': data_imprint.update({ key: context["version"]['name'] }) @@ -186,10 +232,18 @@ class LoadMov(api.Loader): ) node = nuke.toNode(container['objectName']) - # TODO: prepare also for other Read img/geo/camera + assert node.Class() == "Read", "Must be Read" - file = api.get_representation_path(representation) + file = self.fname + + if not file: + repr_id = representation["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") # Get start frame from version data version = io.find_one({ @@ -207,20 +261,23 @@ class LoadMov(api.Loader): version_data = version.get("data", {}) - orig_first = version_data.get("frameStart", None) - orig_last = version_data.get("frameEnd", None) + orig_first = version_data.get("frameStart") + orig_last = version_data.get("frameEnd") diff = orig_first - 1 + # set first to 1 first = orig_first - diff last = 
orig_last - diff handles = version_data.get("handles", 0) handle_start = version_data.get("handleStart", 0) handle_end = version_data.get("handleEnd", 0) + colorspace = version_data.get("colorspace") if first is None: - log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format(node['name'].value(), representation)) + self.log.warning("Missing start frame for updated version" + "assuming starts at frame 0 for: " + "{} ({})".format( + node['name'].value(), representation)) first = 0 # fix handle start and end if none are available @@ -231,12 +288,12 @@ class LoadMov(api.Loader): # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range - offset_frame = orig_first + handle_start + offset_frame = orig_first - handle_start # Update the loader's path whilst preserving some values with preserve_trim(node): - node["file"].setValue(file["path"]) - log.info("__ node['file']: {}".format(node["file"].value())) + node["file"].setValue(file) + self.log.info("__ node['file']: {}".format(node["file"].value())) # Set the global in to the start frame of the sequence loader_shift(node, first, relative=True) @@ -247,19 +304,34 @@ class LoadMov(api.Loader): node["frame_mode"].setValue("start at") node["frame"].setValue(str(offset_frame)) + if colorspace: + node["colorspace"].setValue(str(colorspace)) + + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + node["colorspace"].setValue(str(preset_clrsp)) + updated_dict = {} updated_dict.update({ "representation": str(representation["_id"]), - "frameStart": version_data.get("frameStart"), - "frameEnd": version_data.get("frameEnd"), - "version": version.get("name"), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), + "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), - "handles": version_data.get("handles"), - "handleStart": version_data.get("handleStart"), - "handleEnd": version_data.get("handleEnd"), - "fps": version_data.get("fps"), + "handleStart": str(handle_start), + "handleEnd": str(handle_end), + "fps": str(version_data.get("fps")), "author": version_data.get("author"), - "outputDir": version_data.get("outputDir"), + "outputDir": version_data.get("outputDir") }) # change color of node @@ -272,7 +344,7 @@ class LoadMov(api.Loader): update_container( node, updated_dict ) - log.info("udated to version: {}".format(version.get("name"))) + self.log.info("udated to version: {}".format(version.get("name"))) def remove(self, container): diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 8f01d4511b..690f074c3f 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -1,11 +1,9 @@ +import re +import nuke import contextlib from avalon import api, io - -import nuke - -from pype.api import Logger -log = Logger().get_logger(__name__, "nuke") +from pype.nuke import presets @contextlib.contextmanager @@ -24,7 +22,7 @@ def preserve_trim(node): offset_frame = None if node['frame_mode'].value() == "start at": start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": 
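The `is` to `==` changes in these loaders fix a genuine bug: `is` tests object identity, so comparing a knob's string value against a literal only works when CPython happens to reuse the same string object. A tiny illustration, plain Python with no Nuke required:

```python
# identity vs. equality for strings
value = "".join(["off", "set"])   # builds a new "offset" object at runtime

print(value == "offset")  # True  - compares contents, always correct
print(value is "offset")  # False - compares identity; recent Pythons even
                          # emit a SyntaxWarning for this pattern
```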
+ if node['frame_mode'].value() == "offset": offset_frame = node['frame'].value() try: @@ -33,14 +31,14 @@ def preserve_trim(node): if start_at_frame: node['frame_mode'].setValue("start at") node['frame'].setValue(str(script_start)) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) if offset_frame: node['frame_mode'].setValue("offset") node['frame'].setValue(str((script_start + offset_frame))) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) def loader_shift(node, frame, relative=True): @@ -72,8 +70,8 @@ def loader_shift(node, frame, relative=True): class LoadSequence(api.Loader): """Load image sequence into Nuke""" - families = ["write", "source", "plate", "render"] - representations = ["exr", "dpx", "jpg", "jpeg"] + families = ["render2d", "source", "plate", "render"] + representations = ["exr", "dpx", "jpg", "jpeg", "png"] label = "Load sequence" order = -10 @@ -88,12 +86,14 @@ class LoadSequence(api.Loader): version = context['version'] version_data = version.get("data", {}) - - log.info("version_data: {}\n".format(version_data)) + repr_id = context["representation"]["_id"] + + self.log.info("version_data: {}\n".format(version_data)) + self.log.debug( + "Representation id `{}` ".format(repr_id)) self.first_frame = int(nuke.root()["first_frame"].getValue()) self.handle_start = version_data.get("handleStart", 0) - self.handle_start = version_data.get("handleStart", 0) self.handle_end = version_data.get("handleEnd", 0) first = version_data.get("frameStart", None) @@ -106,21 +106,27 @@ class LoadSequence(api.Loader): first -= self.handle_start last += self.handle_end - file = self.fname.replace("\\", "/") + file = self.fname - log.info("file: {}\n".format(self.fname)) + if not file: + repr_id = context["representation"]["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") repr_cont = context["representation"]["context"] - read_name = "Read_{0}_{1}_{2}".format( - repr_cont["asset"], - repr_cont["subset"], - repr_cont["representation"]) - if "#" not in file: frame = repr_cont.get("frame") padding = len(frame) file = file.replace(frame, "#"*padding) + read_name = "Read_{0}_{1}_{2}".format( + repr_cont["asset"], + repr_cont["subset"], + repr_cont["representation"]) + # Create the Loader with the filename path set with viewer_update_and_undo_stop(): # TODO: it might be universal read to img/geo/camera @@ -130,24 +136,36 @@ class LoadSequence(api.Loader): r["file"].setValue(file) # Set colorspace defined in version data - colorspace = context["version"]["data"].get("colorspace", None) - if colorspace is not None: + colorspace = context["version"]["data"].get("colorspace") + if colorspace: r["colorspace"].setValue(str(colorspace)) + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + r["colorspace"].setValue(str(preset_clrsp)) + loader_shift(r, first, relative=True) r["origfirst"].setValue(int(first)) r["first"].setValue(int(first)) r["origlast"].setValue(int(last)) r["last"].setValue(int(last)) - # add additional metadata from the version to imprint to 
Avalon knob + # add additional metadata from the version to imprint Avalon knob add_keys = ["frameStart", "frameEnd", "source", "colorspace", "author", "fps", "version", "handleStart", "handleEnd"] data_imprint = {} for k in add_keys: - if k is 'version': + if k == 'version': data_imprint.update({k: context["version"]['name']}) else: data_imprint.update( @@ -179,7 +197,7 @@ class LoadSequence(api.Loader): rtn["after"].setValue("continue") rtn["input.first_lock"].setValue(True) rtn["input.first"].setValue( - self.handle_start + self.first_frame + self.handle_start + self.first_frame ) if time_warp_nodes != []: @@ -210,16 +228,29 @@ class LoadSequence(api.Loader): """ from avalon.nuke import ( - ls_img_sequence, update_container ) node = nuke.toNode(container['objectName']) - # TODO: prepare also for other Read img/geo/camera + assert node.Class() == "Read", "Must be Read" - path = api.get_representation_path(representation) - file = ls_img_sequence(path) + repr_cont = representation["context"] + + file = self.fname + + if not file: + repr_id = representation["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + if "#" not in file: + frame = repr_cont.get("frame") + padding = len(frame) + file = file.replace(frame, "#"*padding) # Get start frame from version data version = io.find_one({ @@ -241,13 +272,14 @@ class LoadSequence(api.Loader): self.handle_start = version_data.get("handleStart", 0) self.handle_end = version_data.get("handleEnd", 0) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) + first = version_data.get("frameStart") + last = version_data.get("frameEnd") if first is None: - log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format(node['name'].value(), representation)) + self.log.warning("Missing start frame for updated version" + "assuming starts at frame 0 for: " + "{} ({})".format( + node['name'].value(), representation)) first = 0 first -= self.handle_start @@ -255,8 +287,8 @@ class LoadSequence(api.Loader): # Update the loader's path whilst preserving some values with preserve_trim(node): - node["file"].setValue(file["path"]) - log.info("__ node['file']: {}".format(node["file"].value())) + node["file"].setValue(file) + self.log.info("__ node['file']: {}".format(node["file"].value())) # Set the global in to the start frame of the sequence loader_shift(node, first, relative=True) @@ -268,14 +300,14 @@ class LoadSequence(api.Loader): updated_dict = {} updated_dict.update({ "representation": str(representation["_id"]), - "frameStart": version_data.get("frameStart"), - "frameEnd": version_data.get("frameEnd"), - "version": version.get("name"), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), - "handleStart": version_data.get("handleStart"), - "handleEnd": version_data.get("handleEnd"), - "fps": version_data.get("fps"), + "handleStart": str(self.handle_start), + "handleEnd": str(self.handle_end), + "fps": str(version_data.get("fps")), "author": version_data.get("author"), "outputDir": version_data.get("outputDir"), }) @@ -296,7 +328,7 @@ class LoadSequence(api.Loader): node, updated_dict ) - log.info("udated to version: {}".format(version.get("name"))) + self.log.info("udated to version: {}".format(version.get("name"))) def remove(self, container): diff --git 
a/pype/plugins/nuke/publish/collect_asset_info.py b/pype/plugins/nuke/publish/collect_asset_info.py deleted file mode 100644 index 8a8791ec36..0000000000 --- a/pype/plugins/nuke/publish/collect_asset_info.py +++ /dev/null @@ -1,25 +0,0 @@ -from avalon import api, io -import pyblish.api - - -class CollectAssetInfo(pyblish.api.ContextPlugin): - """Collect framerate.""" - - order = pyblish.api.CollectorOrder - label = "Collect Asset Info" - hosts = [ - "nuke", - "nukeassist" - ] - - def process(self, context): - asset_data = io.find_one({ - "type": "asset", - "name": api.Session["AVALON_ASSET"] - }) - self.log.info("asset_data: {}".format(asset_data)) - - context.data['handles'] = int(asset_data["data"].get("handles", 0)) - context.data["handleStart"] = int(asset_data["data"].get( - "handleStart", 0)) - context.data["handleEnd"] = int(asset_data["data"].get("handleEnd", 0)) diff --git a/pype/plugins/nuke/publish/collect_backdrop.py b/pype/plugins/nuke/publish/collect_backdrop.py index d98a20aee0..10729b217b 100644 --- a/pype/plugins/nuke/publish/collect_backdrop.py +++ b/pype/plugins/nuke/publish/collect_backdrop.py @@ -58,7 +58,11 @@ class CollectBackdrops(pyblish.api.InstancePlugin): last_frame = int(nuke.root()["last_frame"].getValue()) # get version - version = pype.get_version_from_path(nuke.root().name()) + version = instance.context.data.get('version') + + if not version: + raise RuntimeError("Script name has no version in the name.") + instance.data['version'] = version # Add version data to instance diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py index 5b123ed7b9..cbbef70e4a 100644 --- a/pype/plugins/nuke/publish/collect_instances.py +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -28,12 +28,15 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes())) for node in nuke.allNodes(): + if node.Class() in ["Viewer", "Dot"]: + continue + try: if node["disable"].value(): continue except Exception as E: self.log.warning(E) - + # get data from avalon knob self.log.debug("node[name]: {}".format(node['name'].value())) diff --git a/pype/plugins/nuke/publish/collect_review.py b/pype/plugins/nuke/publish/collect_review.py index 7e7cbedd6c..c95c94541d 100644 --- a/pype/plugins/nuke/publish/collect_review.py +++ b/pype/plugins/nuke/publish/collect_review.py @@ -1,12 +1,12 @@ import pyblish.api import nuke + class CollectReview(pyblish.api.InstancePlugin): """Collect review instance from rendered frames """ order = pyblish.api.CollectorOrder + 0.3 - family = "review" label = "Collect Review" hosts = ["nuke"] families = ["render", "render.local", "render.farm"] @@ -25,4 +25,6 @@ class CollectReview(pyblish.api.InstancePlugin): instance.data["families"].append("review") instance.data['families'].append('ftrack') + self.log.info("Review collected: `{}`".format(instance)) + self.log.debug("__ instance.data: `{}`".format(instance.data)) diff --git a/pype/plugins/nuke/publish/collect_script_version.py b/pype/plugins/nuke/publish/collect_script_version.py deleted file mode 100644 index 9a6b5bf572..0000000000 --- a/pype/plugins/nuke/publish/collect_script_version.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import pype.api as pype -import pyblish.api - - -class CollectScriptVersion(pyblish. 
api.ContextPlugin): - """Collect Script Version.""" - - order = pyblish.api.CollectorOrder - label = "Collect Script Version" - hosts = [ - "nuke", - "nukeassist" - ] - - def process(self, context): - file_path = context.data["currentFile"] - base_name = os.path.basename(file_path) - # get version string - version = pype.get_version_from_path(base_name) - - context.data['version'] = version diff --git a/pype/plugins/nuke/publish/collect_workfile.py b/pype/plugins/nuke/publish/collect_workfile.py index 4fff9f46ed..b95edf0a93 100644 --- a/pype/plugins/nuke/publish/collect_workfile.py +++ b/pype/plugins/nuke/publish/collect_workfile.py @@ -23,11 +23,12 @@ class CollectWorkfile(pyblish.api.ContextPlugin): add_publish_knob(root) family = "workfile" + task = os.getenv("AVALON_TASK", None) # creating instances per write node file_path = context.data["currentFile"] staging_dir = os.path.dirname(file_path) base_name = os.path.basename(file_path) - subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family) + subset = family + task.capitalize() # Get frame range first_frame = int(root["first_frame"].getValue()) @@ -72,8 +73,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "publish": root.knob('publish').value(), "family": family, "families": [family], - "representations": list(), - "subsetGroup": "workfiles" + "representations": list() }) # adding basic script data diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index 37c86978b6..0dc7c81fae 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -14,6 +14,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): families = ["write"] def process(self, instance): + # adding 2d focused rendering + instance.data["families"].append("render2d") node = None for x in instance: @@ -34,14 +36,15 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_type = "mov" # Get frame range - handles = instance.context.data['handles'] handle_start = instance.context.data["handleStart"] handle_end = instance.context.data["handleEnd"] first_frame = int(nuke.root()["first_frame"].getValue()) last_frame = int(nuke.root()["last_frame"].getValue()) + frame_length = int( + last_frame - first_frame + 1 + ) if node["use_limit"].getValue(): - handles = 0 first_frame = int(node["first"].getValue()) last_frame = int(node["last"].getValue()) @@ -51,8 +54,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): self.log.debug('output dir: {}'.format(output_dir)) # get version to instance for integration - instance.data['version'] = instance.context.data.get( - "version", pype.get_version_from_path(nuke.root().name())) + instance.data['version'] = instance.context.data["version"] self.log.debug('Write Version: %s' % instance.data('version')) @@ -80,8 +82,26 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): collected_frames = [f for f in os.listdir(output_dir) if ext in f] if collected_frames: - representation['frameStart'] = "%0{}d".format( + collected_frames_len = len(collected_frames) + frame_start_str = "%0{}d".format( len(str(last_frame))) % first_frame + representation['frameStart'] = frame_start_str + + # in case slate is expected and not yet rendered + self.log.debug("_ frame_length: {}".format(frame_length)) + self.log.debug( + "_ collected_frames_len: {}".format( + collected_frames_len)) + # this will only run if slate frame is not already + # rendered from previews publishes + if "slate" in instance.data["families"] \ + and (frame_length == 
collected_frames_len): + frame_slate_str = "%0{}d".format( + len(str(last_frame))) % (first_frame - 1) + slate_frame = collected_frames[0].replace( + frame_start_str, frame_slate_str) + collected_frames.insert(0, slate_frame) + representation['files'] = collected_frames instance.data["representations"].append(representation) except Exception: @@ -90,16 +110,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): # Add version data to instance version_data = { - "handles": handle_start, - "handleStart": handle_start, - "handleEnd": handle_end, - "frameStart": first_frame + handle_start, - "frameEnd": last_frame - handle_end, - "version": int(instance.data['version']), "colorspace": node["colorspace"].value(), - "families": [instance.data["family"]], - "subset": instance.data["subset"], - "fps": instance.context.data["fps"] } instance.data["family"] = "write" @@ -119,16 +130,18 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "outputDir": output_dir, "ext": ext, "label": label, - "handles": handles, - "frameStart": first_frame, - "frameEnd": last_frame, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "frameStartHandle": first_frame, + "frameEndHandle": last_frame, "outputType": output_type, "family": "write", "families": families, "colorspace": node["colorspace"].value(), "deadlineChunkSize": deadlineChunkSize, - "deadlinePriority": deadlinePriority, - "subsetGroup": "renders" + "deadlinePriority": deadlinePriority }) self.log.debug("instance.data: {}".format(instance.data)) diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index 9b8baa468b..5467d239c2 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -27,13 +27,13 @@ class NukeRenderLocal(pype.api.Extractor): self.log.debug("instance collected: {}".format(instance.data)) - first_frame = instance.data.get("frameStart", None) + first_frame = instance.data.get("frameStartHandle", None) # exception for slate workflow if "slate" in instance.data["families"]: first_frame -= 1 - last_frame = instance.data.get("frameEnd", None) + last_frame = instance.data.get("frameEndHandle", None) node_subset_name = instance.data.get("name", None) self.log.info("Starting render") diff --git a/pype/plugins/nuke/publish/extract_review_data_lut.py b/pype/plugins/nuke/publish/extract_review_data_lut.py index 4373309363..90b1fda1ec 100644 --- a/pype/plugins/nuke/publish/extract_review_data_lut.py +++ b/pype/plugins/nuke/publish/extract_review_data_lut.py @@ -41,7 +41,7 @@ class ExtractReviewDataLut(pype.api.Extractor): with anlib.maintained_selection(): exporter = pnlib.ExporterReviewLut( self, instance - ) + ) data = exporter.generate_lut() # assign to representations diff --git a/pype/plugins/nuke/publish/extract_review_data_mov.py b/pype/plugins/nuke/publish/extract_review_data_mov.py index 39c338b62c..8b204680a7 100644 --- a/pype/plugins/nuke/publish/extract_review_data_mov.py +++ b/pype/plugins/nuke/publish/extract_review_data_mov.py @@ -3,7 +3,6 @@ import pyblish.api from avalon.nuke import lib as anlib from pype.nuke import lib as pnlib import pype -reload(pnlib) class ExtractReviewDataMov(pype.api.Extractor): diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 7e43b3cd6f..369cbe0496 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ 
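The writes collector above now prepends a slate frame when a slate is expected but only the main frame range has been rendered so far: it formats the frame one before the first frame with the same padding and clones the first collected filename. A rough standalone sketch of that step (the filenames are hypothetical):

```python
# Sketch of the slate handling in the writes collector: prepend a frame
# numbered one before the first frame, reusing the first collected file name.
def add_slate_frame(collected_frames, first_frame, last_frame):
    padding = len(str(last_frame))
    frame_start_str = "%0{}d".format(padding) % first_frame
    frame_slate_str = "%0{}d".format(padding) % (first_frame - 1)
    slate_frame = collected_frames[0].replace(frame_start_str, frame_slate_str)
    return [slate_frame] + collected_frames


frames = ["shot010.1001.exr", "shot010.1002.exr", "shot010.1003.exr"]
print(add_slate_frame(frames, 1001, 1003)[0])  # -> shot010.1000.exr
```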
b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -12,7 +12,7 @@ class ExtractSlateFrame(pype.api.Extractor): """ - order = pyblish.api.ExtractorOrder + 0.01 + order = pyblish.api.ExtractorOrder - 0.001 label = "Extract Slate Frame" families = ["slate"] @@ -33,6 +33,7 @@ class ExtractSlateFrame(pype.api.Extractor): self.render_slate(instance) def render_slate(self, instance): + node_subset_name = instance.data.get("name", None) node = instance[0] # group node self.log.info("Creating staging dir...") @@ -47,6 +48,10 @@ class ExtractSlateFrame(pype.api.Extractor): self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) + frame_length = int( + instance.data["frameEnd"] - instance.data["frameStart"] + 1 + ) + temporary_nodes = [] collection = instance.data.get("collection", None) @@ -56,17 +61,23 @@ class ExtractSlateFrame(pype.api.Extractor): "{head}{padding}{tail}")) fhead = collection.format("{head}") + collected_frames_len = int(len(collection.indexes)) + # get first and last frame first_frame = min(collection.indexes) - 1 - - if "slate" in instance.data["families"]: + self.log.info('frame_length: {}'.format(frame_length)) + self.log.info( + 'len(collection.indexes): {}'.format(collected_frames_len) + ) + if ("slate" in instance.data["families"]) \ + and (frame_length != collected_frames_len): first_frame += 1 last_frame = first_frame else: fname = os.path.basename(instance.data.get("path", None)) fhead = os.path.splitext(fname)[0] + "." - first_frame = instance.data.get("frameStart", None) - 1 + first_frame = instance.data.get("frameStartHandle", None) - 1 last_frame = first_frame if "#" in fhead: @@ -103,6 +114,8 @@ class ExtractSlateFrame(pype.api.Extractor): # Render frames nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + # also render slate as sequence frame + nuke.execute(node_subset_name, int(first_frame), int(last_frame)) self.log.debug( "slate frame path: {}".format(instance.data["slateFrame"])) @@ -144,7 +157,7 @@ class ExtractSlateFrame(pype.api.Extractor): return comment = instance.context.data.get("comment") - intent = instance.context.data.get("intent") + intent = instance.context.data.get("intent", {}).get("value", "") try: node["f_submission_note"].setValue(comment) diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py index 55ba34a0d4..88ea78e623 100644 --- a/pype/plugins/nuke/publish/extract_thumbnail.py +++ b/pype/plugins/nuke/publish/extract_thumbnail.py @@ -116,7 +116,7 @@ class ExtractThumbnail(pype.api.Extractor): write_node["raw"].setValue(1) write_node.setInput(0, previous_node) temporary_nodes.append(write_node) - tags = ["thumbnail"] + tags = ["thumbnail", "publish_on_farm"] # retime for first_frame = int(last_frame) / 2 diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 71108189c0..0a9ef33398 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -5,7 +5,6 @@ import getpass from avalon import api from avalon.vendor import requests import re - import pyblish.api @@ -23,6 +22,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): families = ["render.farm"] optional = True + deadline_priority = 50 + deadline_pool = "" + deadline_pool_secondary = "" + deadline_chunk_size = 1 + def process(self, instance): node = instance[0] @@ -37,8 +41,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self._ver = re.search(r"\d+\.\d+", 
context.data.get("hostVersion")) self._deadline_user = context.data.get( "deadlineUser", getpass.getuser()) - self._frame_start = int(instance.data["frameStart"]) - self._frame_end = int(instance.data["frameEnd"]) + self._frame_start = int(instance.data["frameStartHandle"]) + self._frame_end = int(instance.data["frameEndHandle"]) # get output path render_path = instance.data['path'] @@ -55,7 +59,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): ) # Store output dir for unified publisher (filesequence) instance.data["deadlineSubmissionJob"] = response.json() - instance.data["publishJobState"] = "Active" + instance.data["outputDir"] = os.path.dirname( + render_path).replace("\\", "/") + instance.data["publishJobState"] = "Suspended" if instance.data.get("bakeScriptPath"): render_path = instance.data.get("bakeRenderPath") @@ -87,6 +93,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): script_name = os.path.basename(script_path) jobname = "%s - %s" % (script_name, instance.name) + output_filename_0 = self.preview_fname(render_path) + if not responce_data: responce_data = {} @@ -96,6 +104,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): except OSError: pass + # define chunk and priority + chunk_size = instance.data.get("deadlineChunkSize") + if chunk_size == 0: + chunk_size = self.deadline_chunk_size + + priority = instance.data.get("deadlinePriority") + if priority != 50: + priority = self.deadline_priority + payload = { "JobInfo": { # Top-level group name @@ -107,10 +124,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # Arbitrary username, for visualisation in Monitor "UserName": self._deadline_user, - "Priority": instance.data["deadlinePriority"], + "Priority": priority, + "ChunkSize": chunk_size, - "Pool": "2d", - "SecondaryPool": "2d", + "Pool": self.deadline_pool, + "SecondaryPool": self.deadline_pool_secondary, "Plugin": "Nuke", "Frames": "{start}-{end}".format( @@ -119,6 +137,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): ), "Comment": self._comment, + # Optional, enable double-click to preview rendered + # frames from Deadline Monitor + "OutputFilename0": output_filename_0.replace("\\", "/") + }, "PluginInfo": { # Input @@ -220,6 +242,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): self.log.info("Submitting..") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) + # adding expectied files to instance.data + self.expected_files(instance, render_path) + self.log.debug("__ expectedFiles: `{}`".format( + instance.data["expectedFiles"])) response = requests.post(self.deadline_url, json=payload) if not response.ok: @@ -240,3 +266,51 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "%f=%d was rounded off to nearest integer" % (value, int(value)) ) + + def preview_fname(self, path): + """Return output file path with #### for padding. + + Deadline requires the path to be formatted with # in place of numbers. 
+ For example `/path/to/render.####.png` + + Args: + path (str): path to rendered images + + Returns: + str + + """ + self.log.debug("_ path: `{}`".format(path)) + if "%" in path: + search_results = re.search(r"(%0)(\d)(d.)", path).groups() + self.log.debug("_ search_results: `{}`".format(search_results)) + return int(search_results[1]) + if "#" in path: + self.log.debug("_ path: `{}`".format(path)) + return path + else: + return path + + def expected_files(self, + instance, + path): + """ Create expected files in instance data + """ + if not instance.data.get("expectedFiles"): + instance.data["expectedFiles"] = list() + + dir = os.path.dirname(path) + file = os.path.basename(path) + + if "#" in file: + pparts = file.split("#") + padding = "%0{}d".format(len(pparts) - 1) + file = pparts[0] + padding + pparts[-1] + + if "%" not in file: + instance.data["expectedFiles"].append(path) + return + + for i in range(self._frame_start, (self._frame_end + 1)): + instance.data["expectedFiles"].append( + os.path.join(dir, (file % i)).replace("\\", "/")) diff --git a/pype/plugins/nuke/publish/validate_output_resolution.py b/pype/plugins/nuke/publish/validate_output_resolution.py new file mode 100644 index 0000000000..2563ee929f --- /dev/null +++ b/pype/plugins/nuke/publish/validate_output_resolution.py @@ -0,0 +1,78 @@ +import nuke + +import pyblish.api + + +class RepairWriteResolutionDifference(pyblish.api.Action): + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + + # Get the errored instances + failed = [] + for result in context.data["results"]: + if (result["error"] is not None and result["instance"] is not None + and result["instance"] not in failed): + failed.append(result["instance"]) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(failed, plugin) + + for instance in instances: + reformat = instance[0].dependencies()[0] + if reformat.Class() != "Reformat": + reformat = nuke.nodes.Reformat(inputs=[instance[0].input(0)]) + + xpos = instance[0].xpos() + ypos = instance[0].ypos() - 26 + + dependent_ypos = instance[0].dependencies()[0].ypos() + if (instance[0].ypos() - dependent_ypos) <= 51: + xpos += 110 + + reformat.setXYpos(xpos, ypos) + + instance[0].setInput(0, reformat) + + reformat["resize"].setValue("none") + + +class ValidateOutputResolution(pyblish.api.InstancePlugin): + """Validates Output Resolution. + + It is making sure the resolution of write's input is the same as + Format definition of script in Root node. + """ + + order = pyblish.api.ValidatorOrder + optional = True + families = ["render", "render.local", "render.farm"] + label = "Write Resolution" + hosts = ["nuke"] + actions = [RepairWriteResolutionDifference] + + def process(self, instance): + + # Skip bounding box check if a crop node exists. + if instance[0].dependencies()[0].Class() == "Crop": + return + + msg = "Bounding box is outside the format." 
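The `expected_files` helper added to the Deadline submitter above pre-computes the frame paths the farm job should produce, converting `####` padding into a printf-style pattern first. A minimal sketch of the same expansion, independent of the plugin (the paths below are examples only):

```python
import os

# Standalone sketch of expected-files expansion: '####' -> '%04d', then one
# path per frame in the submitted range; single files pass through unchanged.
def expected_files(path, frame_start, frame_end):
    directory, name = os.path.dirname(path), os.path.basename(path)

    if "#" in name:
        parts = name.split("#")
        name = parts[0] + "%0{}d".format(len(parts) - 1) + parts[-1]

    if "%" not in name:
        # not a sequence, nothing to expand
        return [path.replace("\\", "/")]

    return [
        os.path.join(directory, name % frame).replace("\\", "/")
        for frame in range(frame_start, frame_end + 1)
    ]


print(expected_files("X:/renders/shot010.####.exr", 1001, 1003))
```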
+ assert self.check_resolution(instance), msg + + def check_resolution(self, instance): + node = instance[0] + + root_width = instance.data["resolutionWidth"] + root_height = instance.data["resolutionHeight"] + + write_width = node.format().width() + write_height = node.format().height() + + if (root_width != write_width) or (root_height != write_height): + return None + else: + return True diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index c63c289947..6e9b91dd72 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -41,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): if not repre.get('files'): msg = ("no frames were collected, " "you need to render them") - self.log.warning(msg) + self.log.error(msg) raise ValidationException(msg) collections, remainder = clique.assemble(repre["files"]) @@ -51,7 +51,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): collection = collections[0] frame_length = int( - instance.data["frameEnd"] - instance.data["frameStart"] + 1 + instance.data["frameEndHandle"] - instance.data["frameStartHandle"] + 1 ) if frame_length != 1: @@ -75,8 +75,9 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): self.log.info( 'len(collection.indexes): {}'.format(collected_frames_len) ) - - if "slate" in instance.data["families"]: + + if ("slate" in instance.data["families"]) \ + and (frame_length != collected_frames_len): collected_frames_len -= 1 assert (collected_frames_len == frame_length), ( diff --git a/pype/plugins/nuke/publish/validate_script.py b/pype/plugins/nuke/publish/validate_script.py index 307e3ade59..f7dd84d714 100644 --- a/pype/plugins/nuke/publish/validate_script.py +++ b/pype/plugins/nuke/publish/validate_script.py @@ -15,12 +15,6 @@ class ValidateScript(pyblish.api.InstancePlugin): def process(self, instance): ctx_data = instance.context.data asset_name = ctx_data["asset"] - - # asset = io.find_one({ - # "type": "asset", - # "name": asset_name - # }) - asset = lib.get_asset(asset_name) asset_data = asset["data"] diff --git a/pype/plugins/nuke/publish/validate_write_bounding_box.py b/pype/plugins/nuke/publish/validate_write_bounding_box.py index 417d4ab004..e4b7c77a25 100644 --- a/pype/plugins/nuke/publish/validate_write_bounding_box.py +++ b/pype/plugins/nuke/publish/validate_write_bounding_box.py @@ -57,7 +57,7 @@ class ValidateNukeWriteBoundingBox(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder optional = True - families = ["render"] + families = ["render", "render.local", "render.farm"] label = "Write Bounding Box" hosts = ["nuke"] actions = [RepairNukeBoundingBoxAction] diff --git a/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py new file mode 100644 index 0000000000..2ee2409b86 --- /dev/null +++ b/pype/plugins/nukestudio/load/load_sequences_to_timeline_asset_origin.py @@ -0,0 +1,49 @@ +from avalon import api +import hiero +from pype.nukestudio import lib +reload(lib) + + +class LoadSequencesToTimelineAssetOrigin(api.Loader): + """Load image sequence into Hiero timeline + + Place clip to timeline on its asset origin timings collected + during conforming to project + """ + + families = ["render2d", "source", "plate", "render"] + representations = ["exr", "dpx", "jpg", "jpeg", "png"] + + label = "Load to timeline with shot origin timing" + order = -10 + 
icon = "code-fork" + color = "orange" + + def load(self, context, name, namespace, data): + + data.update({ + # "projectBinPath": "Loaded", + "hieroWorkfileName": hiero.ui.activeProject().name() + }) + + self.log.debug("_ context: `{}`".format(context)) + self.log.debug("_ representation._id: `{}`".format( + context["representation"]["_id"])) + + clip_loader = lib.ClipLoader(self, context, **data) + clip_loader.load() + + self.log.info("Loader done: `{}`".format(name)) + + def switch(self, container, representation): + self.update(container, representation) + + def update(self, container, representation): + """ Updating previously loaded clips + """ + pass + + def remove(self, container): + """ Removing previously loaded clips + """ + pass diff --git a/pype/plugins/nukestudio/publish/collect_audio.py b/pype/plugins/nukestudio/publish/collect_audio.py index 61419b1ad9..727d7da795 100644 --- a/pype/plugins/nukestudio/publish/collect_audio.py +++ b/pype/plugins/nukestudio/publish/collect_audio.py @@ -1,5 +1,5 @@ from pyblish import api - +import os class CollectAudio(api.InstancePlugin): """Collect audio from tags. @@ -12,7 +12,7 @@ class CollectAudio(api.InstancePlugin): """ # Run just before CollectSubsets - order = api.CollectorOrder + 0.1025 + order = api.CollectorOrder + 0.1021 label = "Collect Audio" hosts = ["nukestudio"] families = ["clip"] @@ -21,8 +21,10 @@ class CollectAudio(api.InstancePlugin): # Exclude non-tagged instances. tagged = False for tag in instance.data["tags"]: - family = dict(tag["metadata"]).get("tag.family", "") + tag_data = dict(tag["metadata"]) + family = tag_data.get("tag.family", "") if family.lower() == "audio": + subset = tag_data.get("tag.subset", "Main") tagged = True if not tagged: @@ -40,14 +42,14 @@ class CollectAudio(api.InstancePlugin): data["family"] = "audio" data["families"] = ["ftrack"] - subset = "" - for tag in instance.data["tags"]: - tag_data = dict(tag["metadata"]) - if "tag.subset" in tag_data: - subset = tag_data["tag.subset"] data["subset"] = "audio" + subset.title() data["source"] = data["sourcePath"] + data["label"] = "{} - {} - ({})".format( + data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[ + 1] + ) + self.log.debug("Creating instance with data: {}".format(data)) instance.context.create_instance(**data) diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 0729f20957..6a1dad9a6d 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -1,7 +1,7 @@ import os from pyblish import api - +import hiero import nuke class CollectClips(api.ContextPlugin): @@ -17,8 +17,7 @@ class CollectClips(api.ContextPlugin): self.log.debug("Created `assetsShared` in context") context.data["assetsShared"] = dict() - projectdata = context.data["projectData"] - version = context.data.get("version", "001") + projectdata = context.data["projectEntity"]["data"] sequence = context.data.get("activeSequence") selection = context.data.get("selection") @@ -48,7 +47,9 @@ class CollectClips(api.ContextPlugin): track = item.parent() source = item.source().mediaSource() source_path = source.firstpath() - effects = [f for f in item.linkedItems() if f.isEnabled()] + effects = [f for f in item.linkedItems() + if f.isEnabled() + if isinstance(f, hiero.core.EffectTrackItem)] # If source is *.nk its a comp effect and we need to fetch the # write node output. 
This should be improved by parsing the script @@ -105,10 +106,8 @@ class CollectClips(api.ContextPlugin): "asset": asset, "family": "clip", "families": [], - "handles": 0, - "handleStart": projectdata.get("handles", 0), - "handleEnd": projectdata.get("handles", 0), - "version": int(version)}) + "handleStart": projectdata.get("handleStart", 0), + "handleEnd": projectdata.get("handleEnd", 0)}) instance = context.create_instance(**data) diff --git a/pype/plugins/nukestudio/publish/collect_effects.py b/pype/plugins/nukestudio/publish/collect_effects.py index 0aee0adf2e..55ff849c88 100644 --- a/pype/plugins/nukestudio/publish/collect_effects.py +++ b/pype/plugins/nukestudio/publish/collect_effects.py @@ -11,7 +11,9 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin): def process(self, instance): - self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset"))) + self.log.debug( + "Finding soft effect for subset: `{}`".format( + instance.data.get("subset"))) # taking active sequence subset = instance.data.get("subset") @@ -41,8 +43,12 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin): if len(instance.data.get("effectTrackItems", {}).keys()) > 0: instance.data["families"] += ["lut"] - self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys())) - self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {}))) + self.log.debug( + "effects.keys: {}".format( + instance.data.get("effectTrackItems", {}).keys())) + self.log.debug( + "effects: {}".format( + instance.data.get("effectTrackItems", {}))) def add_effect(self, instance, track_index, item): track = item.parentTrack().name() diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py index 8da83e715b..c16f1a5803 100644 --- a/pype/plugins/nukestudio/publish/collect_handles.py +++ b/pype/plugins/nukestudio/publish/collect_handles.py @@ -24,7 +24,6 @@ class CollectClipHandles(api.ContextPlugin): continue # get handles - handles = int(instance.data["handles"]) handle_start = int(instance.data["handleStart"]) handle_end = int(instance.data["handleEnd"]) @@ -38,19 +37,16 @@ class CollectClipHandles(api.ContextPlugin): self.log.debug("Adding to shared assets: `{}`".format( instance.data["name"])) asset_shared.update({ - "handles": handles, "handleStart": handle_start, "handleEnd": handle_end }) - for instance in filtered_instances: if not instance.data.get("main") and not instance.data.get("handleTag"): self.log.debug("Synchronize handles on: `{}`".format( instance.data["name"])) name = instance.data["asset"] s_asset_data = assets_shared.get(name) - instance.data["handles"] = s_asset_data.get("handles", 0) instance.data["handleStart"] = s_asset_data.get( "handleStart", 0 ) @@ -59,8 +55,6 @@ class CollectClipHandles(api.ContextPlugin): # debug printing self.log.debug("_ s_asset_data: `{}`".format( s_asset_data)) - self.log.debug("_ instance.data[handles]: `{}`".format( - instance.data["handles"])) self.log.debug("_ instance.data[handleStart]: `{}`".format( instance.data["handleStart"])) self.log.debug("_ instance.data[handleEnd]: `{}`".format( diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 5f29837d80..5bc9bea7dd 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -42,6 +42,7 @@ class 
CollectHierarchyInstance(pyblish.api.ContextPlugin): width = int(sequence.format().width()) height = int(sequence.format().height()) pixel_aspect = sequence.format().pixelAspect() + fps = context.data["fps"] # build data for inner nukestudio project property data = { @@ -161,9 +162,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin): "asset": asset, "hierarchy": hierarchy, "parents": parents, - "width": width, - "height": height, + "resolutionWidth": width, + "resolutionHeight": height, "pixelAspect": pixel_aspect, + "fps": fps, "tasks": instance.data["tasks"] }) @@ -223,9 +225,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): instance.data["parents"] = s_asset_data["parents"] instance.data["hierarchy"] = s_asset_data["hierarchy"] instance.data["tasks"] = s_asset_data["tasks"] - instance.data["width"] = s_asset_data["width"] - instance.data["height"] = s_asset_data["height"] + instance.data["resolutionWidth"] = s_asset_data[ + "resolutionWidth"] + instance.data["resolutionHeight"] = s_asset_data[ + "resolutionHeight"] instance.data["pixelAspect"] = s_asset_data["pixelAspect"] + instance.data["fps"] = s_asset_data["fps"] # adding frame start if any on instance start_frame = s_asset_data.get("startingFrame") @@ -263,7 +268,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # get custom attributes of the shot if instance.data.get("main"): in_info['custom_attributes'] = { - 'handles': int(instance.data.get('handles', 0)), "handleStart": handle_start, "handleEnd": handle_end, "frameStart": instance.data["frameStart"], @@ -276,8 +280,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # adding SourceResolution if Tag was present if instance.data.get("main"): in_info['custom_attributes'].update({ - "resolutionWidth": instance.data["width"], - "resolutionHeight": instance.data["height"], + "resolutionWidth": instance.data["resolutionWidth"], + "resolutionHeight": instance.data["resolutionHeight"], "pixelAspect": instance.data["pixelAspect"] }) diff --git a/pype/plugins/nukestudio/publish/collect_instance_version.py b/pype/plugins/nukestudio/publish/collect_instance_version.py new file mode 100644 index 0000000000..b79ccbdf54 --- /dev/null +++ b/pype/plugins/nukestudio/publish/collect_instance_version.py @@ -0,0 +1,18 @@ +from pyblish import api + + +class CollectInstanceVersion(api.InstancePlugin): + """ Collecting versions of Hiero project into instances + + If activated then any subset version is created in + version of the actual project. + """ + + order = api.CollectorOrder + 0.011 + label = "Collect Instance Version" + + def process(self, instance): + version = instance.context.data.get("version", "001") + instance.data.update({ + "version": int(version) + }) diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index be448931c8..4ed281f0ee 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -14,7 +14,7 @@ class CollectPlates(api.InstancePlugin): """ # Run just before CollectSubsets - order = api.CollectorOrder + 0.1025 + order = api.CollectorOrder + 0.1021 label = "Collect Plates" hosts = ["nukestudio"] families = ["clip"] @@ -23,8 +23,10 @@ class CollectPlates(api.InstancePlugin): # Exclude non-tagged instances. 
tagged = False for tag in instance.data["tags"]: - family = dict(tag["metadata"]).get("tag.family", "") + tag_data = dict(tag["metadata"]) + family = tag_data.get("tag.family", "") if family.lower() == "plate": + subset = tag_data.get("tag.subset", "Main") tagged = True break @@ -34,29 +36,27 @@ class CollectPlates(api.InstancePlugin): "\"plate\"".format(instance) ) return + self.log.debug("__ subset: `{}`".format(instance.data["subset"])) + # if "audio" in instance.data["subset"]: + # return # Collect data. data = {} for key, value in instance.data.iteritems(): data[key] = value + self.log.debug("__ family: `{}`".format(family)) + self.log.debug("__ subset: `{}`".format(subset)) + data["family"] = family.lower() data["families"] = ["ftrack"] + instance.data["families"][1:] data["source"] = data["sourcePath"] - - subset = "" - for tag in instance.data["tags"]: - tag_data = dict(tag["metadata"]) - if "tag.subset" in tag_data: - subset = tag_data["tag.subset"] - data["subset"] = data["family"] + subset.title() - + data["subset"] = family + subset.title() data["name"] = data["subset"] + "_" + data["asset"] data["label"] = "{} - {} - ({})".format( - data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[ - 1] - ) + data['asset'], data["subset"], os.path.splitext( + data["sourcePath"])[1]) if "review" in instance.data["families"]: data["label"] += " - review" @@ -83,7 +83,7 @@ class CollectPlates(api.InstancePlugin): class CollectPlatesData(api.InstancePlugin): """Collect plates""" - order = api.CollectorOrder + 0.495 + order = api.CollectorOrder + 0.48 label = "Collect Plates Data" hosts = ["nukestudio"] families = ["plate"] @@ -126,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin): transfer_data = [ "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", - "clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect" + "clipInH", "clipOutH", "asset", "track", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps" ] # pass data to version @@ -134,7 +134,6 @@ class CollectPlatesData(api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "colorspaceScript": instance.context.data["colorspace"], "families": [f for f in families if 'ftrack' not in f], @@ -142,11 +141,19 @@ class CollectPlatesData(api.InstancePlugin): "fps": instance.context.data["fps"] }) + version = instance.data.get("version") + if version: + version_data.update({ + "version": version + }) + + try: basename, ext = os.path.splitext(source_file) head, padding = os.path.splitext(basename) ext = ext[1:] padding = padding[1:] + self.log.debug("_ padding: `{}`".format(padding)) # head, padding, ext = source_file.split('.') source_first_frame = int(padding) padding = len(padding) @@ -156,8 +163,9 @@ class CollectPlatesData(api.InstancePlugin): ext=ext ) - start_frame = source_first_frame + instance.data["sourceInH"] - duration = instance.data["sourceOutH"] - instance.data["sourceInH"] + start_frame = int(source_first_frame + instance.data["sourceInH"]) + duration = int( + instance.data["sourceOutH"] - instance.data["sourceInH"]) end_frame = start_frame + duration self.log.debug("start_frame: `{}`".format(start_frame)) self.log.debug("end_frame: `{}`".format(end_frame)) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index c127b977e6..ed9b7a3636 
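CollectPlates above now reads both `tag.family` and `tag.subset` from the Hiero tag metadata and builds the subset name directly from them (falling back to `Main`), together with a readable label that includes the asset and source extension. A small sketch of that naming, assuming plain dict tag metadata:

```python
import os

# Sketch of the subset/label naming done in CollectPlates, assuming the
# tag metadata dict carries "tag.family" and optionally "tag.subset".
def plate_subset(tag_metadata, asset, source_path):
    family = tag_metadata.get("tag.family", "")
    subset = tag_metadata.get("tag.subset", "Main")
    name = family + subset.title()  # e.g. "plateMain"
    label = "{} - {} - ({})".format(
        asset, name, os.path.splitext(source_path)[1])
    return name, label


print(plate_subset({"tag.family": "plate"}, "sh0010", "/plates/sh0010.%04d.exr"))
# -> ('plateMain', 'sh0010 - plateMain - (.exr)')
```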
100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -16,7 +16,7 @@ class CollectReviews(api.InstancePlugin): order = api.CollectorOrder + 0.1022 label = "Collect Reviews" hosts = ["nukestudio"] - families = ["clip"] + families = ["plate"] def process(self, instance): # Exclude non-tagged instances. @@ -78,6 +78,8 @@ class CollectReviews(api.InstancePlugin): file_dir = os.path.dirname(file_path) file = os.path.basename(file_path) ext = os.path.splitext(file)[-1][1:] + handleStart = rev_inst.data.get("handleStart") + handleEnd = rev_inst.data.get("handleEnd") # change label instance.data["label"] = "{0} - {1} - ({2}) - review".format( @@ -86,13 +88,14 @@ class CollectReviews(api.InstancePlugin): self.log.debug("Instance review: {}".format(rev_inst.data["name"])) - # adding representation for review mov representation = { "files": file, "stagingDir": file_dir, "frameStart": rev_inst.data.get("sourceIn"), "frameEnd": rev_inst.data.get("sourceOut"), + "frameStartFtrack": rev_inst.data.get("sourceIn") - handleStart, + "frameEndFtrack": rev_inst.data.get("sourceOut") + handleEnd, "step": 1, "fps": rev_inst.data.get("fps"), "preview": True, @@ -138,7 +141,7 @@ class CollectReviews(api.InstancePlugin): thumb_path, format='png' ) - + self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"])) self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) @@ -158,7 +161,10 @@ class CollectReviews(api.InstancePlugin): item = instance.data["item"] transfer_data = [ - "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track", "version" + "handleStart", "handleEnd", "sourceIn", "sourceOut", + "frameStart", "frameEnd", "sourceInH", "sourceOutH", + "clipIn", "clipOut", "clipInH", "clipOutH", "asset", + "track", "version" ] version_data = dict() @@ -167,7 +173,6 @@ class CollectReviews(api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "families": instance.data["families"], "subset": instance.data["subset"], diff --git a/pype/plugins/nukestudio/publish/collect_tag_framestart.py b/pype/plugins/nukestudio/publish/collect_tag_framestart.py index 1342d996ab..993aa99a3e 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_framestart.py +++ b/pype/plugins/nukestudio/publish/collect_tag_framestart.py @@ -30,9 +30,12 @@ class CollectClipTagFrameStart(api.InstancePlugin): except ValueError: if "source" in t_value: source_first = instance.data["sourceFirst"] + if source_first == 0: + source_first = 1 + self.log.info("Start frame on `{0}`".format(source_first)) source_in = instance.data["sourceIn"] - handle_start = instance.data["handleStart"] - start_frame = (source_first + source_in) - handle_start + self.log.info("Start frame on `{0}`".format(source_in)) + start_frame = source_first + source_in instance.data["startingFrame"] = start_frame self.log.info("Start frame on `{0}` set to `{1}`".format( diff --git a/pype/plugins/nukestudio/publish/collect_tag_handles.py b/pype/plugins/nukestudio/publish/collect_tag_handles.py index 929f5e3b68..a6a63faea9 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_handles.py +++ b/pype/plugins/nukestudio/publish/collect_tag_handles.py @@ -38,7 +38,9 @@ class CollectClipTagHandles(api.ContextPlugin): # gets arguments if there are any t_args = 
t_metadata.get("tag.args", "") - assert t_args, self.log.error("Tag with Handles is missing Args. Use only handle start/end") + assert t_args, self.log.error( + "Tag with Handles is missing Args. " + "Use only handle start/end") t_args = json.loads(t_args.replace("'", "\"")) # add in start @@ -55,8 +57,8 @@ class CollectClipTagHandles(api.ContextPlugin): # adding handles to asset_shared on context if instance.data.get("handleEnd"): - assets_shared_a["handleEnd"] = instance.data["handleEnd"] + assets_shared_a[ + "handleEnd"] = instance.data["handleEnd"] if instance.data.get("handleStart"): - assets_shared_a["handleStart"] = instance.data["handleStart"] - if instance.data.get("handles"): - assets_shared_a["handles"] = instance.data["handles"] + assets_shared_a[ + "handleStart"] = instance.data["handleStart"] diff --git a/pype/plugins/nukestudio/publish/extract_audio.py b/pype/plugins/nukestudio/publish/extract_audio.py index 315ba6784d..2c4afc8412 100644 --- a/pype/plugins/nukestudio/publish/extract_audio.py +++ b/pype/plugins/nukestudio/publish/extract_audio.py @@ -10,8 +10,6 @@ class ExtractAudioFile(pype.api.Extractor): hosts = ["nukestudio"] families = ["clip", "audio"] match = api.Intersection - optional = True - active = False def process(self, instance): import os diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py index 15d2a80a55..5c9ee97f2b 100644 --- a/pype/plugins/nukestudio/publish/extract_effects.py +++ b/pype/plugins/nukestudio/publish/extract_effects.py @@ -2,10 +2,12 @@ import os import json import re +import copy import pyblish.api import tempfile from avalon import io, api + class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): """Collect video tracks effects into context.""" @@ -17,9 +19,12 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): item = instance.data["item"] effects = instance.data.get("effectTrackItems") - instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]] + instance.data["families"] = [f for f in instance.data.get( + "families", []) if f not in ["lut"]] - self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"])) + self.log.debug( + "__ instance.data[families]: `{}`".format( + instance.data["families"])) # skip any without effects if not effects: @@ -71,9 +76,11 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): ) data["source"] = data["sourcePath"] + # WARNING instance should not be created in Extractor! 
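The handles tag collector shown above still expects its arguments as a stringified dict on the tag (`tag.args`) and parses it by swapping single quotes for double quotes before `json.loads`. A standalone sketch of that parsing, with a made-up args string:

```python
import json

# Sketch of how the handle tag's arguments are interpreted, assuming the tag
# stores them as a single-quoted dict string such as the example below.
def parse_handle_args(t_args):
    if not t_args:
        raise ValueError(
            "Tag with Handles is missing Args. Use only handle start/end")
    # the tag editor writes single quotes; json needs double quotes
    return json.loads(t_args.replace("'", "\""))


print(parse_handle_args("{'handleStart': 10, 'handleEnd': 10}"))
# -> {'handleStart': 10, 'handleEnd': 10}
```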
# create new instance instance = instance.context.create_instance(**data) - + # TODO replace line below with `instance.data["resourcesDir"]` + # when instance is created during collection part dst_dir = self.resource_destination_dir(instance) # change paths in effects to files @@ -102,7 +109,6 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "colorspaceScript": instance.context.data["colorspace"], "families": ["plate", "lut"], @@ -132,7 +138,7 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): def copy_linked_files(self, effect, dst_dir): for k, v in effect["node"].items(): - if k in "file" and v is not '': + if k in "file" and v != '': base_name = os.path.basename(v) dst = os.path.join(dst_dir, base_name).replace("\\", "/") @@ -141,103 +147,114 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): return (v, dst) def resource_destination_dir(self, instance): - anatomy = instance.context.data['anatomy'] - self.create_destination_template(instance, anatomy) + # WARNING this is from `collect_instance_anatomy_data.py` + anatomy_data = copy.deepcopy(instance.context.data["anatomyData"]) + project_entity = instance.context.data["projectEntity"] + context_asset_entity = instance.context.data["assetEntity"] - return os.path.join( - instance.data["assumedDestination"], - "resources" - ) - - def create_destination_template(self, instance, anatomy): - """Create a filepath based on the current data available - - Example template: - {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/ - {subset}.{representation} - Args: - instance: the instance to publish - - Returns: - file path (str) - """ - - # get all the stuff from the database - subset_name = instance.data["subset"] - self.log.info(subset_name) asset_name = instance.data["asset"] - project_name = api.Session["AVALON_PROJECT"] - a_template = anatomy.templates + if context_asset_entity["name"] == asset_name: + asset_entity = context_asset_entity - project = io.find_one( - { - "type": "project", - "name": project_name - }, - projection={"config": True, "data": True} - ) + else: + asset_entity = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project_entity["_id"] + }) - template = a_template['publish']['path'] - # anatomy = instance.context.data['anatomy'] + subset_name = instance.data["subset"] + version_number = instance.data.get("version") + latest_version = None - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] + if asset_entity: + subset_entity = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset_entity["_id"] + }) + + if subset_entity is None: + self.log.debug("Subset entity does not exist yet.") + else: + version_entity = io.find_one( + { + "type": "version", + "parent": subset_entity["_id"] + }, + sort=[("name", -1)] + ) + if version_entity: + latest_version = version_entity["name"] + + if version_number is None: + version_number = 1 + if latest_version is not None: + version_number += int(latest_version) + + anatomy_data.update({ + "asset": asset_name, + "family": instance.data["family"], + "subset": subset_name, + "version": version_number, + "hierarchy": instance.data["hierarchy"] }) - assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - silo = asset.get('silo') + resolution_width = instance.data.get("resolutionWidth") 
+ if resolution_width: + anatomy_data["resolution_width"] = resolution_width - subset = io.find_one({ - "type": "subset", - "name": subset_name, - "parent": asset["_id"] + resolution_height = instance.data.get("resolutionHeight") + if resolution_height: + anatomy_data["resolution_height"] = resolution_height + + pixel_aspect = instance.data.get("pixelAspect") + if pixel_aspect: + anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect)) + + fps = instance.data.get("fps") + if resolution_height: + anatomy_data["fps"] = float("{:0.2f}".format(fps)) + + instance.data["projectEntity"] = project_entity + instance.data["assetEntity"] = asset_entity + instance.data["anatomyData"] = anatomy_data + instance.data["latestVersion"] = latest_version + instance.data["version"] = version_number + + # WARNING this is from `collect_resources_path.py` + anatomy = instance.context.data["anatomy"] + + template_data = copy.deepcopy(instance.data["anatomyData"]) + + # This is for cases of Deprecated anatomy without `folder` + # TODO remove when all clients have solved this issue + template_data.update({ + "frame": "FRAME_TEMP", + "representation": "TEMP" }) - # assume there is no version yet, we start at `1` - version = None - version_number = 1 - if subset is not None: - version = io.find_one( - { - "type": "version", - "parent": subset["_id"] - }, - sort=[("name", -1)] - ) + anatomy_filled = anatomy.format(template_data) - # if there is a subset there ought to be version - if version is not None: - version_number += version["name"] + if "folder" in anatomy.templates["publish"]: + publish_folder = anatomy_filled["publish"]["folder"] + else: + # solve deprecated situation when `folder` key is not underneath + # `publish` anatomy + project_name = api.Session["AVALON_PROJECT"] + self.log.warning(( + "Deprecation warning: Anatomy does not have set `folder`" + " key underneath `publish` (in global of for project `{}`)." 
+ ).format(project_name)) - if instance.data.get('version'): - version_number = int(instance.data.get('version')) + file_path = anatomy_filled["publish"]["path"] + # Directory + publish_folder = os.path.dirname(file_path) - padding = int(a_template['render']['padding']) + publish_folder = os.path.normpath(publish_folder) + resources_folder = os.path.join(publish_folder, "resources") - hierarchy = asset['data']['parents'] - if hierarchy: - # hierarchy = os.path.sep.join(hierarchy) - hierarchy = "/".join(hierarchy) + instance.data["publishDir"] = publish_folder + instance.data["resourcesDir"] = resources_folder - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "silo": silo, - "family": instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "frame": ('#' * padding), - "version": version_number, - "hierarchy": hierarchy, - "representation": "TEMP"} - - instance.data["assumedTemplateData"] = template_data - self.log.info(template_data) - instance.data["template"] = template - # We take the parent folder of representation 'filepath' - instance.data["assumedDestination"] = os.path.dirname( - anatomy.format(template_data)["publish"]["path"] - ) + return resources_folder diff --git a/pype/plugins/nukestudio/publish/validate_version.py b/pype/plugins/nukestudio/publish/validate_version.py deleted file mode 100644 index ebb8f357f8..0000000000 --- a/pype/plugins/nukestudio/publish/validate_version.py +++ /dev/null @@ -1,79 +0,0 @@ -import pyblish -from avalon import io -from pype.action import get_errored_instances_from_context -import pype.api as pype - - -@pyblish.api.log -class RepairNukestudioVersionUp(pyblish.api.Action): - label = "Version Up Workfile" - on = "failed" - icon = "wrench" - - def process(self, context, plugin): - - errored_instances = get_errored_instances_from_context(context) - - # Apply pyblish logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(errored_instances, plugin) - - if instances: - project = context.data["activeProject"] - path = context.data.get("currentFile") - - new_path = pype.version_up(path) - - if project: - project.saveAs(new_path) - - self.log.info("Project workfile version was fixed") - - -class ValidateVersion(pyblish.api.InstancePlugin): - """Validate clip's versions. - - """ - - order = pyblish.api.ValidatorOrder - families = ["plate"] - label = "Validate Version" - actions = [RepairNukestudioVersionUp] - hosts = ["nukestudio"] - - def process(self, instance): - version = int(instance.data.get("version", 0)) - asset_name = instance.data.get("asset", None) - subset_name = instance.data.get("subset", None) - - assert version, "The file is missing version string! example: filename_v001.hrox `{}`" - - self.log.debug("Collected version: `{0}`".format(version)) - - found_v = 0 - try: - io.install() - project = io.find_one({"type": "project"}) - - asset = io.find_one({ - "type": "asset", - "name": asset_name, - "parent": project["_id"] - }) - - subset = io.find_one({ - "type": "subset", - "parent": asset["_id"], - "name": subset_name - }) - - version_db = io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': version - }) or {} - found_v = version_db.get("name", 0) - self.log.debug("Found version: `{0}`".format(found_v)) - except Exception as e: - self.log.debug("Problem to get data from database for asset `{0}` subset `{1}`. 
Error: `{2}`".format(asset_name, subset_name, e)) - - assert (found_v != version), "Version must not be the same as in database `{0}`, Versions file: `{1}`, db: `{2}`".format(asset_name, version, found_v) diff --git a/pype/plugins/standalonepublisher/publish/collect_matchmove.py b/pype/plugins/standalonepublisher/publish/collect_matchmove.py index b46efc1cf3..5d9e8ddfb4 100644 --- a/pype/plugins/standalonepublisher/publish/collect_matchmove.py +++ b/pype/plugins/standalonepublisher/publish/collect_matchmove.py @@ -21,7 +21,7 @@ class CollectMatchmovePublish(pyblish.api.InstancePlugin): label = "Collect Matchmove - SA Publish" order = pyblish.api.CollectorOrder - family = ["matchmove"] + families = ["matchmove"] hosts = ["standalonepublisher"] def process(self, instance): diff --git a/pype/plugins/standalonepublisher/publish/extract_review.py b/pype/plugins/standalonepublisher/publish/extract_review.py index f06d9bcde0..36793d4c62 100644 --- a/pype/plugins/standalonepublisher/publish/extract_review.py +++ b/pype/plugins/standalonepublisher/publish/extract_review.py @@ -4,6 +4,7 @@ import tempfile import pyblish.api import clique import pype.api +import pype.lib class ExtractReviewSP(pyblish.api.InstancePlugin): @@ -148,12 +149,7 @@ class ExtractReviewSP(pyblish.api.InstancePlugin): # output filename output_args.append(full_output_path) - ffmpeg_path = os.getenv("FFMPEG_PATH", "") - if ffmpeg_path: - ffmpeg_path += "/ffmpeg" - else: - ffmpeg_path = "ffmpeg" - + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") mov_args = [ ffmpeg_path, " ".join(input_args), @@ -174,8 +170,8 @@ class ExtractReviewSP(pyblish.api.InstancePlugin): "stagingDir": out_stagigng_dir, "tags": new_tags, "outputName": name, - "startFrameReview": 1, - "endFrameReview": video_len + "frameStartFtrack": 1, + "frameEndFtrack": video_len }) # cleanup thumbnail from new repre if repre_new.get("thumbnail"): diff --git a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py index 69a2e0fdad..daa3936359 100644 --- a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py +++ b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py @@ -3,6 +3,7 @@ import tempfile import subprocess import pyblish.api import pype.api +import pype.lib class ExtractThumbnailSP(pyblish.api.InstancePlugin): @@ -73,11 +74,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin): config_data.get("__default__", {}) ) - ffmpeg_path = os.getenv("FFMPEG_PATH", "") - if ffmpeg_path: - ffmpeg_path += "/ffmpeg" - else: - ffmpeg_path = "ffmpeg" + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") jpeg_items = [] jpeg_items.append(ffmpeg_path) diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index d5bc2594a4..8d0b925089 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -1,29 +1,42 @@ import os -import datetime +import sys +import re import subprocess import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins from pypeapp.lib import config -from pype import api as pype -# FFmpeg in PATH is required +from pypeapp import Logger +import pype.lib + +log = Logger().get_logger("BurninWrapper", "burninwrap") -log = pype.Logger().get_logger("BurninWrapper", "burninwrap") +ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") +ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") -ffmpeg_path = os.environ.get("FFMPEG_PATH") -if ffmpeg_path and os.path.exists(ffmpeg_path): - # add separator "/" or "\" to be 
prepared for next part - ffmpeg_path += os.path.sep -else: - ffmpeg_path = "" - FFMPEG = ( '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s' -).format(os.path.normpath(ffmpeg_path + "ffmpeg")) +).format(ffmpeg_path) + FFPROBE = ( '{} -v quiet -print_format json -show_format -show_streams %(source)s' -).format(os.path.normpath(ffmpeg_path + "ffprobe")) +).format(ffprobe_path) + +DRAWTEXT = ( + "drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor=" + "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'" +) +TIMECODE = ( + "drawtext=timecode=\\'%(timecode)s\\':text=\\'%(text)s\\'" + ":timecode_rate=%(fps).2f:x=%(x)s:y=%(y)s:fontcolor=" + "%(color)s@%(opacity).1f:fontsize=%(size)d:fontfile='%(font)s'" +) + +MISSING_KEY_VALUE = "N/A" +CURRENT_FRAME_KEY = "{current_frame}" +CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_" +TIME_CODE_KEY = "{timecode}" def _streams(source): @@ -118,82 +131,69 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): if options_init: self.options_init.update(options_init) - def add_text(self, text, align, options=None): + def add_text( + self, text, align, frame_start=None, frame_end=None, options=None + ): """ Adding static text to a filter. :param str text: text to apply to the drawtext :param enum align: alignment, must use provided enum flags + :param int frame_start: starting frame for burnins current frame :param dict options: recommended to use TextOptions """ if not options: options = ffmpeg_burnins.TextOptions(**self.options_init) - self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT) - def add_datetime(self, date_format, align, options=None): - """ - Adding date text to a filter. Using pythons datetime module. + options = options.copy() + if frame_start: + options["frame_offset"] = frame_start - :param str date_format: format of date (e.g. `%d.%m.%Y`) - :param enum align: alignment, must use provided enum flags - :param dict options: recommended to use TextOptions - """ - if not options: - options = ffmpeg_burnins.TextOptions(**self.options_init) - today = datetime.datetime.today() - text = today.strftime(date_format) - self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT) + # `frame_end` is only for meassurements of text position + if frame_end: + options["frame_end"] = frame_end - def add_frame_numbers( - self, align, options=None, start_frame=None, text=None + self._add_burnin(text, align, options, DRAWTEXT) + + def add_timecode( + self, align, frame_start=None, frame_end=None, frame_start_tc=None, + text=None, options=None ): """ Convenience method to create the frame number expression. :param enum align: alignment, must use provided enum flags - :param dict options: recommended to use FrameNumberOptions - """ - if not options: - options = ffmpeg_burnins.FrameNumberOptions(**self.options_init) - if start_frame: - options['frame_offset'] = start_frame - - expr = r'%%{eif\:n+%d\:d}' % options['frame_offset'] - _text = str(int(self.end_frame + options['frame_offset'])) - if text and isinstance(text, str): - text = r"{}".format(text) - expr = text.replace("{current_frame}", expr) - text = text.replace("{current_frame}", _text) - - options['expression'] = expr - self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT) - - def add_timecode(self, align, options=None, start_frame=None): - """ - Convenience method to create the frame number expression. 
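`add_timecode` above keeps using `ffmpeg_burnins._frames_to_timecode` to turn the starting frame into an SMPTE-style string for the drawtext filter. That helper is not part of this diff; the sketch below is only an approximation of the conversion for a non-drop-frame rate:

```python
# Approximation of a frames-to-timecode conversion like the one
# ffmpeg_burnins._frames_to_timecode performs (non-drop-frame only).
def frames_to_timecode(frame, fps):
    fps = int(round(fps))
    hours, rem = divmod(frame // fps, 3600)
    minutes, seconds = divmod(rem, 60)
    frames = frame % fps
    return "{:02d}:{:02d}:{:02d}:{:02d}".format(hours, minutes, seconds, frames)


print(frames_to_timecode(1001, 24.0))  # -> 00:00:41:17
```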
- - :param enum align: alignment, must use provided enum flags + :param int frame_start: starting frame for burnins current frame + :param int frame_start_tc: starting frame for burnins timecode + :param str text: text that will be before timecode :param dict options: recommended to use TimeCodeOptions """ if not options: options = ffmpeg_burnins.TimeCodeOptions(**self.options_init) - if start_frame: - options['frame_offset'] = start_frame - timecode = ffmpeg_burnins._frames_to_timecode( - options['frame_offset'], + options = options.copy() + if frame_start: + options["frame_offset"] = frame_start + + # `frame_end` is only for meassurements of text position + if frame_end: + options["frame_end"] = frame_end + + if not frame_start_tc: + frame_start_tc = options["frame_offset"] + + if not text: + text = "" + + if not options.get("fps"): + options["fps"] = self.frame_rate + + options["timecode"] = ffmpeg_burnins._frames_to_timecode( + frame_start_tc, self.frame_rate ) - options = options.copy() - if not options.get('fps'): - options['fps'] = self.frame_rate - self._add_burnin( - timecode.replace(':', r'\:'), - align, - options, - ffmpeg_burnins.TIMECODE - ) + self._add_burnin(text, align, options, TIMECODE) def _add_burnin(self, text, align, options, draw): """ @@ -202,14 +202,43 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): :param enum align: alignment, must use provided enum flags :param dict options: """ + + final_text = text + text_for_size = text + if CURRENT_FRAME_SPLITTER in text: + frame_start = options["frame_offset"] + frame_end = options.get("frame_end", frame_start) + if not frame_start: + replacement_final = replacement_size = str(MISSING_KEY_VALUE) + else: + replacement_final = "\\'{}\\'".format( + r'%%{eif\:n+%d\:d}' % frame_start + ) + replacement_size = str(frame_end) + + final_text = final_text.replace( + CURRENT_FRAME_SPLITTER, replacement_final + ) + text_for_size = text_for_size.replace( + CURRENT_FRAME_SPLITTER, replacement_size + ) + resolution = self.resolution data = { - 'text': options.get('expression') or text, + 'text': ( + final_text + .replace(",", r"\,") + .replace(':', r'\:') + ), 'color': options['font_color'], 'size': options['font_size'] } + timecode_text = options.get("timecode") or "" + text_for_size += timecode_text data.update(options) - data.update(ffmpeg_burnins._drawtext(align, resolution, text, options)) + data.update( + ffmpeg_burnins._drawtext(align, resolution, text_for_size, options) + ) if 'font' in data and ffmpeg_burnins._is_windows(): data['font'] = data['font'].replace(os.sep, r'\\' + os.sep) data['font'] = data['font'].replace(':', r'\:') @@ -248,6 +277,37 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): 'filters': filters }).strip() + def render(self, output, args=None, overwrite=False, **kwargs): + """ + Render the media to a specified destination. 
+ + :param str output: output file + :param str args: additional FFMPEG arguments + :param bool overwrite: overwrite the output if it exists + """ + if not overwrite and os.path.exists(output): + raise RuntimeError("Destination '%s' exists, please " + "use overwrite" % output) + + is_sequence = "%" in output + + command = self.command( + output=output, + args=args, + overwrite=overwrite + ) + print(command) + + proc = subprocess.Popen(command, shell=True) + proc.communicate() + if proc.returncode != 0: + raise RuntimeError("Failed to render '%s': %s'" + % (output, command)) + if is_sequence: + output = output % kwargs.get("duration") + if not os.path.exists(output): + raise RuntimeError("Failed to generate file '%s'" % output) + def example(input_path, output_path): options_init = { @@ -266,15 +326,13 @@ def example(input_path, output_path): burnin.add_text('My Text', ModifiedBurnins.TOP_CENTERED) # Datetime burnin.add_text('%d-%m-%y', ModifiedBurnins.TOP_RIGHT) - # Frame number - burnin.add_frame_numbers(ModifiedBurnins.TOP_RIGHT, start_frame=start_frame) - # Timecode - burnin.add_timecode(ModifiedBurnins.TOP_LEFT, start_frame=start_frame) # Start render (overwrite output file if exist) burnin.render(output_path, overwrite=True) -def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True): +def burnins_from_data( + input_path, output_path, data, codec_data=None, overwrite=True +): ''' This method adds burnins to video/image file based on presets setting. Extension of output MUST be same as input. (mov -> mov, avi -> avi,...) @@ -298,47 +356,35 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) - each key of "burnins" represents Alignment, there are 6 possibilities: TOP_LEFT TOP_CENTERED TOP_RIGHT BOTTOM_LEFT BOTTOM_CENTERED BOTTOM_RIGHT - - value for each key is dict which should contain "function" which says - what kind of burnin is that: - "text", "timecode" or "frame_numbers" - - "text" key with content is also required when "text" function is used + - value must be a string with the text you want to burn in + - text may contain specific formatting keys (explained below) Requirement of *data* keys is based on presets.
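# Editor's note, a hedged usage sketch (not part of the diff) of the reworked
# signature: the output path now comes second and codec_data is optional and
# last. Paths, codec flags and data values below are illustrative only; the
# burnin presets themselves are still read from the pype presets.
burnins_from_data(
    "/path/to/input.mov",
    "/path/to/output.mov",
    {"frame_start": 1001, "frame_end": 1150, "shot": "sh0010"},
    codec_data=["-codec:v", "libx264", "-crf", "18"]
)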
- - "start_frame" - is required when "timecode" or "frame_numbers" function is used - - "start_frame_tc" - when "timecode" should start with different frame + - "frame_start" - is required when "timecode" or "current_frame" ins keys + - "frame_start_tc" - when "timecode" should start with different frame - *keys for static text* EXAMPLE: preset = { "options": {*OPTIONS FOR LOOK*}, "burnins": { - "TOP_LEFT": { - "function": "text", - "text": "static_text" - }, - "TOP_RIGHT": { - "function": "text", - "text": "{shot}" - }, - "BOTTOM_LEFT": { - "function": "timecode" - }, - "BOTTOM_RIGHT": { - "function": "frame_numbers" - } + "TOP_LEFT": "static_text", + "TOP_RIGHT": "{shot}", + "BOTTOM_LEFT": "TC: {timecode}", + "BOTTOM_RIGHT": "{frame_start}{current_frame}" } } For this preset we'll need at least this data: data = { - "start_frame": 1001, + "frame_start": 1001, "shot": "sh0010" } When Timecode should start from 1 then data need: data = { - "start_frame": 1001, - "start_frame_tc": 1, + "frame_start": 1001, + "frame_start_tc": 1, "shot": "sh0010" } ''' @@ -348,104 +394,102 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) burnin = ModifiedBurnins(input_path, options_init=options_init) frame_start = data.get("frame_start") + frame_end = data.get("frame_end") frame_start_tc = data.get('frame_start_tc', frame_start) - + stream = burnin._streams[0] if "resolution_width" not in data: - data["resolution_width"] = stream.get("width", "Unknown") + data["resolution_width"] = stream.get("width", MISSING_KEY_VALUE) if "resolution_height" not in data: - data["resolution_height"] = stream.get("height", "Unknown") + data["resolution_height"] = stream.get("height", MISSING_KEY_VALUE) if "fps" not in data: data["fps"] = get_fps(stream.get("r_frame_rate", "0/0")) - for align_text, preset in presets.get('burnins', {}).items(): + # Check frame start and add expression if is available + if frame_start is not None: + data[CURRENT_FRAME_KEY[1:-1]] = CURRENT_FRAME_SPLITTER + + if frame_start_tc is not None: + data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY + + for align_text, value in presets.get('burnins', {}).items(): + if not value: + continue + + if isinstance(value, (dict, list, tuple)): + raise TypeError(( + "Expected string or number type." + " Got: {} - \"{}\"" + " (Make sure you have new burnin presets)." + ).format(str(type(value)), str(value))) + + has_timecode = TIME_CODE_KEY in value + align = None - if align_text == 'TOP_LEFT': + align_text = align_text.strip().lower() + if align_text == "top_left": align = ModifiedBurnins.TOP_LEFT - elif align_text == 'TOP_CENTERED': + elif align_text == "top_centered": align = ModifiedBurnins.TOP_CENTERED - elif align_text == 'TOP_RIGHT': + elif align_text == "top_right": align = ModifiedBurnins.TOP_RIGHT - elif align_text == 'BOTTOM_LEFT': + elif align_text == "bottom_left": align = ModifiedBurnins.BOTTOM_LEFT - elif align_text == 'BOTTOM_CENTERED': + elif align_text == "bottom_centered": align = ModifiedBurnins.BOTTOM_CENTERED - elif align_text == 'BOTTOM_RIGHT': + elif align_text == "bottom_right": align = ModifiedBurnins.BOTTOM_RIGHT - bi_func = preset.get('function') - if not bi_func: - log.error( - 'Missing function for burnin!' - 'Burnins are not created!' + # Replace with missing key value if frame_start_tc is not set + if frame_start_tc is None and has_timecode: + has_timecode = False + log.warning( + "`frame_start` and `frame_start_tc`" + " are not set in entered data." 
) - return + value = value.replace(TIME_CODE_KEY, MISSING_KEY_VALUE) - if ( - bi_func in ['frame_numbers', 'timecode'] and - frame_start is None - ): - log.error( - 'start_frame is not set in entered data!' - 'Burnins are not created!' - ) - return + key_pattern = re.compile(r"(\{.*?[^{0]*\})") - if bi_func == 'frame_numbers': - current_frame_identifier = "{current_frame}" - text = preset.get('text') or current_frame_identifier + missing_keys = [] + for group in key_pattern.findall(value): + try: + group.format(**data) + except (TypeError, KeyError): + missing_keys.append(group) - if current_frame_identifier not in text: - log.warning(( - 'Text for Frame numbers don\'t have ' - '`{current_frame}` key in text!' - )) + missing_keys = list(set(missing_keys)) + for key in missing_keys: + value = value.replace(key, MISSING_KEY_VALUE) - text_items = [] - split_items = text.split(current_frame_identifier) - for item in split_items: - text_items.append(item.format(**data)) + # Handle timecode differently + if has_timecode: + args = [align, frame_start, frame_end, frame_start_tc] + if not value.startswith(TIME_CODE_KEY): + value_items = value.split(TIME_CODE_KEY) + text = value_items[0].format(**data) + args.append(text) - text = "{current_frame}".join(text_items) + burnin.add_timecode(*args) + continue - burnin.add_frame_numbers(align, start_frame=frame_start, text=text) + text = value.format(**data) + burnin.add_text(text, align, frame_start, frame_end) - elif bi_func == 'timecode': - burnin.add_timecode(align, start_frame=frame_start_tc) - - elif bi_func == 'text': - if not preset.get('text'): - log.error('Text is not set for text function burnin!') - return - text = preset['text'].format(**data) - burnin.add_text(text, align) - - elif bi_func == "datetime": - date_format = preset["format"] - burnin.add_datetime(date_format, align) - - else: - log.error( - 'Unknown function for burnins {}'.format(bi_func) - ) - return - - codec_args = '' - if codec_data is not []: + codec_args = "" + if codec_data: codec_args = " ".join(codec_data) - burnin.render(output_path, args=codec_args, overwrite=overwrite) + burnin.render(output_path, args=codec_args, overwrite=overwrite, **data) if __name__ == '__main__': - import sys - import json - data = json.loads(sys.argv[-1]) + in_data = json.loads(sys.argv[-1]) burnins_from_data( - data['input'], - data['codec'], - data['output'], - data['burnin_data'] + in_data['input'], + in_data['output'], + in_data['burnin_data'], + in_data['codec'] ) diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 5517cfeb4c..fe795564a5 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -1,9 +1,13 @@ """This module is used for command line publishing of image sequences.""" import os +import sys +import argparse import logging import subprocess import platform +import json + try: from shutil import which except ImportError: @@ -21,9 +25,20 @@ log.setLevel(logging.DEBUG) error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" +def _load_json(path): + assert os.path.isfile(path), ("path to json file doesn't exist") + data = None + with open(path, "r") as json_file: + try: + data = json.load(json_file) + except Exception as exc: + log.error( + "Error loading json: " + "{} - Exception: {}".format(path, exc) + ) + return data def __main__(): - import argparse parser = argparse.ArgumentParser() parser.add_argument("--paths", nargs="*", @@ -43,7 +58,11 @@ def __main__(): print("Running pype 
...") auto_pype_root = os.path.dirname(os.path.abspath(__file__)) auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..") + auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root + if os.environ.get('PYPE_ROOT'): + print("Got Pype location from environment: {}".format( + os.environ.get('PYPE_ROOT'))) pype_command = "pype.ps1" if platform.system().lower() == "linux": @@ -69,7 +88,13 @@ def __main__(): print("Set pype root to: {}".format(pype_root)) print("Paths: {}".format(kwargs.paths or [os.getcwd()])) - paths = kwargs.paths or [os.getcwd()] + paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa + + for path in paths: + data = _load_json(path) + log.info("Setting session using data from file") + os.environ["AVALON_PROJECT"] = data["session"]["AVALON_PROJECT"] + break args = [ os.path.join(pype_root, pype_command), diff --git a/pype/standalonepublish/__init__.py b/pype/standalonepublish/__init__.py index c7be80f189..8e615afbea 100644 --- a/pype/standalonepublish/__init__.py +++ b/pype/standalonepublish/__init__.py @@ -1,3 +1,5 @@ +PUBLISH_PATHS = [] + from .standalonepublish_module import StandAlonePublishModule from .app import ( show, diff --git a/pype/standalonepublish/publish.py b/pype/standalonepublish/publish.py index fcbb6e137d..045b3d590e 100644 --- a/pype/standalonepublish/publish.py +++ b/pype/standalonepublish/publish.py @@ -5,14 +5,14 @@ import tempfile import random import string -from avalon import io -from avalon import api as avalon +from avalon import io, api from avalon.tools import publish as av_publish import pype from pypeapp import execute import pyblish.api +from . import PUBLISH_PATHS def set_context(project, asset, task, app): @@ -31,7 +31,6 @@ def set_context(project, asset, task, app): os.environ["AVALON_TASK"] = task io.Session["AVALON_TASK"] = task - io.install() av_project = io.find_one({'type': 'project'}) @@ -76,7 +75,7 @@ def avalon_api_publish(data, gui=True): io.install() # Create hash name folder in temp - chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] ) + chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) staging_dir = tempfile.mkdtemp(chars) # create also json and fill with data @@ -105,8 +104,27 @@ def avalon_api_publish(data, gui=True): def cli_publish(data, gui=True): io.install() + pyblish.api.deregister_all_plugins() + # Registers Global pyblish plugins + pype.install() + # Registers Standalone pyblish plugins + for path in PUBLISH_PATHS: + pyblish.api.register_plugin_path(path) + + project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS") + project_name = os.environ["AVALON_PROJECT"] + if project_plugins_paths and project_name: + for path in project_plugins_paths.split(os.pathsep): + if not path: + continue + plugin_path = os.path.join(path, project_name, "plugins") + if os.path.exists(plugin_path): + pyblish.api.register_plugin_path(plugin_path) + api.register_plugin_path(api.Loader, plugin_path) + api.register_plugin_path(api.Creator, plugin_path) + # Create hash name folder in temp - chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] ) + chars = "".join([random.choice(string.ascii_letters) for i in range(15)]) staging_dir = tempfile.mkdtemp(chars) # create json for return data diff --git a/pype/standalonepublish/standalonepublish_module.py b/pype/standalonepublish/standalonepublish_module.py index 75c033e16b..64195bc271 100644 --- a/pype/standalonepublish/standalonepublish_module.py +++ 
b/pype/standalonepublish/standalonepublish_module.py @@ -2,16 +2,16 @@ import os from .app import show from .widgets import QtWidgets import pype -import pyblish.api +from . import PUBLISH_PATHS class StandAlonePublishModule: - PUBLISH_PATHS = [] def __init__(self, main_parent=None, parent=None): self.main_parent = main_parent self.parent_widget = parent - self.PUBLISH_PATHS.append(os.path.sep.join( + PUBLISH_PATHS.clear() + PUBLISH_PATHS.append(os.path.sep.join( [pype.PLUGINS_DIR, "standalonepublisher", "publish"] )) @@ -24,16 +24,9 @@ class StandAlonePublishModule: def process_modules(self, modules): if "FtrackModule" in modules: - self.PUBLISH_PATHS.append(os.path.sep.join( + PUBLISH_PATHS.append(os.path.sep.join( [pype.PLUGINS_DIR, "ftrack", "publish"] )) - def tray_start(self): - # Registers Global pyblish plugins - pype.install() - # Registers Standalone pyblish plugins - for path in self.PUBLISH_PATHS: - pyblish.api.register_plugin_path(path) - def show(self): show(self.main_parent, False) diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py index 73b9f0e179..c85105a333 100644 --- a/pype/standalonepublish/widgets/widget_drop_frame.py +++ b/pype/standalonepublish/widgets/widget_drop_frame.py @@ -4,6 +4,7 @@ import json import clique import subprocess from pypeapp import config +import pype.lib from . import QtWidgets, QtCore from . import DropEmpty, ComponentsList, ComponentItem @@ -224,12 +225,7 @@ class DropDataFrame(QtWidgets.QFrame): self._process_data(data) def load_data_with_probe(self, filepath): - ffprobe_path = os.getenv("FFMPEG_PATH", "") - if ffprobe_path: - ffprobe_path += '/ffprobe' - else: - ffprobe_path = 'ffprobe' - + ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe") args = [ ffprobe_path, '-v', 'quiet', diff --git a/pype/tools/assetcreator/model.py b/pype/tools/assetcreator/model.py index b77ffa7a5d..3af1d77127 100644 --- a/pype/tools/assetcreator/model.py +++ b/pype/tools/assetcreator/model.py @@ -241,7 +241,7 @@ class TasksModel(TreeModel): self.endResetModel() def flags(self, index): - return QtCore.Qt.ItemIsEnabled + return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable def headerData(self, section, orientation, role): diff --git a/pype/user/user_module.py b/pype/user/user_module.py index d70885b211..46ceb0031f 100644 --- a/pype/user/user_module.py +++ b/pype/user/user_module.py @@ -19,8 +19,8 @@ class UserModule: log = pype.Logger().get_logger("UserModule", "user") def __init__(self, main_parent=None, parent=None): + self._callbacks_on_user_change = [] self.cred = {} - self.cred_path = os.path.normpath(os.path.join( self.cred_folder_path, self.cred_filename )) @@ -28,6 +28,9 @@ class UserModule: self.load_credentials() + def register_callback_on_user_change(self, callback): + self._callbacks_on_user_change.append(callback) + def tray_start(self): """Store credentials to env and preset them to widget""" username = "" @@ -37,6 +40,9 @@ class UserModule: os.environ[self.env_name] = username self.widget_login.set_user(username) + def get_user(self): + return self.cred.get("username") or getpass.getuser() + def process_modules(self, modules): """ Gives ability to connect with imported modules from TrayManager. 
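The new UserModule hooks deserve a short usage note. A minimal sketch, assuming a UserModule instance (here called user_module) is available, for example from the tray manager; the callback and username are illustrative:

    def _on_user_change():
        print("Active user is now:", user_module.get_user())

    user_module.register_callback_on_user_change(_on_user_change)
    # change_credentials() (added below) saves the username and then runs every
    # registered callback; a failing callback is logged as a warning, not raised.
    user_module.change_credentials("jdoe")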
@@ -95,6 +101,17 @@ class UserModule: )) return self.save_credentials(getpass.getuser()) + def change_credentials(self, username): + self.save_credentials(username) + for callback in self._callbacks_on_user_change: + try: + callback() + except Exception: + self.log.warning( + "Failed to execute callback \"{}\".".format(str(callback)), + exc_info=True + ) + def save_credentials(self, username): """Save credentials to JSON file, env and widget""" if username is None: diff --git a/pype/user/widget_user.py b/pype/user/widget_user.py index 7ca12ec4d4..27faa857f5 100644 --- a/pype/user/widget_user.py +++ b/pype/user/widget_user.py @@ -77,7 +77,7 @@ class UserWidget(QtWidgets.QWidget): def click_save(self): # all what should happen - validations and saving into appsdir username = self.input_username.text() - self.module.save_credentials(username) + self.module.change_credentials(username) self._close_widget() def closeEvent(self, event): diff --git a/pype/vendor/ftrack_api_old/_version.py b/pype/vendor/ftrack_api_old/_version.py index 07f744ca5d..aa1a8c4aba 100644 --- a/pype/vendor/ftrack_api_old/_version.py +++ b/pype/vendor/ftrack_api_old/_version.py @@ -1 +1 @@ -__version__ = '1.3.3' +__version__ = '1.8.2' diff --git a/pype/vendor/ftrack_api_old/_weakref.py b/pype/vendor/ftrack_api_old/_weakref.py new file mode 100644 index 0000000000..69cc6f4b4f --- /dev/null +++ b/pype/vendor/ftrack_api_old/_weakref.py @@ -0,0 +1,66 @@ +""" +Yet another backport of WeakMethod for Python 2.7. +Changes include removing exception chaining and adding args to super() calls. + +Copyright (c) 2001-2019 Python Software Foundation.All rights reserved. + +Full license available in LICENSE.python. +""" +from weakref import ref + + +class WeakMethod(ref): + """ + A custom `weakref.ref` subclass which simulates a weak reference to + a bound method, working around the lifetime problem of bound methods. + """ + + __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" + + def __new__(cls, meth, callback=None): + try: + obj = meth.__self__ + func = meth.__func__ + except AttributeError: + raise TypeError( + "argument should be a bound method, not {}".format(type(meth)) + ) + + def _cb(arg): + # The self-weakref trick is needed to avoid creating a reference + # cycle. 
+ self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super(WeakMethod, self).__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return NotImplemented + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return NotImplemented + + __hash__ = ref.__hash__ diff --git a/pype/vendor/ftrack_api_old/attribute.py b/pype/vendor/ftrack_api_old/attribute.py index 66840bed66..47fd6c9616 100644 --- a/pype/vendor/ftrack_api_old/attribute.py +++ b/pype/vendor/ftrack_api_old/attribute.py @@ -148,7 +148,8 @@ class Attribute(object): '''A name and value pair persisted remotely.''' def __init__( - self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True + self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True, + computed=False ): '''Initialise attribute with *name*. @@ -161,10 +162,14 @@ class Attribute(object): are :attr:`ftrack_api_old.symbol.NOT_SET`. The exception to this is when the target value is also :attr:`ftrack_api_old.symbol.NOT_SET`. + If *computed* is set to True the value is a remote side computed value + and should not be long-term cached. + ''' super(Attribute, self).__init__() self._name = name self._mutable = mutable + self._computed = computed self.default_value = default_value self._local_key = 'local' @@ -205,6 +210,11 @@ class Attribute(object): '''Return whether attribute is mutable.''' return self._mutable + @property + def computed(self): + '''Return whether attribute is computed.''' + return self._computed + def get_value(self, entity): '''Return current value for *entity*. diff --git a/pype/vendor/ftrack_api_old/entity/factory.py b/pype/vendor/ftrack_api_old/entity/factory.py index 16721514bd..f47c92e563 100644 --- a/pype/vendor/ftrack_api_old/entity/factory.py +++ b/pype/vendor/ftrack_api_old/entity/factory.py @@ -49,9 +49,11 @@ class Factory(object): # Build attributes for class. attributes = ftrack_api_old.attribute.Attributes() - immutable = schema.get('immutable', []) + immutable_properties = schema.get('immutable', []) + computed_properties = schema.get('computed', []) for name, fragment in schema.get('properties', {}).items(): - mutable = name not in immutable + mutable = name not in immutable_properties + computed = name in computed_properties default = fragment.get('default', ftrack_api_old.symbol.NOT_SET) if default == '{uid}': @@ -62,7 +64,8 @@ class Factory(object): if data_type is not ftrack_api_old.symbol.NOT_SET: if data_type in ( - 'string', 'boolean', 'integer', 'number', 'variable' + 'string', 'boolean', 'integer', 'number', 'variable', + 'object' ): # Basic scalar attribute. 
if data_type == 'number': @@ -74,7 +77,7 @@ class Factory(object): data_type = 'datetime' attribute = self.create_scalar_attribute( - class_name, name, mutable, default, data_type + class_name, name, mutable, computed, default, data_type ) if attribute: attributes.add(attribute) @@ -139,11 +142,12 @@ class Factory(object): return cls def create_scalar_attribute( - self, class_name, name, mutable, default, data_type + self, class_name, name, mutable, computed, default, data_type ): '''Return appropriate scalar attribute instance.''' return ftrack_api_old.attribute.ScalarAttribute( - name, data_type=data_type, default_value=default, mutable=mutable + name, data_type=data_type, default_value=default, mutable=mutable, + computed=computed ) def create_reference_attribute(self, class_name, name, mutable, reference): diff --git a/pype/vendor/ftrack_api_old/entity/location.py b/pype/vendor/ftrack_api_old/entity/location.py index d48264abc2..8d9d52c654 100644 --- a/pype/vendor/ftrack_api_old/entity/location.py +++ b/pype/vendor/ftrack_api_old/entity/location.py @@ -526,7 +526,8 @@ class Location(ftrack_api_old.entity.base.Entity): for index, resource_identifier in enumerate(resource_identifiers): resource_identifiers[index] = ( self.resource_identifier_transformer.decode( - resource_identifier + resource_identifier, + context={'component': components[index]} ) ) diff --git a/pype/vendor/ftrack_api_old/entity/note.py b/pype/vendor/ftrack_api_old/entity/note.py index 4cacf6ac8a..c628886fd9 100644 --- a/pype/vendor/ftrack_api_old/entity/note.py +++ b/pype/vendor/ftrack_api_old/entity/note.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2015 ftrack +import warnings + import ftrack_api_old.entity.base @@ -33,26 +35,52 @@ class Note(ftrack_api_old.entity.base.Entity): class CreateNoteMixin(object): '''Mixin to add create_note method on entity class.''' - def create_note(self, content, author, recipients=None, category=None): + def create_note( + self, content, author, recipients=None, category=None, labels=None + ): '''Create note with *content*, *author*. - Note category can be set by including *category* and *recipients* - can be specified as a list of user or group instances. + NoteLabels can be set by including *labels*. + + Note category can be set by including *category*. + + *recipients* can be specified as a list of user or group instances. ''' + note_label_support = 'NoteLabel' in self.session.types + + if not labels: + labels = [] + + if labels and not note_label_support: + raise ValueError( + 'NoteLabel is not supported by the current server version.' + ) + + if category and labels: + raise ValueError( + 'Both category and labels cannot be set at the same time.' 
+ ) + if not recipients: recipients = [] - category_id = None - if category: - category_id = category['id'] - data = { 'content': content, - 'author': author, - 'category_id': category_id + 'author': author } + if category: + if note_label_support: + labels = [category] + warnings.warn( + 'category argument will be removed in an upcoming version, ' + 'please use labels instead.', + PendingDeprecationWarning + ) + else: + data['category_id'] = category['id'] + note = self.session.create('Note', data) self['notes'].append(note) @@ -65,4 +93,13 @@ class CreateNoteMixin(object): note['recipients'].append(recipient) + for label in labels: + self.session.create( + 'NoteLabelLink', + { + 'label_id': label['id'], + 'note_id': note['id'] + } + ) + return note diff --git a/pype/vendor/ftrack_api_old/event/expression.py b/pype/vendor/ftrack_api_old/event/expression.py index e10cd85844..8de4be0d71 100644 --- a/pype/vendor/ftrack_api_old/event/expression.py +++ b/pype/vendor/ftrack_api_old/event/expression.py @@ -3,14 +3,15 @@ from operator import eq, ne, ge, le, gt, lt -from pyparsing import (ParserElement, Group, Word, CaselessKeyword, Forward, +from pyparsing import (Group, Word, CaselessKeyword, Forward, FollowedBy, Suppress, oneOf, OneOrMore, Optional, alphanums, quotedString, removeQuotes) import ftrack_api_old.exception -# Optimise parsing using packrat memoisation feature. -ParserElement.enablePackrat() +# Do not enable packrat since it is not thread-safe and will result in parsing +# exceptions in a multi threaded environment. +# ParserElement.enablePackrat() class Parser(object): diff --git a/pype/vendor/ftrack_api_old/event/hub.py b/pype/vendor/ftrack_api_old/event/hub.py index 25410aa1e1..3ffbd38056 100644 --- a/pype/vendor/ftrack_api_old/event/hub.py +++ b/pype/vendor/ftrack_api_old/event/hub.py @@ -14,6 +14,7 @@ import operator import functools import json import socket +import warnings import requests import requests.exceptions @@ -40,9 +41,20 @@ ServerDetails = collections.namedtuple('ServerDetails', [ ]) + + class EventHub(object): '''Manage routing of events.''' + _future_signature_warning = ( + 'When constructing your Session object you did not explicitly define ' + 'auto_connect_event_hub as True even though you appear to be publishing ' + 'and / or subscribing to asynchronous events. In version version 2.0 of ' + 'the ftrack-python-api the default behavior will change from True ' + 'to False. Please make sure to update your tools. You can read more at ' + 'http://ftrack-python-api.rtd.ftrack.com/en/stable/release/migration.html' + ) + def __init__(self, server_url, api_user, api_key): '''Initialise hub, connecting to ftrack *server_url*. @@ -76,6 +88,8 @@ class EventHub(object): self._auto_reconnect_attempts = 30 self._auto_reconnect_delay = 10 + self._deprecation_warning_auto_connect = False + # Mapping of Socket.IO codes to meaning. self._code_name_mapping = { '0': 'disconnect', @@ -134,6 +148,9 @@ class EventHub(object): connected or connection fails. ''' + + self._deprecation_warning_auto_connect = False + if self.connected: raise ftrack_api_old.exception.EventHubConnectionError( 'Already connected.' @@ -164,17 +181,26 @@ class EventHub(object): # https://docs.python.org/2/library/socket.html#socket.socket.setblocking self._connection = websocket.create_connection(url, timeout=60) - except Exception: + except Exception as error: + error_message = ( + 'Failed to connect to event server at {server_url} with ' + 'error: "{error}".' 
+ ) + + error_details = { + 'error': unicode(error), + 'server_url': self.get_server_url() + } + self.logger.debug( L( - 'Error connecting to event server at {0}.', - self.get_server_url() + error_message, **error_details ), exc_info=1 ) raise ftrack_api_old.exception.EventHubConnectionError( - 'Failed to connect to event server at {0}.' - .format(self.get_server_url()) + error_message, + details=error_details ) # Start background processing thread. @@ -543,6 +569,11 @@ class EventHub(object): event will be caught by this method and ignored. ''' + if self._deprecation_warning_auto_connect and not synchronous: + warnings.warn( + self._future_signature_warning, FutureWarning + ) + try: return self._publish( event, synchronous=synchronous, on_reply=on_reply @@ -700,18 +731,23 @@ class EventHub(object): # Automatically publish a non None response as a reply when not in # synchronous mode. - if not synchronous and response is not None: - - try: - self.publish_reply( - event, data=response, source=subscriber.metadata + if not synchronous: + if self._deprecation_warning_auto_connect: + warnings.warn( + self._future_signature_warning, FutureWarning ) - except Exception: - self.logger.exception(L( - 'Error publishing response {0} from subscriber {1} ' - 'for event {2}.', response, subscriber, event - )) + if response is not None: + try: + self.publish_reply( + event, data=response, source=subscriber.metadata + ) + + except Exception: + self.logger.exception(L( + 'Error publishing response {0} from subscriber {1} ' + 'for event {2}.', response, subscriber, event + )) # Check whether to continue processing topic event. if event.is_stopped(): @@ -881,6 +917,7 @@ class EventHub(object): if code_name == 'connect': self.logger.debug('Connected to event server.') event = ftrack_api_old.event.base.Event('ftrack.meta.connected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'disconnect': @@ -901,6 +938,7 @@ class EventHub(object): if not self.connected: event = ftrack_api_old.event.base.Event('ftrack.meta.disconnected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'heartbeat': diff --git a/pype/vendor/ftrack_api_old/logging.py b/pype/vendor/ftrack_api_old/logging.py index 2b28ce900b..41969c5b2a 100644 --- a/pype/vendor/ftrack_api_old/logging.py +++ b/pype/vendor/ftrack_api_old/logging.py @@ -1,6 +1,23 @@ # :coding: utf-8 # :copyright: Copyright (c) 2016 ftrack +import functools +import warnings + + +def deprecation_warning(message): + def decorator(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + warnings.warn( + message, + PendingDeprecationWarning + ) + return function(*args, **kwargs) + return wrapper + + return decorator + class LazyLogMessage(object): '''A log message that can be evaluated lazily for improved performance. 
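The deprecation_warning helper added to ftrack_api_old.logging is what drives the _call, _entity_reference and delayed_job notices further down in session.py. A small illustration on a hypothetical function; the import assumes the vendored module is on the path:

    import warnings
    from ftrack_api_old.logging import deprecation_warning

    @deprecation_warning('old_helper is deprecated, use new_helper instead.')
    def old_helper():
        return 42

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        old_helper()
    assert caught[0].category is PendingDeprecationWarning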
diff --git a/pype/vendor/ftrack_api_old/session.py b/pype/vendor/ftrack_api_old/session.py index c313203a0c..0986962ca4 100644 --- a/pype/vendor/ftrack_api_old/session.py +++ b/pype/vendor/ftrack_api_old/session.py @@ -16,6 +16,7 @@ import hashlib import tempfile import threading import atexit +import warnings import requests import requests.auth @@ -42,8 +43,14 @@ import ftrack_api_old.structure.origin import ftrack_api_old.structure.entity_id import ftrack_api_old.accessor.server import ftrack_api_old._centralized_storage_scenario +import ftrack_api_old.logging from ftrack_api_old.logging import LazyLogMessage as L +try: + from weakref import WeakMethod +except ImportError: + from ftrack_api_old._weakref import WeakMethod + class SessionAuthentication(requests.auth.AuthBase): '''Attach ftrack session authentication information to requests.''' @@ -69,7 +76,7 @@ class Session(object): def __init__( self, server_url=None, api_key=None, api_user=None, auto_populate=True, plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=True, schema_cache_path=None, + auto_connect_event_hub=None, schema_cache_path=None, plugin_arguments=None ): '''Initialise session. @@ -233,7 +240,8 @@ class Session(object): self._api_key ) - if auto_connect_event_hub: + self._auto_connect_event_hub_thread = None + if auto_connect_event_hub in (None, True): # Connect to event hub in background thread so as not to block main # session usage waiting for event hub connection. self._auto_connect_event_hub_thread = threading.Thread( @@ -242,8 +250,14 @@ class Session(object): self._auto_connect_event_hub_thread.daemon = True self._auto_connect_event_hub_thread.start() + # To help with migration from auto_connect_event_hub default changing + # from True to False. + self._event_hub._deprecation_warning_auto_connect = ( + auto_connect_event_hub is None + ) + # Register to auto-close session on exit. - atexit.register(self.close) + atexit.register(WeakMethod(self.close)) self._plugin_paths = plugin_paths if self._plugin_paths is None: @@ -271,6 +285,15 @@ class Session(object): ftrack_api_old._centralized_storage_scenario.register(self) self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.ready', + data=dict( + session=self + ) + ), + synchronous=True + ) def __enter__(self): '''Return session as context manager.''' @@ -389,7 +412,8 @@ class Session(object): try: self.event_hub.disconnect() - self._auto_connect_event_hub_thread.join() + if self._auto_connect_event_hub_thread: + self._auto_connect_event_hub_thread.join() except ftrack_api_old.exception.EventHubConnectionError: pass @@ -428,6 +452,16 @@ class Session(object): # Re-configure certain session aspects that may be dependant on cache. self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.reset', + data=dict( + session=self + ) + ), + synchronous=True + ) + def auto_populating(self, auto_populate): '''Temporarily set auto populate to *auto_populate*. @@ -508,7 +542,7 @@ class Session(object): 'entity_key': entity.get('id') }) - result = self._call( + result = self.call( [payload] ) @@ -790,12 +824,13 @@ class Session(object): }] # TODO: When should this execute? How to handle background=True? - results = self._call(batch) + results = self.call(batch) # Merge entities into local cache and return merged entities. 
data = [] + merged = dict() for entity in results[0]['data']: - data.append(self.merge(entity)) + data.append(self._merge_recursive(entity, merged)) return data, results[0]['metadata'] @@ -856,6 +891,48 @@ class Session(object): else: return value + def _merge_recursive(self, entity, merged=None): + '''Merge *entity* and all its attributes recursivly.''' + log_debug = self.logger.isEnabledFor(logging.DEBUG) + + if merged is None: + merged = {} + + attached = self.merge(entity, merged) + + for attribute in entity.attributes: + # Remote attributes. + remote_value = attribute.get_remote_value(entity) + + if isinstance( + remote_value, + ( + ftrack_api_old.entity.base.Entity, + ftrack_api_old.collection.Collection, + ftrack_api_old.collection.MappedCollectionProxy + ) + ): + log_debug and self.logger.debug( + 'Merging remote value for attribute {0}.'.format(attribute) + ) + + if isinstance(remote_value, ftrack_api_old.entity.base.Entity): + self._merge_recursive(remote_value, merged=merged) + + elif isinstance( + remote_value, ftrack_api_old.collection.Collection + ): + for entry in remote_value: + self._merge_recursive(entry, merged=merged) + + elif isinstance( + remote_value, ftrack_api_old.collection.MappedCollectionProxy + ): + for entry in remote_value.collection: + self._merge_recursive(entry, merged=merged) + + return attached + def _merge_entity(self, entity, merged=None): '''Merge *entity* into session returning merged entity. @@ -1185,7 +1262,7 @@ class Session(object): # Process batch. if batch: - result = self._call(batch) + result = self.call(batch) # Clear recorded operations. self.recorded_operations.clear() @@ -1260,7 +1337,7 @@ class Session(object): def _fetch_server_information(self): '''Return server information.''' - result = self._call([{'action': 'query_server_information'}]) + result = self.call([{'action': 'query_server_information'}]) return result[0] def _discover_plugins(self, plugin_arguments=None): @@ -1362,7 +1439,7 @@ class Session(object): 'Loading schemas from server due to hash not matching.' 'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash )) - schemas = self._call([{'action': 'query_schemas'}])[0] + schemas = self.call([{'action': 'query_schemas'}])[0] if schema_cache_path: try: @@ -1525,8 +1602,24 @@ class Session(object): synchronous=True ) + @ftrack_api_old.logging.deprecation_warning( + 'Session._call is now available as public method Session.call. The ' + 'private method will be removed in version 2.0.' + ) def _call(self, data): - '''Make request to server with *data*.''' + '''Make request to server with *data* batch describing the actions. + + .. note:: + + This private method is now available as public method + :meth:`entity_reference`. This alias remains for backwards + compatibility, but will be removed in version 2.0. + + ''' + return self.call(data) + + def call(self, data): + '''Make request to server with *data* batch describing the actions.''' url = self._server_url + '/api' headers = { 'content-type': 'application/json', @@ -1553,7 +1646,7 @@ class Session(object): 'Server reported error in unexpected format. 
Raw error was: {0}' .format(response.text) ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) else: @@ -1562,7 +1655,7 @@ class Session(object): error_message = 'Server reported error: {0}({1})'.format( result['exception'], result['content'] ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) return result @@ -1620,12 +1713,12 @@ class Session(object): if "entity_data" in data: for key, value in data["entity_data"].items(): if isinstance(value, ftrack_api_old.entity.base.Entity): - data["entity_data"][key] = self._entity_reference(value) + data["entity_data"][key] = self.entity_reference(value) return data if isinstance(item, ftrack_api_old.entity.base.Entity): - data = self._entity_reference(item) + data = self.entity_reference(item) with self.auto_populating(True): @@ -1646,14 +1739,15 @@ class Session(object): value = attribute.get_local_value(item) elif entity_attribute_strategy == 'persisted_only': - value = attribute.get_remote_value(item) + if not attribute.computed: + value = attribute.get_remote_value(item) if value is not ftrack_api_old.symbol.NOT_SET: if isinstance( attribute, ftrack_api_old.attribute.ReferenceAttribute ): if isinstance(value, ftrack_api_old.entity.base.Entity): - value = self._entity_reference(value) + value = self.entity_reference(value) data[attribute.name] = value @@ -1668,14 +1762,14 @@ class Session(object): if isinstance(item, ftrack_api_old.collection.Collection): data = [] for entity in item: - data.append(self._entity_reference(entity)) + data.append(self.entity_reference(entity)) return data raise TypeError('{0!r} is not JSON serializable'.format(item)) - def _entity_reference(self, entity): - '''Return reference to *entity*. + def entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. Return a mapping containing the __entity_type__ of the entity along with the key, value pairs that make up it's primary key. @@ -1689,6 +1783,26 @@ class Session(object): return reference + @ftrack_api_old.logging.deprecation_warning( + 'Session._entity_reference is now available as public method ' + 'Session.entity_reference. The private method will be removed ' + 'in version 2.0.' + ) + def _entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. + + Return a mapping containing the __entity_type__ of the entity along + with the key, value pairs that make up it's primary key. + + .. note:: + + This private method is now available as public method + :meth:`entity_reference`. This alias remains for backwards + compatibility, but will be removed in version 2.0. + + ''' + return self.entity_reference(entity) + def decode(self, string): '''Return decoded JSON *string* as Python object.''' with self.operation_recording(False): @@ -2016,6 +2130,10 @@ class Session(object): return availabilities + @ftrack_api_old.logging.deprecation_warning( + 'Session.delayed_job has been deprecated in favour of session.call. ' + 'Please refer to the release notes for more information.' + ) def delayed_job(self, job_type): '''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned. 
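With _call and _entity_reference now public as call() and entity_reference(), and auto_connect_event_hub defaulting to None, downstream code would typically look like the sketch below; the server credentials are placeholders and the vendored import path is an assumption:

    from ftrack_api_old.session import Session

    # Passing auto_connect_event_hub explicitly avoids the FutureWarning that
    # fires when events are used while the default (None) is still in place.
    session = Session(
        server_url='https://example.ftrackapp.com',
        api_user='jdoe',
        api_key='00000000-aaaa-bbbb-cccc-dddddddddddd',
        auto_connect_event_hub=True
    )

    # The batch API is public now; this mirrors _fetch_server_information().
    server_info = session.call([{'action': 'query_server_information'}])[0]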
@@ -2033,7 +2151,7 @@ class Session(object): } try: - result = self._call( + result = self.call( [operation] )[0] @@ -2070,7 +2188,7 @@ class Session(object): ) try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2172,7 +2290,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2212,7 +2330,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2258,7 +2376,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2306,7 +2424,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. if 'Invalid action u\'send_review_session_invite\'' in error.message: diff --git a/pype/vendor/ftrack_api_old/symbol.py b/pype/vendor/ftrack_api_old/symbol.py index 10b3f55bd5..f46760f634 100644 --- a/pype/vendor/ftrack_api_old/symbol.py +++ b/pype/vendor/ftrack_api_old/symbol.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2014 ftrack +import os + class Symbol(object): '''A constant symbol.''' @@ -68,8 +70,8 @@ CONNECT_LOCATION_ID = '07b82a97-8cf9-11e3-9383-20c9d081909b' #: Identifier of builtin server location. SERVER_LOCATION_ID = '3a372bde-05bc-11e4-8908-20c9d081909b' -#: Chunk size used when working with data. -CHUNK_SIZE = 8192 +#: Chunk size used when working with data, default to 1Mb. 
+CHUNK_SIZE = int(os.getenv('FTRACK_API_FILE_CHUNK_SIZE', 0)) or 1024*1024 #: Symbol representing syncing users with ldap JOB_SYNC_USERS_LDAP = Symbol('SYNC_USERS_LDAP') diff --git a/pype/widgets/message_window.py b/pype/widgets/message_window.py index 72e655cf5c..3532d2df44 100644 --- a/pype/widgets/message_window.py +++ b/pype/widgets/message_window.py @@ -7,7 +7,7 @@ log = logging.getLogger(__name__) class Window(QtWidgets.QWidget): def __init__(self, parent, title, message, level): - super().__init__() + super(Window, self).__init__() self.parent = parent self.title = title self.message = message @@ -48,9 +48,10 @@ class Window(QtWidgets.QWidget): return -def message(title=None, message=None, level="info"): - global app - app = QtWidgets.QApplication(sys.argv) +def message(title=None, message=None, level="info", parent=None): + app = parent + if not app: + app = QtWidgets.QApplication(sys.argv) ex = Window(app, title, message, level) ex.show() # sys.exit(app.exec_()) diff --git a/res/icons/folder-favorite.png b/res/icons/folder-favorite.png new file mode 100644 index 0000000000..198b289e9e Binary files /dev/null and b/res/icons/folder-favorite.png differ diff --git a/res/icons/folder-favorite2.png b/res/icons/folder-favorite2.png new file mode 100644 index 0000000000..91bc3f0fbe Binary files /dev/null and b/res/icons/folder-favorite2.png differ diff --git a/res/icons/folder-favorite3.png b/res/icons/folder-favorite3.png new file mode 100644 index 0000000000..ce1e6d7171 Binary files /dev/null and b/res/icons/folder-favorite3.png differ diff --git a/schema/application-1.0.json b/schema/application-1.0.json new file mode 100644 index 0000000000..e2418037c6 --- /dev/null +++ b/schema/application-1.0.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:application-1.0", + "description": "An application definition.", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "label", + "application_dir", + "executable" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "label": { + "description": "Nice name of application.", + "type": "string" + }, + "application_dir": { + "description": "Name of directory used for application resources.", + "type": "string" + }, + "executable": { + "description": "Name of callable executable, this is called to launch the application", + "type": "string" + }, + "description": { + "description": "Description of application.", + "type": "string" + }, + "environment": { + "description": "Key/value pairs for environment variables related to this application. 
Supports lists for paths, such as PYTHONPATH.", + "type": "object", + "items": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + } + }, + "default_dirs": { + "type": "array", + "items": { + "type": "string" + } + }, + "copy": { + "type": "object", + "patternProperties": { + "^.*$": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + } +} diff --git a/schema/asset-1.0.json b/schema/asset-1.0.json new file mode 100644 index 0000000000..6f3665c628 --- /dev/null +++ b/schema/asset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-1.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "subsets" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "subsets": { + "type": "array", + "items": { + "$ref": "subset.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/asset-2.0.json b/schema/asset-2.0.json new file mode 100644 index 0000000000..066cb33498 --- /dev/null +++ b/schema/asset-2.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-2.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "silo", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:asset-2.0"], + "example": "avalon-core:asset-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/asset-3.0.json b/schema/asset-3.0.json new file mode 100644 index 0000000000..a3a22e917b --- /dev/null +++ b/schema/asset-3.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-3.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:asset-3.0", "pype:asset-3.0"], + "example": "avalon-core:asset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", 
+ "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/config-1.0.json b/schema/config-1.0.json new file mode 100644 index 0000000000..b3c4362f41 --- /dev/null +++ b/schema/config-1.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": false, + "required": [ + "template", + "tasks", + "apps" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "template": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.*$": { + "type": "string" + } + } + }, + "tasks": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "apps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "families": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "label": {"type": "string"}, + "hideFilter": {"type": "boolean"} + }, + "required": ["name"] + } + }, + "groups": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "color": {"type": "string"}, + "order": {"type": ["integer", "number"]} + }, + "required": ["name"] + } + }, + "copy": { + "type": "object" + } + } +} diff --git a/schema/container-1.0.json b/schema/container-1.0.json new file mode 100644 index 0000000000..d9e4e39f7f --- /dev/null +++ b/schema/container-1.0.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:container-1.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "id", + "objectName", + "name", + "author", + "loader", + "families", + "time", + "subset", + "asset", + "representation", + "version", + "silo", + "path", + "source" + ], + "properties": { + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": ["pyblish.mindbender.container"], + "example": "pyblish.mindbender.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "name": { + "description": "Full name of application object", + "type": "string", + "example": "modelDefault" + }, + "author": { + "description": "Name of the author of the published version", + "type": "string", + "example": "Marcus Ottosson" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "families": { + "description": "Families associated with the this subset", + "type": "string", + "example": "mindbender.model" + }, + "time": { + "description": "File-system safe, formatted time", + "type": "string", + "example": "20170329T131545Z" + }, + "subset": { + "description": "Name of source subset", + "type": "string", + "example": "modelDefault" + }, + "asset": { + "description": "Name of source asset", + "type": "string" , + "example": "Bruce" + }, + "representation": { + 
"description": "Name of source representation", + "type": "string" , + "example": ".ma" + }, + "version": { + "description": "Version number", + "type": "number", + "example": 12 + }, + "silo": { + "description": "Silo of parent asset", + "type": "string", + "example": "assets" + }, + "path": { + "description": "Absolute path on disk", + "type": "string", + "example": "{root}/assets/Bruce/publish/rigDefault/v002" + }, + "source": { + "description": "Absolute path to file from which this version was published", + "type": "string", + "example": "{root}/assets/Bruce/work/rigging/maya/scenes/rig_v001.ma" + } + } +} diff --git a/schema/container-2.0.json b/schema/container-2.0.json new file mode 100644 index 0000000000..7b84209ea0 --- /dev/null +++ b/schema/container-2.0.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:container-2.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "id", + "objectName", + "name", + "namespace", + "loader", + "representation" + ], + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:container-2.0", "pype:container-2.0"], + "example": "pype:container-2.0" + }, + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": ["pyblish.avalon.container"], + "example": "pyblish.avalon.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "name": { + "description": "Internal object name of container in application", + "type": "string", + "example": "modelDefault_01" + }, + "namespace": { + "description": "Internal namespace of container in application", + "type": "string", + "example": "Bruce_" + }, + "representation": { + "description": "Unique id of representation in database", + "type": "string", + "example": "59523f355f8c1b5f6c5e8348" + } + } +} \ No newline at end of file diff --git a/schema/inventory-1.0.json b/schema/inventory-1.0.json new file mode 100644 index 0000000000..888ba7945a --- /dev/null +++ b/schema/inventory-1.0.json @@ -0,0 +1,10 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": true +} diff --git a/schema/project-2.0.json b/schema/project-2.0.json new file mode 100644 index 0000000000..ad0e460f4d --- /dev/null +++ b/schema/project-2.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:project-2.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data", + "config" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:project-2.0", "pype:project-2.0"], + "example": "avalon-core:project-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["project"], + "example": "project" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + 
"pattern": "^[a-zA-Z0-9_.]*$", + "example": "hulk" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "fps": 24, + "width": 1920, + "height": 1080 + } + }, + "config": { + "type": "object", + "description": "Document metadata", + "example": { + "schema": "pype:config-1.0", + "apps": [ + { + "name": "maya2016", + "label": "Autodesk Maya 2016" + }, + { + "name": "nuke10", + "label": "The Foundry Nuke 10.0" + } + ], + "tasks": [ + {"name": "model"}, + {"name": "render"}, + {"name": "animate"}, + {"name": "rig"}, + {"name": "lookdev"}, + {"name": "layout"} + ], + "template": { + "work": + "{root}/{project}/{silo}/{asset}/work/{task}/{app}", + "publish": + "{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/{subset}.{representation}" + } + }, + "$ref": "config-1.0.json" + } + }, + + "definitions": {} +} diff --git a/schema/representation-1.0.json b/schema/representation-1.0.json new file mode 100644 index 0000000000..10ae72928e --- /dev/null +++ b/schema/representation-1.0.json @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:representation-1.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "format", + "path" + ], + + "properties": { + "schema": {"type": "string"}, + "format": { + "description": "File extension, including '.'", + "type": "string" + }, + "path": { + "description": "Unformatted path to version.", + "type": "string" + } + } +} diff --git a/schema/representation-2.0.json b/schema/representation-2.0.json new file mode 100644 index 0000000000..e12dea8564 --- /dev/null +++ b/schema/representation-2.0.json @@ -0,0 +1,78 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:representation-2.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:representation-2.0", "pype:representation-2.0"], + "example": "pype:representation-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["representation"], + "example": "representation" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of representation", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "abc" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "label": "Alembic" + } + }, + "dependencies": { + "description": "Other representation that this representation depends on", + "type": "array", + "items": {"type": "string"}, + "example": [ + "592d547a5f8c1b388093c145" + ] + }, + "context": { + "description": "Summary of the context to which this representation belong.", + "type": "object", + "properties": { + "project": {"type": "object"}, + "asset": {"type": "string"}, + "silo": {"type": ["string", "null"]}, + "subset": {"type": "string"}, + "version": {"type": "number"}, + "representation": {"type": "string"} + }, + "example": { + "project": "hulk", + "asset": "Bruce", + "silo": "assets", + "subset": "rigDefault", + "version": 12, + "representation": "ma" + } + } + } +} diff --git a/schema/session-1.0.json b/schema/session-1.0.json new file mode 100644 index 
0000000000..2b201f9c61 --- /dev/null +++ b/schema/session-1.0.json @@ -0,0 +1,143 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-1.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_SILO", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": "Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. 
extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} \ No newline at end of file diff --git a/schema/session-2.0.json b/schema/session-2.0.json new file mode 100644 index 0000000000..006a9e2dbf --- /dev/null +++ b/schema/session-2.0.json @@ -0,0 +1,142 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-2.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": "Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. 
graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} diff --git a/schema/shaders-1.0.json b/schema/shaders-1.0.json new file mode 100644 index 0000000000..e66cc735e8 --- /dev/null +++ b/schema/shaders-1.0.json @@ -0,0 +1,32 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:shaders-1.0", + "description": "Relationships between shaders and Avalon IDs", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "shader" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "shader": { + "description": "Name of directory", + "type": "array", + "items": { + "type": "string", + "description": "Avalon ID and optional face indexes, e.g.
'f9520572-ac1d-11e6-b39e-3085a99791c9.f[5002:5185]'" + } + } + }, + + "definitions": {} +} diff --git a/schema/subset-1.0.json b/schema/subset-1.0.json new file mode 100644 index 0000000000..90ae0349fa --- /dev/null +++ b/schema/subset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-1.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "versions" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "$ref": "version.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/subset-2.0.json b/schema/subset-2.0.json new file mode 100644 index 0000000000..98f39c4f3e --- /dev/null +++ b/schema/subset-2.0.json @@ -0,0 +1,51 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-2.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:subset-2.0"], + "example": "pype:subset-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "type": "object", + "description": "Document metadata", + "example": { + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/subset-3.0.json b/schema/subset-3.0.json new file mode 100644 index 0000000000..a0af9d340f --- /dev/null +++ b/schema/subset-3.0.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-3.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:subset-3.0", "pype:subset-3.0"], + "example": "pype:subset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families"], + "properties": { + "families": { + "type": "array", + "items": {"type": "string"}, + "description": "One or more families associated with this subset" + } + }, + "example": { + "families" : [ + "avalon.camera" + ], + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/thumbnail-1.0.json b/schema/thumbnail-1.0.json new file mode 100644 index 0000000000..96b540ab7e --- /dev/null +++ b/schema/thumbnail-1.0.json @@ -0,0 +1,42 @@ +{ + "$schema": 
"http://json-schema.org/draft-04/schema#", + + "title": "pype:thumbnail-1.0", + "description": "Entity with thumbnail data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:thumbnail-1.0"], + "example": "pype:thumbnail-1.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["thumbnail"], + "example": "thumbnail" + }, + "data": { + "description": "Thumbnail data", + "type": "object", + "example": { + "binary_data": "Binary({byte data of image})", + "template": "{thumbnail_root}/{project[name]}/{_id}{ext}}", + "template_data": { + "ext": ".jpg" + } + } + } + } +} diff --git a/schema/version-1.0.json b/schema/version-1.0.json new file mode 100644 index 0000000000..c784a25175 --- /dev/null +++ b/schema/version-1.0.json @@ -0,0 +1,50 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-1.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "version", + "path", + "time", + "author", + "source", + "representations" + ], + + "properties": { + "schema": {"type": "string"}, + "representations": { + "type": "array", + "items": { + "$ref": "representation.json" + } + }, + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + } +} diff --git a/schema/version-2.0.json b/schema/version-2.0.json new file mode 100644 index 0000000000..5bb4a56f96 --- /dev/null +++ b/schema/version-2.0.json @@ -0,0 +1,92 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-2.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:version-2.0"], + "example": "pype:version-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families", "author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + "description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of 
this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + }, + "families": { + "type": "array", + "items": {"type": "string"}, + "description": "One or more families associated with this version" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "families" : [ + "avalon.model" + ], + "time" : "20170510T090203Z" + } + } + } +} diff --git a/schema/version-3.0.json b/schema/version-3.0.json new file mode 100644 index 0000000000..808650da0d --- /dev/null +++ b/schema/version-3.0.json @@ -0,0 +1,84 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-3.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:version-3.0", "pype:version-3.0"], + "example": "pype:version-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + "description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. 
'{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "time" : "20170510T090203Z" + } + } + } +} diff --git a/setup/maya/userSetup.py b/setup/maya/userSetup.py index b419e9d27e..4f4aed36b7 100644 --- a/setup/maya/userSetup.py +++ b/setup/maya/userSetup.py @@ -14,12 +14,15 @@ shelf_preset = presets['maya'].get('project_shelf') if shelf_preset: project = os.environ["AVALON_PROJECT"] + icon_path = os.path.join(os.environ['PYPE_PROJECT_SCRIPTS'], project,"icons") + icon_path = os.path.abspath(icon_path) + for i in shelf_preset['imports']: import_string = "from {} import {}".format(project, i) print(import_string) exec(import_string) -cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], preset=shelf_preset)") +cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)") print("finished PYPE usersetup") diff --git a/setup/nuke/nuke_path/atom_server.py b/setup/nuke/nuke_path/atom_server.py deleted file mode 100644 index 1742c290c1..0000000000 --- a/setup/nuke/nuke_path/atom_server.py +++ /dev/null @@ -1,54 +0,0 @@ -''' - Simple socket server using threads -''' - -import socket -import sys -import threading -import StringIO -import contextlib - -import nuke - -HOST = '' -PORT = 8888 - - -@contextlib.contextmanager -def stdoutIO(stdout=None): - old = sys.stdout - if stdout is None: - stdout = StringIO.StringIO() - sys.stdout = stdout - yield stdout - sys.stdout = old - - -def _exec(data): - with stdoutIO() as s: - exec(data) - return s.getvalue() - - -def server_start(): - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.bind((HOST, PORT)) - s.listen(5) - - while 1: - client, address = s.accept() - try: - data = client.recv(4096) - if data: - result = nuke.executeInMainThreadWithResult(_exec, args=(data)) - client.send(str(result)) - except SystemExit: - result = self.encode('SERVER: Shutting down...') - client.send(str(result)) - raise - finally: - client.close() - -t = threading.Thread(None, server_start) -t.setDaemon(True) -t.start() diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py index 7f5de6013d..15702fa364 100644 --- a/setup/nuke/nuke_path/menu.py +++ b/setup/nuke/nuke_path/menu.py @@ -1,6 +1,5 @@ import os import sys -import atom_server import KnobScripter from pype.nuke.lib import ( diff --git a/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py b/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py new file mode 100644 index 0000000000..7e274bd0a3 --- /dev/null +++ b/setup/nukestudio/hiero_plugin_path/Python/Startup/project_helpers.py @@ -0,0 +1,235 @@ +try: + from PySide.QtGui import * + from PySide.QtCore import * +except: + from PySide2.QtGui import * + from PySide2.QtWidgets import * + from PySide2.QtCore import * + +from hiero.core.util import uniquify, version_get, version_set +import hiero.core +import hiero.ui +import nuke + +# A globally variable for storing the current Project +gTrackedActiveProject = None + +# This selection handler will track changes in items selected/deselected in the Bin/Timeline/Spreadsheet Views + + +def __trackActiveProjectHandler(event): + global gTrackedActiveProject + selection = event.sender.selection() + binSelection = selection + if len(binSelection) > 0 and 
hasattr(binSelection[0], 'project'): + proj = binSelection[0].project() + + # We only store this if it's a valid, active User Project + if proj in hiero.core.projects(hiero.core.Project.kUserProjects): + gTrackedActiveProject = proj + + +hiero.core.events.registerInterest( + 'kSelectionChanged/kBin', __trackActiveProjectHandler) +hiero.core.events.registerInterest( + 'kSelectionChanged/kTimeline', __trackActiveProjectHandler) +hiero.core.events.registerInterest( + 'kSelectionChanged/Spreadsheet', __trackActiveProjectHandler) + + +def activeProject(): + """hiero.ui.activeProject() -> returns the current Project + + Note: There is not technically a notion of an 'active' Project in Hiero/NukeStudio, as it is a multi-project App. + This method determines what is 'active' by going down the following rules... + + # 1 - If the current Viewer (hiero.ui.currentViewer) contains a Clip or Sequence, this item is assumed to give the active Project + # 2 - If nothing is currently in the Viewer, look to the active View, determine project from active selection + # 3 - If no current selection can be determined, fall back to a globally tracked last selection from trackActiveProjectHandler + # 4 - If all those rules fail, fall back to the last project in the list of hiero.core.projects() + + @return: hiero.core.Project""" + global gTrackedActiveProject + activeProject = None + + # Case 1 : Look for what the current Viewer tells us - this might not be what we want, and relies on hiero.ui.currentViewer() being robust. + cv = hiero.ui.currentViewer().player().sequence() + if hasattr(cv, 'project'): + activeProject = cv.project() + else: + # Case 2: We can't determine a project from the current Viewer, so try seeing what's selected in the activeView + # Note that currently, if you run activeProject from the Script Editor, the activeView is always None, so this will rarely get used! + activeView = hiero.ui.activeView() + if activeView: + # We can determine an active View.. see what's being worked with + selection = activeView.selection() + + # Handle the case where nothing is selected in the active view + if len(selection) == 0: + # It's possible that there is no selection in a Timeline/Spreadsheet, but these views have a 'sequence' method, so try that... + if isinstance(hiero.ui.activeView(), (hiero.ui.TimelineEditor, hiero.ui.SpreadsheetView)): + activeSequence = activeView.sequence() + if hasattr(activeSequence, 'project'): + activeProject = activeSequence.project() + + # The active view has a selection... assume that the first item in the selection has the active Project + else: + currentItem = selection[0] + if hasattr(currentItem, 'project'): + activeProject = currentItem.project() + + # Finally, Cases 3 and 4... + if not activeProject: + activeProjects = hiero.core.projects(hiero.core.Project.kUserProjects) + if gTrackedActiveProject in activeProjects: + activeProject = gTrackedActiveProject + else: + activeProject = activeProjects[-1] + + return activeProject + +# Method to get all recent projects + + +def recentProjects(): + """hiero.core.recentProjects() -> Returns a list of paths to recently opened projects + + Hiero stores up to 5 recent projects in uistate.ini with the [recentFile]/# key.
+ + @return: list of paths to .hrox Projects""" + + appSettings = hiero.core.ApplicationSettings() + recentProjects = [] + for i in range(0, 5): + proj = appSettings.value('recentFile/%i' % i) + if len(proj) > 0: + recentProjects.append(proj) + return recentProjects + +# Method to get recent project by index + + +def recentProject(k=0): + """hiero.core.recentProject(k) -> Returns the recent project path, specified by integer k (0-4) + + @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects. + + @return: hiero.core.Project""" + + appSettings = hiero.core.ApplicationSettings() + proj = appSettings.value('recentFile/%i' % int(k), None) + return proj + +# Method to get open project by index + + +def openRecentProject(k=0): + """hiero.core.openRecentProject(k) -> Opens the most the recent project as listed in the Open Recent list. + + @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects. + @return: hiero.core.Project""" + + appSettings = hiero.core.ApplicationSettings() + proj = appSettings.value('recentFile/%i' % int(k), None) + proj = hiero.core.openProject(proj) + return proj + + +# Duck punch these methods into the relevant ui/core namespaces +hiero.ui.activeProject = activeProject +hiero.core.recentProjects = recentProjects +hiero.core.recentProject = recentProject +hiero.core.openRecentProject = openRecentProject + + +# Method to Save a new Version of the activeHrox Project +class SaveAllProjects(QAction): + + def __init__(self): + QAction.__init__(self, "Save All Projects", None) + self.triggered.connect(self.projectSaveAll) + hiero.core.events.registerInterest( + "kShowContextMenu/kBin", self.eventHandler) + + def projectSaveAll(self): + allProjects = hiero.core.projects() + for proj in allProjects: + try: + proj.save() + print 'Saved Project: %s to: %s ' % (proj.name(), proj.path()) + except: + print 'Unable to save Project: %s to: %s. Check file permissions.' % (proj.name(), proj.path()) + + def eventHandler(self, event): + event.menu.addAction(self) + +# For projects with v# in the path name, saves out a new Project with v#+1 + + +class SaveNewProjectVersion(QAction): + + def __init__(self): + QAction.__init__(self, "Save New Version...", None) + self.triggered.connect(self.saveNewVersion) + hiero.core.events.registerInterest( + "kShowContextMenu/kBin", self.eventHandler) + self.selectedProjects = [] + + def saveNewVersion(self): + if len(self.selectedProjects) > 0: + projects = self.selectedProjects + else: + projects = [hiero.ui.activeProject()] + + if len(projects) < 1: + return + + for proj in projects: + oldName = proj.name() + path = proj.path() + v = None + prefix = None + try: + (prefix, v) = version_get(path, 'v') + except ValueError, msg: + print msg + + if (prefix is not None) and (v is not None): + v = int(v) + newPath = version_set(path, prefix, v, v + 1) + try: + proj.saveAs(newPath) + print 'Saved new project version: %s to: %s ' % (oldName, newPath) + except: + print 'Unable to save Project: %s. Check file permissions.' % (oldName) + else: + newPath = path.replace(".hrox", "_v01.hrox") + answer = nuke.ask( + '%s does not contain a version number.\nDo you want to save as %s?' % (proj, newPath)) + if answer: + try: + proj.saveAs(newPath) + print 'Saved new project version: %s to: %s ' % (oldName, newPath) + except: + print 'Unable to save Project: %s. Check file permissions.' 
% (oldName) + + def eventHandler(self, event): + self.selectedProjects = [] + if hasattr(event.sender, 'selection') and event.sender.selection() is not None and len(event.sender.selection()) != 0: + selection = event.sender.selection() + self.selectedProjects = uniquify( + [item.project() for item in selection]) + event.menu.addAction(self) + + +# Instantiate the actions +saveAllAct = SaveAllProjects() +saveNewAct = SaveNewProjectVersion() + +fileMenu = hiero.ui.findMenuAction("foundry.menu.file") +importAct = hiero.ui.findMenuAction("foundry.project.importFiles") +hiero.ui.insertMenuAction(saveNewAct, fileMenu.menu(), + before="Import File(s)...") +hiero.ui.insertMenuAction(saveAllAct, fileMenu.menu(), + before="Import File(s)...") +fileMenu.menu().insertSeparator(importAct)
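
As a quick sanity check for the schema files added above, documents can be validated with the third-party jsonschema package. The sketch below is illustrative only: the schema directory path, the helper name, and the id-to-filename mapping are assumptions rather than part of this change, and project-2.0.json would additionally need a RefResolver to follow its relative config-1.0.json reference.

import json
import os

import jsonschema  # third-party package, assumed to be installed


SCHEMA_DIR = "schema"  # assumed path to the schema files introduced in this diff


def validate(document):
    # Map e.g. "pype:version-3.0" to schema/version-3.0.json
    file_name = document["schema"].rsplit(":", 1)[-1] + ".json"
    with open(os.path.join(SCHEMA_DIR, file_name)) as handle:
        schema = json.load(handle)
    # Raises jsonschema.ValidationError if the document does not comply
    jsonschema.validate(document, schema)


# Example document built from the "example" values in version-3.0.json
validate({
    "schema": "pype:version-3.0",
    "type": "version",
    "parent": "592c33475f8c1b064c4d1696",
    "name": 12,
    "data": {
        "author": "marcus",
        "source": "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma",
        "time": "20170510T090203Z"
    }
})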