diff --git a/pype/blender/__init__.py b/pype/blender/__init__.py new file mode 100644 index 0000000000..8a29917e40 --- /dev/null +++ b/pype/blender/__init__.py @@ -0,0 +1,34 @@ +import logging +from pathlib import Path +import os + +import bpy + +from avalon import api as avalon +from pyblish import api as pyblish + +from .plugin import AssetLoader + +logger = logging.getLogger("pype.blender") + +PARENT_DIR = os.path.dirname(__file__) +PACKAGE_DIR = os.path.dirname(PARENT_DIR) +PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") + +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create") + + +def install(): + """Install Blender configuration for Avalon.""" + pyblish.register_plugin_path(str(PUBLISH_PATH)) + avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH)) + avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH)) + + +def uninstall(): + """Uninstall Blender configuration for Avalon.""" + pyblish.deregister_plugin_path(str(PUBLISH_PATH)) + avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH)) + avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH)) diff --git a/pype/blender/action.py b/pype/blender/action.py new file mode 100644 index 0000000000..4bd7e303fc --- /dev/null +++ b/pype/blender/action.py @@ -0,0 +1,47 @@ +import bpy + +import pyblish.api + +from ..action import get_errored_instances_from_context + + +class SelectInvalidAction(pyblish.api.Action): + """Select invalid objects in Blender when a publish plug-in failed.""" + label = "Select Invalid" + on = "failed" + icon = "search" + + def process(self, context, plugin): + errored_instances = get_errored_instances_from_context(context) + instances = pyblish.api.instances_by_plugin(errored_instances, plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid nodes...") + invalid = list() + for instance in instances: + invalid_nodes = plugin.get_invalid(instance) + if invalid_nodes: + if isinstance(invalid_nodes, (list, tuple)): + invalid.extend(invalid_nodes) + else: + self.log.warning( + "Failed plug-in doesn't have any selectable objects." + ) + + bpy.ops.object.select_all(action='DESELECT') + + # Make sure every node is only processed once + invalid = list(set(invalid)) + if not invalid: + self.log.info("No invalid nodes found.") + return + + invalid_names = [obj.name for obj in invalid] + self.log.info( + "Selecting invalid objects: %s", ", ".join(invalid_names) + ) + # Select the objects and also make the last one the active object. + for obj in invalid: + obj.select_set(True) + + bpy.context.view_layer.objects.active = invalid[-1] diff --git a/pype/blender/plugin.py b/pype/blender/plugin.py new file mode 100644 index 0000000000..ad5a259785 --- /dev/null +++ b/pype/blender/plugin.py @@ -0,0 +1,135 @@ +"""Shared functionality for pipeline plugins for Blender.""" + +from pathlib import Path +from typing import Dict, List, Optional + +import bpy + +from avalon import api + +VALID_EXTENSIONS = [".blend"] + + +def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str: + """Return a consistent name for a model asset.""" + name = f"{asset}_{subset}" + if namespace: + name = f"{namespace}:{name}" + return name + + +class AssetLoader(api.Loader): + """A basic AssetLoader for Blender. + + This will implement the basic logic for linking/appending assets + into another Blender scene.
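+ + A minimal sub-class (hypothetical example) only needs the usual Avalon + loader metadata plus the asset-specific methods, e.g.: + + class ModelLoader(AssetLoader): + families = ["model"] + representations = ["blend"] + + def process_asset(self, context, name, namespace=None, options=None): + ...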
+ + The `update` method should be implemented by a sub-class, because + it's different for different types (e.g. model, rig, animation, + etc.). + """ + + @staticmethod + def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]: + """Get the 'instance empty' that holds the collection instance.""" + for node in nodes: + if not isinstance(node, bpy.types.Object): + continue + if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION' + and node.instance_collection and node.name == instance_name): + return node + return None + + @staticmethod + def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]: + """Get the 'instance collection' (container) for this asset.""" + for node in nodes: + if not isinstance(node, bpy.types.Collection): + continue + if node.name == instance_name: + return node + return None + + @staticmethod + def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library: + """Find the library file from the container. + + It traverses the objects in this collection, checks that they all + come from exactly one library and returns that library. + + Warning: + No nested collections are supported at the moment! + """ + assert not container.children, "Nested collections are not supported." + assert container.objects, "The collection doesn't contain any objects." + libraries = set() + for obj in container.objects: + assert obj.library, f"'{obj.name}' is not linked." + libraries.add(obj.library) + + assert len(libraries) == 1, f"'{container.name}' contains objects from more than 1 library." + + return list(libraries)[0] + + def process_asset(self, + context: dict, + name: str, + namespace: Optional[str] = None, + options: Optional[Dict] = None): + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") + + def load(self, + context: dict, + name: Optional[str] = None, + namespace: Optional[str] = None, + options: Optional[Dict] = None) -> Optional[bpy.types.Collection]: + """Load asset via database + + Arguments: + context: Full parenthood of representation to load + name: Use pre-defined name + namespace: Use pre-defined namespace + options: Additional settings dictionary + """ + # TODO (jasper): make it possible to add the asset several times by + # just re-using the collection + assert Path(self.fname).exists(), f"{self.fname} doesn't exist." + + self.process_asset( + context=context, + name=name, + namespace=namespace, + options=options, + ) + + # Only containerise if anything was loaded by the Loader. + nodes = self[:] + if not nodes: + return None + + # Only containerise if it's not already a collection from a .blend file.
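+ # (For a .blend representation the loaded asset already comes in as its + # own collection, so instead of wrapping the nodes with `containerise` + # that collection is looked up again by the name generated from asset + # and subset.)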
+ representation = context["representation"]["name"] + if representation != "blend": + from avalon.blender.pipeline import containerise + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__, + ) + + asset = context["asset"]["name"] + subset = context["subset"]["name"] + instance_name = model_name(asset, subset, namespace) + + return self._get_instance_collection(instance_name, nodes) + + def update(self, container: Dict, representation: Dict): + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") + + def remove(self, container: Dict) -> bool: + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py index e2bd753b0e..5279a95a20 100644 --- a/pype/ftrack/actions/action_create_cust_attrs.py +++ b/pype/ftrack/actions/action_create_cust_attrs.py @@ -5,7 +5,8 @@ import json import arrow import logging import ftrack_api -from pype.ftrack import BaseAction, get_ca_mongoid +from pype.ftrack import BaseAction +from pype.ftrack.lib.avalon_sync import CustAttrIdKey from pypeapp import config from ftrack_api.exception import NoResultFoundError @@ -171,7 +172,6 @@ class CustomAttributes(BaseAction): def avalon_mongo_id_attributes(self, session): # Attribute Name and Label - cust_attr_name = get_ca_mongoid() cust_attr_label = 'Avalon/Mongo Id' # Types that don't need object_type_id @@ -207,7 +207,7 @@ class CustomAttributes(BaseAction): group = self.get_group('avalon') data = {} - data['key'] = cust_attr_name + data['key'] = CustAttrIdKey data['label'] = cust_attr_label data['type'] = custom_attribute_type data['default'] = '' diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py index c99c2df1e6..4589802f3a 100644 --- a/pype/ftrack/actions/action_create_project_structure.py +++ b/pype/ftrack/actions/action_create_project_structure.py @@ -142,6 +142,13 @@ class CreateProjectFolders(BaseAction): else: data['project_id'] = parent['project']['id'] + existing_entity = self.session.query(( + "TypedContext where name is \"{}\" and " + "parent_id is \"{}\" and project_id is \"{}\"" + ).format(name, data['parent_id'], data['project_id'])).first() + if existing_entity: + return existing_entity + new_ent = self.session.create(ent_type, data) self.session.commit() return new_ent diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py index df760f7c21..7eb9126fca 100644 --- a/pype/ftrack/actions/action_delete_asset.py +++ b/pype/ftrack/actions/action_delete_asset.py @@ -1,354 +1,606 @@ import os -import sys -import logging +import collections +import uuid +from datetime import datetime +from queue import Queue + from bson.objectid import ObjectId -import argparse -import ftrack_api from pype.ftrack import BaseAction from pype.ftrack.lib.io_nonsingleton import DbConnector -class DeleteAsset(BaseAction): +class DeleteAssetSubset(BaseAction): '''Edit meta data action.''' #: Action identifier. - identifier = 'delete.asset' + identifier = "delete.asset.subset" #: Action label. - label = 'Delete Asset/Subsets' + label = "Delete Asset/Subsets" #: Action description. 
- description = 'Removes from Avalon with all childs and asset from Ftrack' - icon = '{}/ftrack/action_icons/DeleteAsset.svg'.format( - os.environ.get('PYPE_STATICS_SERVER', '') + description = "Removes entities from Avalon with all children, and assets from Ftrack" + icon = "{}/ftrack/action_icons/DeleteAsset.svg".format( + os.environ.get("PYPE_STATICS_SERVER", "") ) #: roles that are allowed to register this action - role_list = ['Pypeclub', 'Administrator'] - #: Db - db = DbConnector() + role_list = ["Pypeclub", "Administrator", "Project Manager"] + #: Db connection + dbcon = DbConnector() - value = None + splitter = {"type": "label", "value": "---"} + action_data_by_id = {} + asset_prefix = "asset:" + subset_prefix = "subset:" def discover(self, session, entities, event): - ''' Validation ''' - if len(entities) != 1: - return False + """ Validation """ + task_ids = [] + for ent_info in event["data"]["selection"]: + entType = ent_info.get("entityType", "") + if entType == "task": + task_ids.append(ent_info["entityId"]) - valid = ["task"] - entityType = event["data"]["selection"][0].get("entityType", "") - if entityType.lower() not in valid: - return False - - return True + for entity in entities: + ftrack_id = entity["id"] + if ftrack_id not in task_ids: + continue + if entity.entity_type.lower() != "task": + return True + return False def _launch(self, event): - self.reset_session() try: - self.db.install() args = self._translate_event( self.session, event ) + if "values" not in event["data"]: + self.dbcon.install() + return self._interface(self.session, *args) - interface = self._interface( - self.session, *args - ) - - confirmation = self.confirm_delete( - True, *args - ) - - if interface: - return interface - + confirmation = self.confirm_delete(*args) if confirmation: return confirmation + self.dbcon.install() response = self.launch( self.session, *args ) finally: - self.db.uninstall() + self.dbcon.uninstall() return self._handle_result( self.session, response, *args ) def interface(self, session, entities, event): - if not event['data'].get('values', {}): - self.attempt = 1 - items = [] - entity = entities[0] - title = 'Choose items to delete from "{}"'.format(entity['name']) - project = entity['project'] + self.show_message(event, "Preparing data...", True) + items = [] + title = "Choose items to delete" + + # Filter selection and get ftrack ids + selection = event["data"].get("selection") or [] + ftrack_ids = [] + project_in_selection = False + for entity in selection: + entity_type = (entity.get("entityType") or "").lower() + if entity_type != "task": + if entity_type == "show": + project_in_selection = True + continue - self.db.Session['AVALON_PROJECT'] = project["full_name"] + ftrack_id = entity.get("entityId") + if not ftrack_id: + continue + + ftrack_ids.append(ftrack_id) + + if project_in_selection: + msg = "It is not possible to use this action on a project entity." + self.show_message(event, msg, True) + + # Filter event even more (skip task entities) + # - task entities are not relevant for avalon + for entity in entities: + ftrack_id = entity["id"] + if ftrack_id not in ftrack_ids: + continue + + if entity.entity_type.lower() == "task": + ftrack_ids.remove(ftrack_id) + + if not ftrack_ids: + # It is a bug if this happens!
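+ # (discover() only offers this action when a non-task entity is + # selected, so reaching this branch means the selection and the + # event data disagree.)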
+ return { + "success": False, + "message": "Invalid selection for this action (Bug)" + } + + if entities[0].entity_type.lower() == "project": + project = entities[0] + else: + project = entities[0]["project"] + + project_name = project["full_name"] + self.dbcon.Session["AVALON_PROJECT"] = project_name + + selected_av_entities = self.dbcon.find({ + "type": "asset", + "data.ftrackId": {"$in": ftrack_ids} + }) + selected_av_entities = [ent for ent in selected_av_entities] + if not selected_av_entities: + return { + "success": False, + "message": "Didn't find any entities in Avalon" + } + + # Remove cached actions older than 2 minutes + old_action_ids = [] + for id, data in self.action_data_by_id.items(): + created_at = data.get("created_at") + if not created_at: + old_action_ids.append(id) + continue + cur_time = datetime.now() + existing_in_sec = (cur_time - created_at).total_seconds() + if existing_in_sec > 60 * 2: + old_action_ids.append(id) + + for id in old_action_ids: + self.action_data_by_id.pop(id, None) + + # Store data for action id + action_id = str(uuid.uuid1()) + self.action_data_by_id[action_id] = { + "attempt": 1, + "created_at": datetime.now(), + "project_name": project_name, + "subset_ids_by_name": {}, + "subset_ids_by_parent": {} + } + + id_item = { + "type": "hidden", + "name": "action_id", + "value": action_id + } + + items.append(id_item) + asset_ids = [ent["_id"] for ent in selected_av_entities] + subsets_for_selection = self.dbcon.find({ + "type": "subset", + "parent": {"$in": asset_ids} + }) + + asset_ending = "" + if len(selected_av_entities) > 1: + asset_ending = "s" + + asset_title = { + "type": "label", + "value": "# Delete asset{}:".format(asset_ending) + } + asset_note = { + "type": "label", + "value": ( + "
<br/><br/>NOTE: Action will delete checked entities" + " in Ftrack and Avalon with all children entities and" + " published content.<br/><br/>
" + ) + } + + items.append(asset_title) + items.append(asset_note) + + asset_items = collections.defaultdict(list) + for asset in selected_av_entities: + ent_path_items = [project_name] + ent_path_items.extend(asset.get("data", {}).get("parents") or []) + ent_path_to_parent = "/".join(ent_path_items) + "/" + asset_items[ent_path_to_parent].append(asset) + + for asset_parent_path, assets in sorted(asset_items.items()): + items.append({ + "type": "label", + "value": "## - {}".format(asset_parent_path) }) - - if av_entity is None: - return { - 'success': False, - 'message': 'Didn\'t found assets in avalon' - } - - asset_label = { - 'type': 'label', - 'value': '## Delete whole asset: ##' - } - asset_item = { - 'label': av_entity['name'], - 'name': 'whole_asset', - 'type': 'boolean', - 'value': False - } - splitter = { - 'type': 'label', - 'value': '{}'.format(200*"-") - } - subset_label = { - 'type': 'label', - 'value': '## Subsets: ##' - } - if av_entity is not None: - items.append(asset_label) - items.append(asset_item) - items.append(splitter) - - all_subsets = self.db.find({ - 'type': 'subset', - 'parent': av_entity['_id'] + for asset in assets: + items.append({ + "label": asset["name"], + "name": "{}{}".format( + self.asset_prefix, str(asset["_id"]) + ), + "type": 'boolean', + "value": False }) - subset_items = [] - for subset in all_subsets: - item = { - 'label': subset['name'], - 'name': str(subset['_id']), - 'type': 'boolean', - 'value': False - } - subset_items.append(item) - if len(subset_items) > 0: - items.append(subset_label) - items.extend(subset_items) - else: - return { - 'success': False, - 'message': 'Didn\'t found assets in avalon' - } + subset_ids_by_name = collections.defaultdict(list) + subset_ids_by_parent = collections.defaultdict(list) + for subset in subsets_for_selection: + subset_id = subset["_id"] + name = subset["name"] + parent_id = subset["parent"] + subset_ids_by_name[name].append(subset_id) + subset_ids_by_parent[parent_id].append(subset_id) + if not subset_ids_by_name: return { - 'items': items, - 'title': title + "items": items, + "title": title } - def confirm_delete(self, first_attempt, entities, event): - if first_attempt is True: - if 'values' not in event['data']: - return + subset_ending = "" + if len(subset_ids_by_name.keys()) > 1: + subset_ending = "s" - values = event['data']['values'] + subset_title = { + "type": "label", + "value": "# Subset{} to delete:".format(subset_ending) + } + subset_note = { + "type": "label", + "value": ( + "
<br/><br/>WARNING: Subset{} will be removed" + " for all selected entities.<br/><br/>
" + ).format(subset_ending) + } - if len(values) <= 0: - return - if 'whole_asset' not in values: - return - else: - values = self.values + items.append(self.splitter) + items.append(subset_title) + items.append(subset_note) - title = 'Confirmation of deleting {}' - if values['whole_asset'] is True: - title = title.format( - 'whole asset {}'.format( - entities[0]['name'] - ) - ) - else: - subsets = [] - for key, value in values.items(): - if value is True: - subsets.append(key) - len_subsets = len(subsets) - if len_subsets == 0: + for name in subset_ids_by_name: + items.append({ + "label": "{}".format(name), + "name": "{}{}".format(self.subset_prefix, name), + "type": "boolean", + "value": False + }) + + self.action_data_by_id[action_id]["subset_ids_by_parent"] = ( + subset_ids_by_parent + ) + self.action_data_by_id[action_id]["subset_ids_by_name"] = ( + subset_ids_by_name + ) + + return { + "items": items, + "title": title + } + + def confirm_delete(self, entities, event): + values = event["data"]["values"] + action_id = values.get("action_id") + spec_data = self.action_data_by_id.get(action_id) + if not spec_data: + # it is a bug if this happens! + return { + "success": False, + "message": "Something bad has happened. Please try again." + } + + # Process Delete confirmation + delete_key = values.get("delete_key") + if delete_key: + delete_key = delete_key.lower().strip() + # Go to launch part if user entered `delete` + if delete_key == "delete": + return + # Skip whole process if user didn't enter any text + elif delete_key == "": + self.action_data_by_id.pop(action_id, None) return { - 'success': True, - 'message': 'Nothing was selected to delete' + "success": True, + "message": "Deleting cancelled (delete entry was empty)" } - elif len_subsets == 1: - title = title.format( - '{} subset'.format(len_subsets) - ) - else: - title = title.format( - '{} subsets'.format(len_subsets) - ) + # Get data to show again + to_delete = spec_data["to_delete"] + + else: + to_delete = collections.defaultdict(list) + for key, value in values.items(): + if not value: + continue + if key.startswith(self.asset_prefix): + _key = key.replace(self.asset_prefix, "") + to_delete["assets"].append(_key) + + elif key.startswith(self.subset_prefix): + _key = key.replace(self.subset_prefix, "") + to_delete["subsets"].append(_key) + + self.action_data_by_id[action_id]["to_delete"] = to_delete + + asset_to_delete = len(to_delete.get("assets") or []) > 0 + subset_to_delete = len(to_delete.get("subsets") or []) > 0 + + if not asset_to_delete and not subset_to_delete: + self.action_data_by_id.pop(action_id, None) + return { + "success": True, + "message": "Nothing was selected to delete" + } + + attempt = spec_data["attempt"] + if attempt > 3: + self.action_data_by_id.pop(action_id, None) + return { + "success": False, + "message": "You didn't enter \"DELETE\" properly 3 times!" 
+ } + + self.action_data_by_id[action_id]["attempt"] += 1 + + title = "Confirmation of deleting" + + if asset_to_delete: + asset_len = len(to_delete["assets"]) + asset_ending = "" + if asset_len > 1: + asset_ending = "s" + title += " {} Asset{}".format(asset_len, asset_ending) + if subset_to_delete: + title += " and" + + if subset_to_delete: + sub_len = len(to_delete["subsets"]) + type_ending = "" + sub_ending = "" + if sub_len == 1: + subset_ids_by_name = spec_data["subset_ids_by_name"] + if len(subset_ids_by_name[to_delete["subsets"][0]]) > 1: + sub_ending = "s" + + elif sub_len > 1: + type_ending = "s" + sub_ending = "s" + + title += " {} type{} of subset{}".format( + sub_len, type_ending, sub_ending + ) - self.values = values items = [] + id_item = {"type": "hidden", "name": "action_id", "value": action_id} delete_label = { 'type': 'label', 'value': '# Please enter "DELETE" to confirm #' } - delete_item = { - 'name': 'delete_key', - 'type': 'text', - 'value': '', - 'empty_text': 'Type Delete here...' + "name": "delete_key", + "type": "text", + "value": "", + "empty_text": "Type Delete here..." } + + items.append(id_item) items.append(delete_label) items.append(delete_item) return { - 'items': items, - 'title': title + "items": items, + "title": title } def launch(self, session, entities, event): - if 'values' not in event['data']: - return - - values = event['data']['values'] - if len(values) <= 0: - return - if 'delete_key' not in values: - return - - if values['delete_key'].lower() != 'delete': - if values['delete_key'].lower() == '': - return { - 'success': False, - 'message': 'Deleting cancelled' - } - if self.attempt < 3: - self.attempt += 1 - return_dict = self.confirm_delete(False, entities, event) - return_dict['title'] = '{} ({} attempt)'.format( - return_dict['title'], self.attempt - ) - return return_dict + self.show_message(event, "Processing...", True) + values = event["data"]["values"] + action_id = values.get("action_id") + spec_data = self.action_data_by_id.get(action_id) + if not spec_data: + # it is a bug if this happens! return { - 'success': False, - 'message': 'You didn\'t enter "DELETE" properly 3 times!' + "success": False, + "message": "Something bad has happened. Please try again." 
} - entity = entities[0] - project = entity['project'] + report_messages = collections.defaultdict(list) - self.db.Session['AVALON_PROJECT'] = project["full_name"] + project_name = spec_data["project_name"] + to_delete = spec_data["to_delete"] + self.dbcon.Session["AVALON_PROJECT"] = project_name - all_ids = [] - if self.values.get('whole_asset', False) is True: - av_entity = self.db.find_one({ - 'type': 'asset', - 'name': entity['name'] + assets_to_delete = to_delete.get("assets") or [] + subsets_to_delete = to_delete.get("subsets") or [] + + # Convert asset ids to ObjectId objects + assets_to_delete = [ObjectId(id) for id in assets_to_delete if id] + + subset_ids_by_parent = spec_data["subset_ids_by_parent"] + subset_ids_by_name = spec_data["subset_ids_by_name"] + + subset_ids_to_archive = [] + asset_ids_to_archive = [] + ftrack_ids_to_delete = [] + if len(assets_to_delete) > 0: + # Prepare data when deleting whole avalon asset + avalon_assets = self.dbcon.find({"type": "asset"}) + avalon_assets_by_parent = collections.defaultdict(list) + for asset in avalon_assets: + parent_id = asset["data"]["visualParent"] + avalon_assets_by_parent[parent_id].append(asset) + if asset["_id"] in assets_to_delete: + ftrack_id = asset["data"]["ftrackId"] + ftrack_ids_to_delete.append(ftrack_id) + + children_queue = Queue() + for mongo_id in assets_to_delete: + children_queue.put(mongo_id) + + while not children_queue.empty(): + mongo_id = children_queue.get() + if mongo_id in asset_ids_to_archive: + continue + + asset_ids_to_archive.append(mongo_id) + for subset_id in subset_ids_by_parent.get(mongo_id, []): + if subset_id not in subset_ids_to_archive: + subset_ids_to_archive.append(subset_id) + + children = avalon_assets_by_parent.get(mongo_id) + if not children: + continue + + for child in children: + child_id = child["_id"] + if child_id not in asset_ids_to_archive: + children_queue.put(child_id) + + # Prepare names of assets in ftrack and ids of subsets in mongo + asset_names_to_delete = [] + if len(subsets_to_delete) > 0: + for name in subsets_to_delete: + asset_names_to_delete.append(name) + for subset_id in subset_ids_by_name[name]: + if subset_id in subset_ids_to_archive: + continue + subset_ids_to_archive.append(subset_id) + + # Get ftrack ids of entities where only the ftrack asset will be deleted + not_deleted_entities_id = [] + ftrack_id_name_map = {} + if asset_names_to_delete: + for entity in entities: + ftrack_id = entity["id"] + ftrack_id_name_map[ftrack_id] = entity["name"] + if ftrack_id in ftrack_ids_to_delete: + continue + not_deleted_entities_id.append(ftrack_id) + + mongo_proc_txt = "MongoProcessing: " + ftrack_proc_txt = "Ftrack processing: " + if asset_ids_to_archive: + self.log.debug("{}Archiving assets <{}>".format( + mongo_proc_txt, + ", ".join([str(id) for id in asset_ids_to_archive]) + )) + self.dbcon.update_many( + { + "_id": {"$in": asset_ids_to_archive}, + "type": "asset" + }, + {"$set": {"type": "archived_asset"}} + ) + + if subset_ids_to_archive: + self.log.debug("{}Archiving subsets <{}>".format( + mongo_proc_txt, + ", ".join([str(id) for id in subset_ids_to_archive]) + )) + self.dbcon.update_many( + { + "_id": {"$in": subset_ids_to_archive}, + "type": "subset" + }, + {"$set": {"type": "archived_subset"}} + ) + + if ftrack_ids_to_delete: + self.log.debug("{}Deleting Ftrack Entities <{}>".format( + ftrack_proc_txt, ", ".join(ftrack_ids_to_delete) + )) + + joined_ids_to_delete = ", ".join( + ["\"{}\"".format(id) for id in ftrack_ids_to_delete] + ) + ftrack_ents_to_delete =
self.session.query( + "select id, link from TypedContext where id in ({})".format( + joined_ids_to_delete + ) + ).all() + for entity in ftrack_ents_to_delete: + self.session.delete(entity) + try: + self.session.commit() + except Exception: + ent_path = "/".join( + [ent["name"] for ent in entity["link"]] + ) + msg = "Failed to delete entity" + report_messages[msg].append(ent_path) + self.session.rollback() + self.log.warning( + "{} <{}>".format(msg, ent_path), + exc_info=True + ) + + if not_deleted_entities_id: + joined_not_deleted = ", ".join([ + "\"{}\"".format(ftrack_id) + for ftrack_id in not_deleted_entities_id + ]) + joined_asset_names = ", ".join([ + "\"{}\"".format(name) + for name in asset_names_to_delete + ]) + # Find assets of selected entities with names of checked subsets + assets = self.session.query(( + "select id from Asset where" + " context_id in ({}) and name in ({})" + ).format(joined_not_deleted, joined_asset_names)).all() + + self.log.debug("{}Deleting Ftrack Assets <{}>".format( + ftrack_proc_txt, + ", ".join([asset["id"] for asset in assets]) + )) + for asset in assets: + self.session.delete(asset) + try: + self.session.commit() + except Exception: + self.session.rollback() + msg = "Failed to delete asset" + report_messages[msg].append(asset["id"]) + self.log.warning( + "{} <{}>".format(msg, asset["id"]), + exc_info=True + ) + + return self.report_handle(report_messages, project_name, event) + + def report_handle(self, report_messages, project_name, event): + if not report_messages: + return { + "success": True, + "message": "Deletion was successful!" + } + + title = "Delete report ({}):".format(project_name) + items = [] + items.append({ + "type": "label", + "value": "# Deleting was not completely successful" + }) + items.append({ + "type": "label", + "value": "
<br/><br/>Check logs for more information<br/><br/>
" + }) + for msg, _items in report_messages.items(): + if not _items or not msg: + continue + + items.append({ + "type": "label", + "value": "# {}".format(msg) }) - if av_entity is not None: - all_ids.append(av_entity['_id']) - all_ids.extend(self.find_child(av_entity)) + if isinstance(_items, str): + _items = [_items] + items.append({ + "type": "label", + "value": '
<br/><br/>{}<br/><br/>'.format("<br/>
".join(_items)) + }) + items.append(self.splitter) - session.delete(entity) - session.commit() - else: - subset_names = [] - for key, value in self.values.items(): - if key == 'delete_key' or value is False: - continue - - entity_id = ObjectId(key) - av_entity = self.db.find_one({'_id': entity_id}) - subset_names.append(av_entity['name']) - if av_entity is None: - continue - all_ids.append(entity_id) - all_ids.extend(self.find_child(av_entity)) - - for ft_asset in entity['assets']: - if ft_asset['name'] in subset_names: - session.delete(ft_asset) - session.commit() - - if len(all_ids) == 0: - return { - 'success': True, - 'message': 'No entities to delete in avalon' - } - - delete_query = {'_id': {'$in': all_ids}} - self.db.delete_many(delete_query) + self.show_interface(items, title, event) return { - 'success': True, - 'message': 'All assets were deleted!' + "success": False, + "message": "Deleting finished. Read report messages." } - def find_child(self, entity): - output = [] - id = entity['_id'] - visuals = [x for x in self.db.find({'data.visualParent': id})] - assert len(visuals) == 0, 'This asset has another asset as child' - childs = self.db.find({'parent': id}) - for child in childs: - output.append(child['_id']) - output.extend(self.find_child(child)) - return output - - def find_assets(self, asset_names): - assets = [] - for name in asset_names: - entity = self.db.find_one({ - 'type': 'asset', - 'name': name - }) - if entity is not None and entity not in assets: - assets.append(entity) - return assets - def register(session, plugins_presets={}): '''Register plugin. Called when used as an plugin.''' - DeleteAsset(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. - loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' - ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) + DeleteAssetSubset(session, plugins_presets).register() diff --git a/pype/ftrack/actions/action_delete_asset_byname.py b/pype/ftrack/actions/action_delete_asset_byname.py deleted file mode 100644 index c05c135991..0000000000 --- a/pype/ftrack/actions/action_delete_asset_byname.py +++ /dev/null @@ -1,175 +0,0 @@ -import os -import sys -import logging -import argparse -import ftrack_api -from pype.ftrack import BaseAction -from pype.ftrack.lib.io_nonsingleton import DbConnector - - -class AssetsRemover(BaseAction): - '''Edit meta data action.''' - - #: Action identifier. - identifier = 'remove.assets' - #: Action label. - label = "Pype Admin" - variant = '- Delete Assets by Name' - #: Action description. 
- description = 'Removes assets from Ftrack and Avalon db with all childs' - #: roles that are allowed to register this action - role_list = ['Pypeclub', 'Administrator'] - icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format( - os.environ.get('PYPE_STATICS_SERVER', '') - ) - #: Db - db = DbConnector() - - def discover(self, session, entities, event): - ''' Validation ''' - if len(entities) != 1: - return False - - valid = ["show", "task"] - entityType = event["data"]["selection"][0].get("entityType", "") - if entityType.lower() not in valid: - return False - - return True - - def interface(self, session, entities, event): - if not event['data'].get('values', {}): - title = 'Enter Asset names to delete' - - items = [] - for i in range(15): - - item = { - 'label': 'Asset {}'.format(i+1), - 'name': 'asset_{}'.format(i+1), - 'type': 'text', - 'value': '' - } - items.append(item) - - return { - 'items': items, - 'title': title - } - - def launch(self, session, entities, event): - entity = entities[0] - if entity.entity_type.lower() != 'Project': - project = entity['project'] - else: - project = entity - - if 'values' not in event['data']: - return - - values = event['data']['values'] - if len(values) <= 0: - return { - 'success': True, - 'message': 'No Assets to delete!' - } - - asset_names = [] - - for k, v in values.items(): - if v.replace(' ', '') != '': - asset_names.append(v) - - self.db.install() - self.db.Session['AVALON_PROJECT'] = project["full_name"] - - assets = self.find_assets(asset_names) - - all_ids = [] - for asset in assets: - all_ids.append(asset['_id']) - all_ids.extend(self.find_child(asset)) - - if len(all_ids) == 0: - self.db.uninstall() - return { - 'success': True, - 'message': 'None of assets' - } - - delete_query = {'_id': {'$in': all_ids}} - self.db.delete_many(delete_query) - - self.db.uninstall() - return { - 'success': True, - 'message': 'All assets were deleted!' - } - - def find_child(self, entity): - output = [] - id = entity['_id'] - visuals = [x for x in self.db.find({'data.visualParent': id})] - assert len(visuals) == 0, 'This asset has another asset as child' - childs = self.db.find({'parent': id}) - for child in childs: - output.append(child['_id']) - output.extend(self.find_child(child)) - return output - - def find_assets(self, asset_names): - assets = [] - for name in asset_names: - entity = self.db.find_one({ - 'type': 'asset', - 'name': name - }) - if entity is not None and entity not in assets: - assets.append(entity) - return assets - - -def register(session, plugins_presets={}): - '''Register plugin. Called when used as an plugin.''' - - AssetsRemover(session, plugins_presets).register() - - -def main(arguments=None): - '''Set up logging and register action.''' - if arguments is None: - arguments = [] - - parser = argparse.ArgumentParser() - # Allow setting of logging level from arguments. - loggingLevels = {} - for level in ( - logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING, - logging.ERROR, logging.CRITICAL - ): - loggingLevels[logging.getLevelName(level).lower()] = level - - parser.add_argument( - '-v', '--verbosity', - help='Set the logging output verbosity.', - choices=loggingLevels.keys(), - default='info' - ) - namespace = parser.parse_args(arguments) - - # Set up basic logging - logging.basicConfig(level=loggingLevels[namespace.verbosity]) - - session = ftrack_api.Session() - - register(session) - - # Wait for events - logging.info( - 'Registered actions and listening for events. Use Ctrl-C to abort.' 
- ) - session.event_hub.wait() - - -if __name__ == '__main__': - raise SystemExit(main(sys.argv[1:])) diff --git a/pype/ftrack/actions/action_delivery.py b/pype/ftrack/actions/action_delivery.py new file mode 100644 index 0000000000..29fdfe39ae --- /dev/null +++ b/pype/ftrack/actions/action_delivery.py @@ -0,0 +1,528 @@ +import os +import copy +import shutil +import collections +import string + +import clique +from bson.objectid import ObjectId + +from avalon import pipeline +from avalon.vendor import filelink +from avalon.tools.libraryloader.io_nonsingleton import DbConnector + +from pypeapp import Anatomy +from pype.ftrack import BaseAction +from pype.ftrack.lib.avalon_sync import CustAttrIdKey + + +class Delivery(BaseAction): + '''Deliver selected asset version components to a client location.''' + + #: Action identifier. + identifier = "delivery.action" + #: Action label. + label = "Delivery" + #: Action description. + description = "Deliver data to client" + #: roles that are allowed to register this action + role_list = ["Pypeclub", "Administrator", "Project manager"] + icon = '{}/ftrack/action_icons/Delivery.svg'.format( + os.environ.get('PYPE_STATICS_SERVER', '') + ) + + db_con = DbConnector() + + def discover(self, session, entities, event): + ''' Validation ''' + for entity in entities: + if entity.entity_type.lower() == "assetversion": + return True + + return False + + def interface(self, session, entities, event): + if event["data"].get("values", {}): + return + + title = "Deliver data to Client" + + items = [] + item_splitter = {"type": "label", "value": "---"} + + # Prepare component names for processing + components = None + project = None + for entity in entities: + if project is None: + project_id = None + for ent_info in entity["link"]: + if ent_info["type"].lower() == "project": + project_id = ent_info["id"] + break + + if project_id is None: + project = entity["asset"]["parent"]["project"] + else: + project = session.query(( + "select id, full_name from Project where id is \"{}\"" + ).format(project_id)).one() + + _components = set( + [component["name"] for component in entity["components"]] + ) + if components is None: + components = _components + continue + + components = components.intersection(_components) + if not components: + break + + project_name = project["full_name"] + items.append({ + "type": "hidden", + "name": "__project_name__", + "value": project_name + }) + + # Prepare anatomy data + anatomy = Anatomy(project_name) + new_anatomies = [] + first = None + for key in (anatomy.templates.get("delivery") or {}): + new_anatomies.append({ + "label": key, + "value": key + }) + if first is None: + first = key + + skipped = False + # Add message if there are no common components + if not components or not new_anatomies: + skipped = True + items.append({ + "type": "label", + "value": "
<br/><br/>Something went wrong:<br/><br/>
" + }) + + items.append({ + "type": "hidden", + "name": "__skipped__", + "value": skipped + }) + + if not components: + if len(entities) == 1: + items.append({ + "type": "label", + "value": ( + "- Selected entity doesn't have components to deliver." + ) + }) + else: + items.append({ + "type": "label", + "value": ( + "- Selected entities don't have common components." + ) + }) + + # Add message if delivery anatomies are not set + if not new_anatomies: + items.append({ + "type": "label", + "value": ( + "- `\"delivery\"` anatomy key is not set in config." + ) + }) + + # Skip if there are any data shortcomings + if skipped: + return { + "items": items, + "title": title + } + + items.append({ + "value": "
<br/><br/>Choose Components to deliver<br/><br/>
", + "type": "label" + }) + + for component in components: + items.append({ + "type": "boolean", + "value": False, + "label": component, + "name": component + }) + + items.append(item_splitter) + + items.append({ + "value": "
<br/><br/>Location for delivery<br/><br/>
", + "type": "label" + }) + + items.append({ + "type": "label", + "value": ( + "NOTE: It is possible to replace `root` key in anatomy." + ) + }) + + items.append({ + "type": "text", + "name": "__location_path__", + "empty_text": "Type location path here...(Optional)" + }) + + items.append(item_splitter) + + items.append({ + "value": "
<br/><br/>Anatomy of delivery files<br/><br/>
", + "type": "label" + }) + + items.append({ + "type": "label", + "value": ( + "
<br/><br/>NOTE: These can be set in Anatomy.yaml" + " within `delivery` key.<br/><br/>
" + ) + }) + + items.append({ + "type": "enumerator", + "name": "__new_anatomies__", + "data": new_anatomies, + "value": first + }) + + return { + "items": items, + "title": title + } + + def launch(self, session, entities, event): + if "values" not in event["data"]: + return + + self.report_items = collections.defaultdict(list) + + values = event["data"]["values"] + skipped = values.pop("__skipped__") + if skipped: + return None + + component_names = [] + location_path = values.pop("__location_path__") + anatomy_name = values.pop("__new_anatomies__") + project_name = values.pop("__project_name__") + + for key, value in values.items(): + if value is True: + component_names.append(key) + + if not component_names: + return { + "success": True, + "message": "Not selected components to deliver." + } + + location_path = location_path.strip() + if location_path: + location_path = os.path.normpath(location_path) + if not os.path.exists(location_path): + return { + "success": False, + "message": ( + "Entered location path does not exists. \"{}\"" + ).format(location_path) + } + + self.db_con.install() + self.db_con.Session["AVALON_PROJECT"] = project_name + + repres_to_deliver = [] + for entity in entities: + asset = entity["asset"] + subset_name = asset["name"] + version = entity["version"] + + parent = asset["parent"] + parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey) + if parent_mongo_id: + parent_mongo_id = ObjectId(parent_mongo_id) + else: + asset_ent = self.db_con.find_one({ + "type": "asset", + "data.ftrackId": parent["id"] + }) + if not asset_ent: + ent_path = "/".join( + [ent["name"] for ent in parent["link"]] + ) + msg = "Not synchronized entities to avalon" + self.report_items[msg].append(ent_path) + self.log.warning("{} <{}>".format(msg, ent_path)) + continue + + parent_mongo_id = asset_ent["_id"] + + subset_ent = self.db_con.find_one({ + "type": "subset", + "parent": parent_mongo_id, + "name": subset_name + }) + + version_ent = self.db_con.find_one({ + "type": "version", + "name": version, + "parent": subset_ent["_id"] + }) + + repre_ents = self.db_con.find({ + "type": "representation", + "parent": version_ent["_id"] + }) + + repres_by_name = {} + for repre in repre_ents: + repre_name = repre["name"] + repres_by_name[repre_name] = repre + + for component in entity["components"]: + comp_name = component["name"] + if comp_name not in component_names: + continue + + repre = repres_by_name.get(comp_name) + repres_to_deliver.append(repre) + + if not location_path: + location_path = os.environ.get("AVALON_PROJECTS") or "" + + print(location_path) + + anatomy = Anatomy(project_name) + for repre in repres_to_deliver: + # Get destination repre path + anatomy_data = copy.deepcopy(repre["context"]) + anatomy_data["root"] = location_path + + anatomy_filled = anatomy.format_all(anatomy_data) + test_path = anatomy_filled["delivery"][anatomy_name] + + if not test_path.solved: + msg = ( + "Missing keys in Representation's context" + " for anatomy template \"{}\"." + ).format(anatomy_name) + + if test_path.missing_keys: + keys = ", ".join(test_path.missing_keys) + sub_msg = ( + "Representation: {}
<br/>- Missing keys: \"{}\"<br/>
" + ).format(str(repre["_id"]), keys) + + if test_path.invalid_types: + items = [] + for key, value in test_path.invalid_types.items(): + items.append("\"{}\" {}".format(key, str(value))) + + keys = ", ".join(items) + sub_msg = ( + "Representation: {}
" + "- Invalid value DataType: \"{}\"
" + ).format(str(repre["_id"]), keys) + + self.report_items[msg].append(sub_msg) + self.log.warning( + "{} Representation: \"{}\" Filled: <{}>".format( + msg, str(repre["_id"]), str(result) + ) + ) + continue + + # Get source repre path + frame = repre['context'].get('frame') + + if frame: + repre["context"]["frame"] = len(str(frame)) * "#" + + repre_path = self.path_from_represenation(repre) + # TODO add backup solution where root of path from component + # is repalced with AVALON_PROJECTS root + if not frame: + self.process_single_file( + repre_path, anatomy, anatomy_name, anatomy_data + ) + + else: + self.process_sequence( + repre_path, anatomy, anatomy_name, anatomy_data + ) + + self.db_con.uninstall() + + return self.report() + + def process_single_file( + self, repre_path, anatomy, anatomy_name, anatomy_data + ): + anatomy_filled = anatomy.format(anatomy_data) + delivery_path = anatomy_filled["delivery"][anatomy_name] + delivery_folder = os.path.dirname(delivery_path) + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + self.copy_file(repre_path, delivery_path) + + def process_sequence( + self, repre_path, anatomy, anatomy_name, anatomy_data + ): + dir_path, file_name = os.path.split(str(repre_path)) + + base_name, ext = os.path.splitext(file_name) + file_name_items = None + if "#" in base_name: + file_name_items = [part for part in base_name.split("#") if part] + + elif "%" in base_name: + file_name_items = base_name.split("%") + + if not file_name_items: + msg = "Source file was not found" + self.report_items[msg].append(repre_path) + self.log.warning("{} <{}>".format(msg, repre_path)) + return + + src_collections, remainder = clique.assemble(os.listdir(dir_path)) + src_collection = None + for col in src_collections: + if col.tail != ext: + continue + + # skip if collection don't have same basename + if not col.head.startswith(file_name_items[0]): + continue + + src_collection = col + break + + if src_collection is None: + # TODO log error! 
+ msg = "Source collection of files was not found" + self.report_items[msg].append(repre_path) + self.log.warning("{} <{}>".format(msg, repre_path)) + return + + frame_indicator = "@####@" + + anatomy_data["frame"] = frame_indicator + anatomy_filled = anatomy.format(anatomy_data) + + delivery_path = anatomy_filled["delivery"][anatomy_name] + print(delivery_path) + delivery_folder = os.path.dirname(delivery_path) + dst_head, dst_tail = delivery_path.split(frame_indicator) + dst_padding = src_collection.padding + dst_collection = clique.Collection( + head=dst_head, + tail=dst_tail, + padding=dst_padding + ) + + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + src_head = src_collection.head + src_tail = src_collection.tail + for index in src_collection.indexes: + src_padding = src_collection.format("{padding}") % index + src_file_name = "{}{}{}".format(src_head, src_padding, src_tail) + src = os.path.normpath( + os.path.join(dir_path, src_file_name) + ) + + dst_padding = dst_collection.format("{padding}") % index + dst = "{}{}{}".format(dst_head, dst_padding, dst_tail) + + self.copy_file(src, dst) + + def path_from_represenation(self, representation): + try: + template = representation["data"]["template"] + + except KeyError: + return None + + try: + context = representation["context"] + context["root"] = os.environ.get("AVALON_PROJECTS") or "" + path = pipeline.format_template_with_optional_keys( + context, template + ) + + except KeyError: + # Template references unavailable data + return None + + return os.path.normpath(path) + + def copy_file(self, src_path, dst_path): + if os.path.exists(dst_path): + return + try: + filelink.create( + src_path, + dst_path, + filelink.HARDLINK + ) + except OSError: + shutil.copyfile(src_path, dst_path) + + def report(self): + items = [] + title = "Delivery report" + for msg, _items in self.report_items.items(): + if not _items: + continue + + if items: + items.append({"type": "label", "value": "---"}) + + items.append({ + "type": "label", + "value": "# {}".format(msg) + }) + if not isinstance(_items, (list, tuple)): + _items = [_items] + __items = [] + for item in _items: + __items.append(str(item)) + + items.append({ + "type": "label", + "value": '
<br/><br/>{}<br/><br/>'.format("<br/>
".join(__items)) + }) + + if not items: + return { + "success": True, + "message": "Delivery Finished" + } + + return { + "items": items, + "title": title, + "success": False, + "message": "Delivery Finished" + } + +def register(session, plugins_presets={}): + '''Register plugin. Called when used as an plugin.''' + + Delivery(session, plugins_presets).register() diff --git a/pype/ftrack/actions/action_prepare_project.py b/pype/ftrack/actions/action_prepare_project.py index 75bcf98cb3..4cc6cfd8df 100644 --- a/pype/ftrack/actions/action_prepare_project.py +++ b/pype/ftrack/actions/action_prepare_project.py @@ -2,12 +2,9 @@ import os import json from ruamel import yaml -import ftrack_api from pype.ftrack import BaseAction from pypeapp import config -from pype.ftrack.lib import get_avalon_attr - -from ftrack_api import session as fa_session +from pype.ftrack.lib.avalon_sync import get_avalon_attr class PrepareProject(BaseAction): @@ -55,6 +52,8 @@ class PrepareProject(BaseAction): attributes_to_set = {} for attr in hier_cust_attrs: key = attr["key"] + if key.startswith("avalon_"): + continue attributes_to_set[key] = { "label": attr["label"], "object": attr, @@ -65,6 +64,8 @@ class PrepareProject(BaseAction): if attr["entity_type"].lower() != "show": continue key = attr["key"] + if key.startswith("avalon_"): + continue attributes_to_set[key] = { "label": attr["label"], "object": attr, diff --git a/pype/ftrack/actions/action_seed.py b/pype/ftrack/actions/action_seed.py index cf0a4b0445..1238e73e72 100644 --- a/pype/ftrack/actions/action_seed.py +++ b/pype/ftrack/actions/action_seed.py @@ -9,7 +9,7 @@ class SeedDebugProject(BaseAction): #: Action identifier. identifier = "seed.debug.project" #: Action label. - label = "SeedDebugProject" + label = "Seed Debug Project" #: Action description. 
description = "Description" #: priority @@ -265,6 +265,15 @@ class SeedDebugProject(BaseAction): def create_assets(self, project, asset_count): self.log.debug("*** Creating assets:") + try: + asset_count = int(asset_count) + except ValueError: + asset_count = 0 + + if asset_count <= 0: + self.log.debug("No assets to create") + return + main_entity = self.session.create("Folder", { "name": "Assets", "parent": project @@ -305,6 +314,31 @@ class SeedDebugProject(BaseAction): def create_shots(self, project, seq_count, shots_count): self.log.debug("*** Creating shots:") + + # Convert counts to integers + try: + seq_count = int(seq_count) + except ValueError: + seq_count = 0 + + try: + shots_count = int(shots_count) + except ValueError: + shots_count = 0 + + # Check if both are higher than 0 + missing = [] + if seq_count <= 0: + missing.append("sequences") + + if shots_count <= 0: + missing.append("shots") + + if missing: + self.log.debug("No {} to create".format(" and ".join(missing))) + return + + # Create Folder "Shots" main_entity = self.session.create("Folder", { "name": "Shots", "parent": project diff --git a/pype/ftrack/actions/action_sync_to_avalon.py b/pype/ftrack/actions/action_sync_to_avalon.py index 3ddcc1c794..d2fcfb372f 100644 --- a/pype/ftrack/actions/action_sync_to_avalon.py +++ b/pype/ftrack/actions/action_sync_to_avalon.py @@ -1,2153 +1,9 @@ import os -import collections -import re -import queue import time -import toml import traceback -from bson.objectid import ObjectId -from bson.errors import InvalidId -from pymongo import UpdateOne - -import avalon from pype.ftrack import BaseAction -from pype.ftrack.lib.io_nonsingleton import DbConnector -import ftrack_api -from ftrack_api import session as fa_session -from pypeapp import Anatomy - - -class SyncEntitiesFactory: - dbcon = DbConnector() - - project_query = ( - "select full_name, name, custom_attributes" - ", project_schema._task_type_schema.types.name" - " from Project where full_name is \"{}\"" - ) - entities_query = ( - "select id, name, parent_id, link" - " from TypedContext where project_id is \"{}\"" - ) - ignore_custom_attr_key = "avalon_ignore_sync" - id_cust_attr = "avalon_mongo_id" - - entity_schemas = { - "project": "avalon-core:project-2.0", - "asset": "avalon-core:asset-3.0", - "config": "avalon-core:config-1.0" - } - - report_splitter = {"type": "label", "value": "---"} - - def __init__(self, log_obj, _session, project_full_name): - self.log = log_obj - self.session = ftrack_api.Session( - server_url=_session.server_url, - api_key=_session.api_key, - api_user=_session.api_user, - auto_connect_event_hub=True - ) - - self.cancel_auto_sync = False - - self.schema_patterns = {} - self.duplicates = {} - self.failed_regex = {} - self.tasks_failed_regex = collections.defaultdict(list) - self.report_items = { - "info": collections.defaultdict(list), - "warning": collections.defaultdict(list), - "error": collections.defaultdict(list) - } - - self.create_list = [] - self.recreated_ftrack_ents = {} - self.updates = collections.defaultdict(dict) - - self._avalon_ents_by_id = None - self._avalon_ents_by_ftrack_id = None - self._avalon_ents_by_name = None - self._avalon_ents_by_parent_id = None - - self._avalon_archived_ents = None - self._avalon_archived_by_id = None - self._avalon_archived_by_parent_id = None - self._avalon_archived_by_name = None - - self._subsets_by_parent_id = None - self._changeability_by_mongo_id = None - - self.all_filtered_entities = {} - # self.all_filtered_ids = [] - self.filtered_ids = [] - 
self.not_selected_ids = [] - - self._ent_pats_by_ftrack_id = {} - - # Get Ftrack project - ft_project = self.session.query( - self.project_query.format(project_full_name) - ).one() - ft_project_id = ft_project["id"] - - # Skip if project is ignored - if ft_project["custom_attributes"].get( - self.ignore_custom_attr_key - ) is True: - msg = ( - "Project \"{}\" has set `Ignore Sync` custom attribute to True" - ).format(project_full_name) - self.log.warning(msg) - return {"success": False, "message": msg} - - # Check if `avalon_mongo_id` custom attribute exist or is accessible - if self.id_cust_attr not in ft_project["custom_attributes"]: - items = [] - items.append({ - "type": "label", - "value": "# Can't access Custom attribute <{}>".format( - self.id_cust_attr - ) - }) - items.append({ - "type": "label", - "value": ( - "
<br/><br/>- Check if user \"{}\" has permissions" - " to access the Custom attribute<br/><br/>
" - ).format(_session.api_key) - }) - items.append({ - "type": "label", - "value": "
<br/><br/>- Check if the Custom attribute exist<br/><br/>
" - }) - return { - "items": items, - "title": "Synchronization failed", - "success": False, - "message": "Synchronization failed" - } - - # Find all entities in project - all_project_entities = self.session.query( - self.entities_query.format(ft_project_id) - ).all() - - # Store entities by `id` and `parent_id` - entities_dict = collections.defaultdict(lambda: { - "children": list(), - "parent_id": None, - "entity": None, - "entity_type": None, - "name": None, - "custom_attributes": {}, - "hier_attrs": {}, - "avalon_attrs": {}, - "tasks": [] - }) - - for entity in all_project_entities: - parent_id = entity["parent_id"] - entity_type = entity.entity_type - entity_type_low = entity_type.lower() - if entity_type_low == "task": - entities_dict[parent_id]["tasks"].append(entity["name"]) - continue - - entity_id = entity["id"] - entities_dict[entity_id].update({ - "entity": entity, - "parent_id": parent_id, - "entity_type": entity_type_low, - "entity_type_orig": entity_type, - "name": entity["name"] - }) - entities_dict[parent_id]["children"].append(entity_id) - - entities_dict[ft_project_id]["entity"] = ft_project - entities_dict[ft_project_id]["entity_type"] = ( - ft_project.entity_type.lower() - ) - entities_dict[ft_project_id]["entity_type_orig"] = ( - ft_project.entity_type - ) - entities_dict[ft_project_id]["name"] = ft_project["full_name"] - - self.ft_project_id = ft_project_id - self.entities_dict = entities_dict - - @property - def avalon_ents_by_id(self): - if self._avalon_ents_by_id is None: - self._avalon_ents_by_id = {} - for entity in self.avalon_entities: - self._avalon_ents_by_id[str(entity["_id"])] = entity - - return self._avalon_ents_by_id - - @property - def avalon_ents_by_ftrack_id(self): - if self._avalon_ents_by_ftrack_id is None: - self._avalon_ents_by_ftrack_id = {} - for entity in self.avalon_entities: - key = entity.get("data", {}).get("ftrackId") - if not key: - continue - self._avalon_ents_by_ftrack_id[key] = str(entity["_id"]) - - return self._avalon_ents_by_ftrack_id - - @property - def avalon_ents_by_name(self): - if self._avalon_ents_by_name is None: - self._avalon_ents_by_name = {} - for entity in self.avalon_entities: - self._avalon_ents_by_name[entity["name"]] = str(entity["_id"]) - - return self._avalon_ents_by_name - - @property - def avalon_ents_by_parent_id(self): - if self._avalon_ents_by_parent_id is None: - self._avalon_ents_by_parent_id = collections.defaultdict(list) - for entity in self.avalon_entities: - parent_id = entity["data"]["visualParent"] - if parent_id is not None: - parent_id = str(parent_id) - self._avalon_ents_by_parent_id[parent_id].append(entity) - - return self._avalon_ents_by_parent_id - - @property - def avalon_archived_ents(self): - if self._avalon_archived_ents is None: - self._avalon_archived_ents = [ - ent for ent in self.dbcon.find({"type": "archived_asset"}) - ] - return self._avalon_archived_ents - - @property - def avalon_archived_by_name(self): - if self._avalon_archived_by_name is None: - self._avalon_archived_by_name = collections.defaultdict(list) - for ent in self.avalon_archived_ents: - self._avalon_archived_by_name[ent["name"]].append(ent) - return self._avalon_archived_by_name - - @property - def avalon_archived_by_id(self): - if self._avalon_archived_by_id is None: - self._avalon_archived_by_id = { - str(ent["_id"]): ent for ent in self.avalon_archived_ents - } - return self._avalon_archived_by_id - - @property - def avalon_archived_by_parent_id(self): - if self._avalon_archived_by_parent_id is None: - 
self._avalon_archived_by_parent_id = collections.defaultdict(list) - for entity in self.avalon_archived_ents: - parent_id = entity["data"]["visualParent"] - if parent_id is not None: - parent_id = str(parent_id) - self._avalon_archived_by_parent_id[parent_id].append(entity) - - return self._avalon_archived_by_parent_id - - @property - def subsets_by_parent_id(self): - if self._subsets_by_parent_id is None: - self._subsets_by_parent_id = collections.defaultdict(list) - for subset in self.dbcon.find({"type": "subset"}): - self._subsets_by_parent_id[str(subset["parent"])].append( - subset - ) - - return self._subsets_by_parent_id - - @property - def changeability_by_mongo_id(self): - if self._changeability_by_mongo_id is None: - self._changeability_by_mongo_id = collections.defaultdict( - lambda: True - ) - self._changeability_by_mongo_id[self.avalon_project_id] = False - self._bubble_changeability(list(self.subsets_by_parent_id.keys())) - return self._changeability_by_mongo_id - - @property - def all_ftrack_names(self): - return [ - ent_dict["name"] for ent_dict in self.entities_dict.values() if ( - ent_dict.get("name") - ) - ] - - def duplicity_regex_check(self): - self.log.debug("* Checking duplicities and invalid symbols") - # Duplicity and regex check - entity_ids_by_name = {} - duplicates = [] - failed_regex = [] - task_names = {} - for ftrack_id, entity_dict in self.entities_dict.items(): - regex_check = True - name = entity_dict["name"] - entity_type = entity_dict["entity_type"] - # Tasks must be checked too - for task_name in entity_dict["tasks"]: - passed = task_names.get(task_name) - if passed is None: - passed = self.check_regex(task_name, "task") - task_names[task_name] = passed - - if not passed: - self.tasks_failed_regex[task_name].append(ftrack_id) - - if name in entity_ids_by_name: - duplicates.append(name) - else: - entity_ids_by_name[name] = [] - regex_check = self.check_regex(name, entity_type) - - entity_ids_by_name[name].append(ftrack_id) - if not regex_check: - failed_regex.append(name) - - for name in failed_regex: - self.failed_regex[name] = entity_ids_by_name[name] - - for name in duplicates: - self.duplicates[name] = entity_ids_by_name[name] - - self.filter_by_duplicate_regex() - - def check_regex(self, name, entity_type, in_schema=None): - schema_name = "asset-3.0" - if in_schema: - schema_name = in_schema - elif entity_type == "project": - schema_name = "project-2.0" - elif entity_type == "task": - schema_name = "task" - - name_pattern = self.schema_patterns.get(schema_name) - if not name_pattern: - default_pattern = "^[a-zA-Z0-9_.]*$" - schema_obj = avalon.schema._cache.get(schema_name + ".json") - if not schema_obj: - name_pattern = default_pattern - else: - name_pattern = schema_obj.get( - "properties", {}).get( - "name", {}).get( - "pattern", default_pattern - ) - self.schema_patterns[schema_name] = name_pattern - - if re.match(name_pattern, name): - return True - return False - - def filter_by_duplicate_regex(self): - filter_queue = queue.Queue() - failed_regex_msg = "{} - Entity has invalid symbols in the name" - duplicate_msg = "There are multiple entities with the name: \"{}\":" - - for ids in self.failed_regex.values(): - for id in ids: - ent_path = self.get_ent_path(id) - self.log.warning(failed_regex_msg.format(ent_path)) - filter_queue.put(id) - - for name, ids in self.duplicates.items(): - self.log.warning(duplicate_msg.format(name)) - for id in ids: - ent_path = self.get_ent_path(id) - self.log.warning(ent_path) - filter_queue.put(id) - - 
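# check_regex above resolves the allowed-name pattern from the cached avalon
# schema and falls back to "^[a-zA-Z0-9_.]*$". A minimal sketch of the
# validation itself (the fallback pattern is taken from the code above; the
# function name is illustrative):
import re

DEFAULT_NAME_PATTERN = "^[a-zA-Z0-9_.]*$"

def is_valid_name(name, pattern=DEFAULT_NAME_PATTERN):
    # re.match anchors at the start; the "$" in the pattern anchors the end.
    return bool(re.match(pattern, name))

print(is_valid_name("sh010_lighting"))   # True
print(is_valid_name("sh010 lighting"))   # False - space is a prohibited symbol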
filtered_ids = [] - while not filter_queue.empty(): - ftrack_id = filter_queue.get() - if ftrack_id in filtered_ids: - continue - - entity_dict = self.entities_dict.pop(ftrack_id, {}) - if not entity_dict: - continue - - self.all_filtered_entities[ftrack_id] = entity_dict - parent_id = entity_dict.get("parent_id") - if parent_id and parent_id in self.entities_dict: - if ftrack_id in self.entities_dict[parent_id]["children"]: - self.entities_dict[parent_id]["children"].remove(ftrack_id) - - filtered_ids.append(ftrack_id) - for child_id in entity_dict.get("children", []): - filter_queue.put(child_id) - - # self.all_filtered_ids.extend(filtered_ids) - - for name, ids in self.tasks_failed_regex.items(): - for id in ids: - if id not in self.entities_dict: - continue - self.entities_dict[id]["tasks"].remove(name) - ent_path = self.get_ent_path(id) - self.log.warning(failed_regex_msg.format( - "/".join([ent_path, name]) - )) - - def filter_by_ignore_sync(self): - # skip filtering if `ignore_sync` attribute do not exist - if self.entities_dict[self.ft_project_id]["avalon_attrs"].get( - self.ignore_custom_attr_key, "_notset_" - ) == "_notset_": - return - - self.filter_queue = queue.Queue() - self.filter_queue.put((self.ft_project_id, False)) - while not self.filter_queue.empty(): - parent_id, remove = self.filter_queue.get() - if remove: - parent_dict = self.entities_dict.pop(parent_id, {}) - self.all_filtered_entities[parent_id] = parent_dict - self.filtered_ids.append(parent_id) - else: - parent_dict = self.entities_dict.get(parent_id, {}) - - for child_id in parent_dict.get("children", []): - # keep original `remove` value for all childs - _remove = (remove is True) - if not _remove: - if self.entities_dict[child_id]["avalon_attrs"].get( - self.ignore_custom_attr_key - ): - self.entities_dict[parent_id]["children"].remove( - child_id - ) - _remove = True - self.filter_queue.put((child_id, _remove)) - - # self.all_filtered_ids.extend(self.filtered_ids) - - def filter_by_selection(self, event): - # BUGGY!!!! 
cause that entities are in deleted list - # TODO may be working when filtering happen after preparations - # - But this part probably does not have any functional reason - # - Time of synchronization probably won't be changed much - selected_ids = [] - for entity in event["data"]["selection"]: - # Skip if project is in selection - if entity["entityType"] == "show": - return - selected_ids.append(entity["entityId"]) - - sync_ids = [self.ft_project_id] - parents_queue = queue.Queue() - children_queue = queue.Queue() - for id in selected_ids: - # skip if already filtered with ignore sync custom attribute - if id in self.filtered_ids: - continue - - parents_queue.put(id) - children_queue.put(id) - - while not parents_queue.empty(): - id = parents_queue.get() - while True: - # Stops when parent is in sync_ids - if id in self.filtered_ids or id in sync_ids or id is None: - break - sync_ids.append(id) - id = self.entities_dict[id]["parent_id"] - - while not children_queue.empty(): - parent_id = children_queue.get() - for child_id in self.entities_dict[parent_id]["children"]: - if child_id in sync_ids or child_id in self.filtered_ids: - continue - sync_ids.append(child_id) - children_queue.put(child_id) - - # separate not selected and to process entities - for key, value in self.entities_dict.items(): - if key not in sync_ids: - self.not_selected_ids.append(key) - - for id in self.not_selected_ids: - # pop from entities - value = self.entities_dict.pop(id) - # remove entity from parent's children - parent_id = value["parent_id"] - if parent_id not in sync_ids: - continue - - self.entities_dict[parent_id]["children"].remove(id) - - def set_cutom_attributes(self): - self.log.debug("* Preparing custom attributes") - # Get custom attributes and values - custom_attrs, hier_attrs = self.get_avalon_attr(True) - ent_types = self.session.query("select id, name from ObjectType").all() - ent_types_by_name = { - ent_type["name"]: ent_type["id"] for ent_type in ent_types - } - - attrs = set() - # store default values per entity type - attrs_per_entity_type = collections.defaultdict(dict) - avalon_attrs = collections.defaultdict(dict) - # store also custom attribute configuration id for future use (create) - attrs_per_entity_type_ca_id = collections.defaultdict(dict) - avalon_attrs_ca_id = collections.defaultdict(dict) - - for cust_attr in custom_attrs: - key = cust_attr["key"] - attrs.add(key) - ca_ent_type = cust_attr["entity_type"] - if key.startswith("avalon_"): - if ca_ent_type == "show": - avalon_attrs[ca_ent_type][key] = cust_attr["default"] - avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"] - else: - obj_id = cust_attr["object_type_id"] - avalon_attrs[obj_id][key] = cust_attr["default"] - avalon_attrs_ca_id[obj_id][key] = cust_attr["id"] - continue - - if ca_ent_type == "show": - attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"] - attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"] - else: - obj_id = cust_attr["object_type_id"] - attrs_per_entity_type[obj_id][key] = cust_attr["default"] - attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"] - - obj_id_ent_type_map = {} - sync_ids = [] - for entity_id, entity_dict in self.entities_dict.items(): - sync_ids.append(entity_id) - entity_type = entity_dict["entity_type"] - entity_type_orig = entity_dict["entity_type_orig"] - - if entity_type == "project": - attr_key = "show" - else: - map_key = obj_id_ent_type_map.get(entity_type_orig) - if not map_key: - # Put space between capitals - # (e.g. 
'AssetBuild' -> 'Asset Build') - map_key = re.sub( - r"(\w)([A-Z])", r"\1 \2", entity_type_orig - ) - obj_id_ent_type_map[entity_type_orig] = map_key - - # Get object id of entity type - attr_key = ent_types_by_name.get(map_key) - - # Backup soluction when id is not found by prequeried objects - if not attr_key: - query = "ObjectType where name is \"{}\"".format(map_key) - attr_key = self.session.query(query).one()["id"] - ent_types_by_name[map_key] = attr_key - - prepared_attrs = attrs_per_entity_type.get(attr_key) - prepared_avalon_attr = avalon_attrs.get(attr_key) - prepared_attrs_ca_id = attrs_per_entity_type_ca_id.get(attr_key) - prepared_avalon_attr_ca_id = avalon_attrs_ca_id.get(attr_key) - if prepared_attrs: - self.entities_dict[entity_id]["custom_attributes"] = ( - prepared_attrs.copy() - ) - if prepared_attrs_ca_id: - self.entities_dict[entity_id]["custom_attributes_id"] = ( - prepared_attrs_ca_id.copy() - ) - if prepared_avalon_attr: - self.entities_dict[entity_id]["avalon_attrs"] = ( - prepared_avalon_attr.copy() - ) - if prepared_avalon_attr_ca_id: - self.entities_dict[entity_id]["avalon_attrs_id"] = ( - prepared_avalon_attr_ca_id.copy() - ) - - # TODO query custom attributes by entity_id - entity_ids_joined = ", ".join([ - "\"{}\"".format(id) for id in sync_ids - ]) - attributes_joined = ", ".join([ - "\"{}\"".format(name) for name in attrs - ]) - - cust_attr_query = ( - "select value, entity_id from ContextCustomAttributeValue " - "where entity_id in ({}) and configuration.key in ({})" - ) - call_expr = [{ - "action": "query", - "expression": cust_attr_query.format( - entity_ids_joined, attributes_joined - ) - }] - if hasattr(self.session, "_call"): - [values] = self.session._call(call_expr) - else: - [values] = self.session.call(call_expr) - - for value in values["data"]: - entity_id = value["entity_id"] - key = value["configuration"]["key"] - store_key = "custom_attributes" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - self.entities_dict[entity_id][store_key][key] = value["value"] - - # process hierarchical attributes - self.set_hierarchical_attribute(hier_attrs, sync_ids) - - def set_hierarchical_attribute(self, hier_attrs, sync_ids): - # collect all hierarchical attribute keys - # and prepare default values to project - attribute_names = [] - for attr in hier_attrs: - key = attr["key"] - attribute_names.append(key) - - store_key = "hier_attrs" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - - self.entities_dict[self.ft_project_id][store_key][key] = ( - attr["default"] - ) - - # Prepare dict with all hier keys and None values - prepare_dict = {} - prepare_dict_avalon = {} - for attr in attribute_names: - if attr.startswith("avalon_"): - prepare_dict_avalon[attr] = None - else: - prepare_dict[attr] = None - - for id, entity_dict in self.entities_dict.items(): - # Skip project because has stored defaults at the moment - if entity_dict["entity_type"] == "project": - continue - entity_dict["hier_attrs"] = prepare_dict.copy() - for key, val in prepare_dict_avalon.items(): - entity_dict["avalon_attrs"][key] = val - - # Prepare values to query - entity_ids_joined = ", ".join([ - "\"{}\"".format(id) for id in sync_ids - ]) - attributes_joined = ", ".join([ - "\"{}\"".format(name) for name in attribute_names - ]) - call_expr = [{ - "action": "query", - "expression": ( - "select value, entity_id from ContextCustomAttributeValue " - "where entity_id in ({}) and configuration.key in ({})" - ).format(entity_ids_joined, attributes_joined) - }] - if 
hasattr(self.session, "_call"): - [values] = self.session._call(call_expr) - else: - [values] = self.session.call(call_expr) - - avalon_hier = [] - for value in values["data"]: - if value["value"] is None: - continue - entity_id = value["entity_id"] - key = value["configuration"]["key"] - store_key = "hier_attrs" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - avalon_hier.append(key) - self.entities_dict[entity_id][store_key][key] = value["value"] - - # Get dictionary with not None hierarchical values to pull to childs - top_id = self.ft_project_id - project_values = {} - for key, value in self.entities_dict[top_id]["hier_attrs"].items(): - if value is not None: - project_values[key] = value - - for key in avalon_hier: - value = self.entities_dict[top_id]["avalon_attrs"][key] - if value is not None: - project_values[key] = value - - hier_down_queue = queue.Queue() - hier_down_queue.put((project_values, top_id)) - - while not hier_down_queue.empty(): - hier_values, parent_id = hier_down_queue.get() - for child_id in self.entities_dict[parent_id]["children"]: - _hier_values = hier_values.copy() - for name in attribute_names: - store_key = "hier_attrs" - if name.startswith("avalon_"): - store_key = "avalon_attrs" - value = self.entities_dict[child_id][store_key][name] - if value is not None: - _hier_values[name] = value - - self.entities_dict[child_id]["hier_attrs"].update(_hier_values) - hier_down_queue.put((_hier_values, child_id)) - - def remove_from_archived(self, mongo_id): - entity = self.avalon_archived_by_id.pop(mongo_id, None) - if not entity: - return - - if self._avalon_archived_ents is not None: - if entity in self._avalon_archived_ents: - self._avalon_archived_ents.remove(entity) - - if self._avalon_archived_by_name is not None: - name = entity["name"] - if name in self._avalon_archived_by_name: - name_ents = self._avalon_archived_by_name[name] - if entity in name_ents: - if len(name_ents) == 1: - self._avalon_archived_by_name.pop(name) - else: - self._avalon_archived_by_name[name].remove(entity) - - # TODO use custom None instead of __NOTSET__ - if self._avalon_archived_by_parent_id is not None: - parent_id = entity.get("data", {}).get( - "visualParent", "__NOTSET__" - ) - if parent_id is not None: - parent_id = str(parent_id) - - if parent_id in self._avalon_archived_by_parent_id: - parent_list = self._avalon_archived_by_parent_id[parent_id] - if entity not in parent_list: - self._avalon_archived_by_parent_id[parent_id].remove( - entity - ) - - def prepare_ftrack_ent_data(self): - not_set_ids = [] - for id, entity_dict in self.entities_dict.items(): - entity = entity_dict["entity"] - if entity is None: - not_set_ids.append(id) - continue - - self.entities_dict[id]["final_entity"] = {} - self.entities_dict[id]["final_entity"]["name"] = ( - entity_dict["name"] - ) - data = {} - data["ftrackId"] = entity["id"] - data["entityType"] = entity_dict["entity_type_orig"] - - for key, val in entity_dict.get("custom_attributes", []).items(): - data[key] = val - - for key, val in entity_dict.get("hier_attrs", []).items(): - data[key] = val - - if id == self.ft_project_id: - data["code"] = entity["name"] - self.entities_dict[id]["final_entity"]["data"] = data - self.entities_dict[id]["final_entity"]["type"] = "project" - - proj_schema = entity["project_schema"] - task_types = proj_schema["_task_type_schema"]["types"] - self.entities_dict[id]["final_entity"]["config"] = { - "tasks": [{"name": tt["name"]} for tt in task_types], - "apps": self.get_project_apps(data) - } - 
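# set_hierarchical_attribute above pushes values down the entity tree: a
# value set on a parent is inherited by every child until a child overrides
# it with a non-None value of its own. A standalone sketch of that top-down
# pass, using a simplified stand-in for entities_dict:
import queue

def propagate_hier_values(entities, project_id):
    # entities: {id: {"children": [...], "hier_attrs": {...}}} (simplified)
    down = queue.Queue()
    down.put((dict(entities[project_id]["hier_attrs"]), project_id))
    while not down.empty():
        inherited, parent_id = down.get()
        for child_id in entities[parent_id]["children"]:
            values = inherited.copy()
            for key, value in entities[child_id]["hier_attrs"].items():
                if value is not None:
                    values[key] = value  # a child's own value wins
            entities[child_id]["hier_attrs"].update(values)
            down.put((values, child_id))

entities = {
    "proj": {"children": ["sq01"], "hier_attrs": {"fps": 25}},
    "sq01": {"children": ["sh010"], "hier_attrs": {"fps": None}},
    "sh010": {"children": [], "hier_attrs": {"fps": 24}},
}
propagate_hier_values(entities, "proj")
print(entities["sq01"]["hier_attrs"]["fps"])    # 25 (inherited)
print(entities["sh010"]["hier_attrs"]["fps"])   # 24 (own override)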
continue - - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - hierarchy = "" - if len(parents) > 0: - hierarchy = os.path.sep.join(parents) - - data["parents"] = parents - data["hierarchy"] = hierarchy - data["tasks"] = self.entities_dict[id].pop("tasks", []) - self.entities_dict[id]["final_entity"]["data"] = data - self.entities_dict[id]["final_entity"]["type"] = "asset" - - if not_set_ids: - self.log.debug(( - "- Debug information: Filtering bug, there are empty dicts" - "in entities dict (functionality should not be affected) <{}>" - ).format("| ".join(not_set_ids))) - for id in not_set_ids: - self.entities_dict.pop(id) - - def get_project_apps(self, proj_data): - apps = [] - missing_toml_msg = "Missing config file for application" - error_msg = ( - "Unexpected error happend during preparation of application" - ) - for app in proj_data.get("applications"): - try: - toml_path = avalon.lib.which_app(app) - # TODO report - if not toml_path: - self.log.warning(missing_toml_msg + '"{}"'.format(app)) - self.report_items["warning"][missing_toml_msg].append(app) - continue - - apps.append({ - "name": app, - "label": toml.load(toml_path)["label"] - }) - except Exception: - # TODO report - self.report_items["warning"][error_msg].append(app) - self.log.warning(( - "Error has happened during preparing application \"{}\"" - ).format(app), exc_info=True) - return apps - - def get_ent_path(self, ftrack_id): - ent_path = self._ent_pats_by_ftrack_id.get(ftrack_id) - if not ent_path: - entity = self.entities_dict[ftrack_id]["entity"] - ent_path = "/".join( - [ent["name"] for ent in entity["link"]] - ) - self._ent_pats_by_ftrack_id[ftrack_id] = ent_path - - return ent_path - - def prepare_avalon_entities(self, ft_project_name): - self.log.debug(( - "* Preparing avalon entities " - "(separate to Create, Update and Deleted groups)" - )) - # Avalon entities - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ft_project_name - avalon_project = self.dbcon.find_one({"type": "project"}) - avalon_entities = self.dbcon.find({"type": "asset"}) - self.avalon_project = avalon_project - self.avalon_entities = avalon_entities - - ftrack_avalon_mapper = {} - avalon_ftrack_mapper = {} - create_ftrack_ids = [] - update_ftrack_ids = [] - - same_mongo_id = [] - all_mongo_ids = {} - for ftrack_id, entity_dict in self.entities_dict.items(): - mongo_id = entity_dict["avalon_attrs"].get(self.id_cust_attr) - if not mongo_id: - continue - if mongo_id in all_mongo_ids: - same_mongo_id.append(mongo_id) - else: - all_mongo_ids[mongo_id] = [] - all_mongo_ids[mongo_id].append(ftrack_id) - - if avalon_project: - mongo_id = str(avalon_project["_id"]) - ftrack_avalon_mapper[self.ft_project_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = self.ft_project_id - update_ftrack_ids.append(self.ft_project_id) - else: - create_ftrack_ids.append(self.ft_project_id) - - # make it go hierarchically - prepare_queue = queue.Queue() - - for child_id in self.entities_dict[self.ft_project_id]["children"]: - prepare_queue.put(child_id) - - while not prepare_queue.empty(): - ftrack_id = prepare_queue.get() - for child_id in self.entities_dict[ftrack_id]["children"]: - prepare_queue.put(child_id) - - entity_dict = self.entities_dict[ftrack_id] - ent_path = self.get_ent_path(ftrack_id) - - mongo_id = entity_dict["avalon_attrs"].get(self.id_cust_attr) - av_ent_by_mongo_id = self.avalon_ents_by_id.get(mongo_id) - if av_ent_by_mongo_id: - av_ent_ftrack_id = 
av_ent_by_mongo_id.get("data", {}).get( - "ftrackId" - ) - is_right = False - else_match_better = False - if av_ent_ftrack_id and av_ent_ftrack_id == ftrack_id: - is_right = True - - elif mongo_id not in same_mongo_id: - is_right = True - - else: - ftrack_ids_with_same_mongo = all_mongo_ids[mongo_id] - for _ftrack_id in ftrack_ids_with_same_mongo: - if _ftrack_id == av_ent_ftrack_id: - continue - - _entity_dict = self.entities_dict[_ftrack_id] - _mongo_id = _entity_dict["avalon_attrs"][ - self.id_cust_attr - ] - _av_ent_by_mongo_id = self.avalon_ents_by_id.get( - _mongo_id - ) - _av_ent_ftrack_id = _av_ent_by_mongo_id.get( - "data", {} - ).get("ftrackId") - if _av_ent_ftrack_id == ftrack_id: - else_match_better = True - break - - if not is_right and not else_match_better: - entity = entity_dict["entity"] - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - av_parents = av_ent_by_mongo_id["data"]["parents"] - if av_parents == parents: - is_right = True - else: - name = entity_dict["name"] - av_name = av_ent_by_mongo_id["name"] - if name == av_name: - is_right = True - - if is_right: - self.log.debug( - "Existing (by MongoID) <{}>".format(ent_path) - ) - ftrack_avalon_mapper[ftrack_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = ftrack_id - update_ftrack_ids.append(ftrack_id) - continue - - mongo_id = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not mongo_id: - mongo_id = self.avalon_ents_by_name.get(entity_dict["name"]) - if mongo_id: - self.log.debug( - "Existing (by matching name) <{}>".format(ent_path) - ) - else: - self.log.debug( - "Existing (by FtrackID in mongo) <{}>".format(ent_path) - ) - - if mongo_id: - ftrack_avalon_mapper[ftrack_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = ftrack_id - update_ftrack_ids.append(ftrack_id) - continue - - self.log.debug("New <{}>".format(ent_path)) - create_ftrack_ids.append(ftrack_id) - - deleted_entities = [] - for mongo_id in self.avalon_ents_by_id: - if mongo_id in avalon_ftrack_mapper: - continue - deleted_entities.append(mongo_id) - - av_ent = self.avalon_ents_by_id[mongo_id] - av_ent_path_items = [p for p in av_ent["data"]["parents"]] - av_ent_path_items.append(av_ent["name"]) - self.log.debug("Deleted <{}>".format("/".join(av_ent_path_items))) - - self.ftrack_avalon_mapper = ftrack_avalon_mapper - self.avalon_ftrack_mapper = avalon_ftrack_mapper - self.create_ftrack_ids = create_ftrack_ids - self.update_ftrack_ids = update_ftrack_ids - self.deleted_entities = deleted_entities - - self.log.debug(( - "Ftrack -> Avalon comparison: New <{}> " - "| Existing <{}> | Deleted <{}>" - ).format( - len(create_ftrack_ids), - len(update_ftrack_ids), - len(deleted_entities) - )) - - def filter_with_children(self, ftrack_id): - if ftrack_id not in self.entities_dict: - return - ent_dict = self.entities_dict[ftrack_id] - parent_id = ent_dict["parent_id"] - self.entities_dict[parent_id]["children"].remove(ftrack_id) - - children_queue = queue.Queue() - children_queue.put(ftrack_id) - while not children_queue.empty(): - _ftrack_id = children_queue.get() - entity_dict = self.entities_dict.pop(_ftrack_id, {"children": []}) - for child_id in entity_dict["children"]: - children_queue.put(child_id) - - def prepare_changes(self): - self.log.debug("* Preparing changes for avalon/ftrack") - hierarchy_changing_ids = [] - ignore_keys = collections.defaultdict(list) - - update_queue = queue.Queue() - for ftrack_id in self.update_ftrack_ids: - update_queue.put(ftrack_id) - - while not 
update_queue.empty(): - ftrack_id = update_queue.get() - if ftrack_id == self.ft_project_id: - changes = self.prepare_project_changes() - if changes: - self.updates[self.avalon_project_id] = changes - continue - - ftrack_ent_dict = self.entities_dict[ftrack_id] - - # *** check parents - parent_check = False - - ftrack_parent_id = ftrack_ent_dict["parent_id"] - avalon_id = self.ftrack_avalon_mapper[ftrack_id] - avalon_entity = self.avalon_ents_by_id[avalon_id] - avalon_parent_id = avalon_entity["data"]["visualParent"] - if avalon_parent_id is not None: - avalon_parent_id = str(avalon_parent_id) - - ftrack_parent_mongo_id = self.ftrack_avalon_mapper[ - ftrack_parent_id - ] - - # if parent is project - if (ftrack_parent_mongo_id == avalon_parent_id) or ( - ftrack_parent_id == self.ft_project_id and - avalon_parent_id is None - ): - parent_check = True - - # check name - ftrack_name = ftrack_ent_dict["name"] - avalon_name = avalon_entity["name"] - name_check = ftrack_name == avalon_name - - # IDEAL STATE: both parent and name check passed - if parent_check and name_check: - continue - - # If entity is changeable then change values of parent or name - if self.changeability_by_mongo_id[avalon_id]: - # TODO logging - if not parent_check: - if ftrack_parent_mongo_id == str(self.avalon_project_id): - new_parent_name = self.entities_dict[ - self.ft_project_id]["name"] - new_parent_id = None - else: - new_parent_name = self.avalon_ents_by_id[ - ftrack_parent_mongo_id]["name"] - new_parent_id = ObjectId(ftrack_parent_mongo_id) - - if avalon_parent_id == str(self.avalon_project_id): - old_parent_name = self.entities_dict[ - self.ft_project_id]["name"] - else: - old_parent_name = self.avalon_ents_by_id[ - ftrack_parent_mongo_id]["name"] - - self.updates[avalon_id]["data"] = { - "visualParent": new_parent_id - } - ignore_keys[ftrack_id].append("data.visualParent") - self.log.debug(( - "Avalon entity \"{}\" changed parent \"{}\" -> \"{}\"" - ).format(avalon_name, old_parent_name, new_parent_name)) - - if not name_check: - self.updates[avalon_id]["name"] = ftrack_name - ignore_keys[ftrack_id].append("name") - self.log.debug( - "Avalon entity \"{}\" was renamed to \"{}\"".format( - avalon_name, ftrack_name - ) - ) - continue - - # parents and hierarchy must be recalculated - hierarchy_changing_ids.append(ftrack_id) - - # Parent is project if avalon_parent_id is set to None - if avalon_parent_id is None: - avalon_parent_id = str(self.avalon_project_id) - - if not name_check: - ent_path = self.get_ent_path(ftrack_id) - # TODO report - # TODO logging - self.entities_dict[ftrack_id]["name"] = avalon_name - self.entities_dict[ftrack_id]["entity"]["name"] = ( - avalon_name - ) - self.entities_dict[ftrack_id]["final_entity"]["name"] = ( - avalon_name - ) - self.log.warning("Name was changed back to {} <{}>".format( - avalon_name, ent_path - )) - self._ent_pats_by_ftrack_id.pop(ftrack_id, None) - msg = ( - " It is not possible to change" - " the name of an entity or it's parents, " - " if it already contained published data." 
- ) - self.report_items["warning"][msg].append(ent_path) - - # skip parent oricessing if hierarchy didn't change - if parent_check: - continue - - # Logic when parenting(hierarchy) has changed and should not - old_ftrack_parent_id = self.avalon_ftrack_mapper.get( - avalon_parent_id - ) - - # If last ftrack parent id from mongo entity exist then just - # remap paren_id on entity - if old_ftrack_parent_id: - # TODO report - # TODO logging - ent_path = self.get_ent_path(ftrack_id) - msg = ( - " It is not possible" - " to change the hierarchy of an entity or it's parents," - " if it already contained published data." - ) - self.report_items["warning"][msg].append(ent_path) - self.log.warning(( - " Entity contains published data so it was moved" - " back to it's original hierarchy <{}>" - ).format(ent_path)) - self.entities_dict[ftrack_id]["entity"]["parent_id"] = ( - old_ftrack_parent_id - ) - self.entities_dict[ftrack_id]["parent_id"] = ( - old_ftrack_parent_id - ) - self.entities_dict[old_ftrack_parent_id][ - "children" - ].append(ftrack_id) - - continue - - old_parent_ent = self.avalon_ents_by_id.get(avalon_parent_id) - if not old_parent_ent: - old_parent_ent = self.avalon_archived_by_id.get( - avalon_parent_id - ) - - # TODO report - # TODO logging - if not old_parent_ent: - self.log.warning(( - "Parent entity was not found by id" - " - Trying to find by parent name" - )) - ent_path = self.get_ent_path(ftrack_id) - - parents = avalon_entity["data"]["parents"] - parent_name = parents[-1] - matching_entity_id = None - for id, entity_dict in self.entities_dict.items(): - if entity_dict["name"] == parent_name: - matching_entity_id = id - break - - if matching_entity_id is None: - # TODO logging - # TODO report (turn off auto-sync?) - self.log.error(( - "The entity contains published data but it was moved to" - " a different place in the hierarchy and it's previous" - " parent cannot be found." - " It's impossible to solve this programmatically <{}>" - ).format(ent_path)) - msg = ( - " Hierarchy of an entity" " can't be changed due to published data and missing" - " previous parent" - ) - self.report_items["error"][msg].append(ent_path) - self.filter_with_children(ftrack_id) - continue - - matching_ent_dict = self.entities_dict.get(matching_entity_id) - match_ent_parents = matching_ent_dict.get( - "final_entity", {}).get( - "data", {}).get( - "parents", ["__NOT_SET__"] - ) - # TODO logging - # TODO report - if ( - len(match_ent_parents) >= len(parents) or - match_ent_parents[:-1] != parents - ): - ent_path = self.get_ent_path(ftrack_id) - self.log.error(( - "The entity contains published data but it was moved to" - " a different place in the hierarchy and it's previous" - " parents were moved too." - " It's impossible to solve this programmatically <{}>" - ).format(ent_path)) - msg = ( - " Hierarchy of an entity" - " can't be changed due to published data and scrambled" - "hierarchy" - ) - continue - - old_parent_ent = matching_ent_dict["final_entity"] - - parent_id = self.ft_project_id - entities_to_create = [] - # TODO logging - self.log.warning( - "Ftrack entities must be recreated because they were deleted," - " but they contain published data." 
- ) - - _avalon_ent = old_parent_ent - - self.updates[avalon_parent_id] = {"type": "asset"} - success = True - while True: - _vis_par = _avalon_ent["data"]["visualParent"] - _name = _avalon_ent["name"] - if _name in self.all_ftrack_names: - av_ent_path_items = _avalon_ent["data"]["parents"] - av_ent_path_items.append(_name) - av_ent_path = "/".join(av_ent_path_items) - # TODO report - # TODO logging - self.log.error(( - "Can't recreate the entity in Ftrack because an entity" " with the same name already exists in a different" - " place in the hierarchy <{}>" - ).format(av_ent_path)) - msg = ( - " Hierarchy of an entity" - " can't be changed. I contains published data and it's" " previous parent had a name, that is duplicated at a " - " different hierarchy level" - ) - self.report_items["error"][msg].append(av_ent_path) - self.filter_with_children(ftrack_id) - success = False - break - - entities_to_create.append(_avalon_ent) - if _vis_par is None: - break - - _vis_par = str(_vis_par) - _mapped = self.avalon_ftrack_mapper.get(_vis_par) - if _mapped: - parent_id = _mapped - break - - _avalon_ent = self.avalon_ents_by_id.get(_vis_par) - if not _avalon_ent: - _avalon_ent = self.avalon_archived_by_id.get(_vis_par) - - if success is False: - continue - - new_entity_id = None - for av_entity in reversed(entities_to_create): - new_entity_id = self.create_ftrack_ent_from_avalon_ent( - av_entity, parent_id - ) - update_queue.put(new_entity_id) - - if new_entity_id: - ftrack_ent_dict["entity"]["parent_id"] = new_entity_id - - if hierarchy_changing_ids: - self.reload_parents(hierarchy_changing_ids) - - for ftrack_id in self.update_ftrack_ids: - if ftrack_id == self.ft_project_id: - continue - - avalon_id = self.ftrack_avalon_mapper[ftrack_id] - avalon_entity = self.avalon_ents_by_id[avalon_id] - - avalon_attrs = self.entities_dict[ftrack_id]["avalon_attrs"] - if ( - self.id_cust_attr not in avalon_attrs or - avalon_attrs[self.id_cust_attr] != avalon_id - ): - configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id"][self.id_cust_attr] - - _entity_key = collections.OrderedDict({ - "configuration_id": configuration_id, - "entity_id": ftrack_id - }) - - self.session.recorded_operations.push( - fa_session.ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - _entity_key, - "value", - fa_session.ftrack_api.symbol.NOT_SET, - avalon_id - ) - ) - # check rest of data - data_changes = self.compare_dict( - self.entities_dict[ftrack_id]["final_entity"], - avalon_entity, - ignore_keys[ftrack_id] - ) - if data_changes: - self.updates[avalon_id] = self.merge_dicts( - data_changes, - self.updates[avalon_id] - ) - - def synchronize(self): - self.log.debug("* Synchronization begins") - avalon_project_id = self.ftrack_avalon_mapper.get(self.ft_project_id) - if avalon_project_id: - self.avalon_project_id = ObjectId(avalon_project_id) - - # remove filtered ftrack ids from create/update list - for ftrack_id in self.all_filtered_entities: - if ftrack_id in self.create_ftrack_ids: - self.create_ftrack_ids.remove(ftrack_id) - elif ftrack_id in self.update_ftrack_ids: - self.update_ftrack_ids.remove(ftrack_id) - - self.log.debug("* Processing entities for archivation") - self.delete_entities() - - self.log.debug("* Processing new entities") - # Create not created entities - for ftrack_id in self.create_ftrack_ids: - # CHECK it is possible that entity was already created - # because is parent of another entity which was processed first - if ftrack_id in self.ftrack_avalon_mapper: - 
continue - self.create_avalon_entity(ftrack_id) - - if len(self.create_list) > 0: - self.dbcon.insert_many(self.create_list) - - self.session.commit() - - self.log.debug("* Processing entities for update") - self.prepare_changes() - self.update_entities() - self.session.commit() - - def create_avalon_entity(self, ftrack_id): - if ftrack_id == self.ft_project_id: - self.create_avalon_project() - return - - entity_dict = self.entities_dict[ftrack_id] - parent_ftrack_id = entity_dict["parent_id"] - avalon_parent = None - if parent_ftrack_id != self.ft_project_id: - avalon_parent = self.ftrack_avalon_mapper.get(parent_ftrack_id) - # if not avalon_parent: - # self.create_avalon_entity(parent_ftrack_id) - # avalon_parent = self.ftrack_avalon_mapper[parent_ftrack_id] - avalon_parent = ObjectId(avalon_parent) - - # avalon_archived_by_id avalon_archived_by_name - current_id = ( - entity_dict["avalon_attrs"].get(self.id_cust_attr) or "" - ).strip() - mongo_id = current_id - name = entity_dict["name"] - - # Check if exist archived asset in mongo - by ID - unarchive = False - unarchive_id = self.check_unarchivation(ftrack_id, mongo_id, name) - if unarchive_id is not None: - unarchive = True - mongo_id = unarchive_id - - item = entity_dict["final_entity"] - try: - new_id = ObjectId(mongo_id) - if mongo_id in self.avalon_ftrack_mapper: - new_id = ObjectId() - except InvalidId: - new_id = ObjectId() - - item["_id"] = new_id - item["parent"] = self.avalon_project_id - item["schema"] = self.entity_schemas["asset"] - item["data"]["visualParent"] = avalon_parent - - new_id_str = str(new_id) - self.ftrack_avalon_mapper[ftrack_id] = new_id_str - self.avalon_ftrack_mapper[new_id_str] = ftrack_id - - self._avalon_ents_by_id[new_id_str] = item - self._avalon_ents_by_ftrack_id[ftrack_id] = new_id_str - self._avalon_ents_by_name[item["name"]] = new_id_str - - if current_id != new_id_str: - # store mongo id to ftrack entity - configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id" - ][self.id_cust_attr] - _entity_key = collections.OrderedDict({ - "configuration_id": configuration_id, - "entity_id": ftrack_id - }) - - self.session.recorded_operations.push( - fa_session.ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - _entity_key, - "value", - fa_session.ftrack_api.symbol.NOT_SET, - new_id_str - ) - ) - - if unarchive is False: - self.create_list.append(item) - return - # If unarchive then replace entity data in database - self.dbcon.replace_one({"_id": new_id}, item) - self.remove_from_archived(mongo_id) - av_ent_path_items = item["data"]["parents"] - av_ent_path_items.append(item["name"]) - av_ent_path = "/".join(av_ent_path_items) - self.log.debug("Entity was unarchived <{}>".format(av_ent_path)) - - def check_unarchivation(self, ftrack_id, mongo_id, name): - archived_by_id = self.avalon_archived_by_id.get(mongo_id) - archived_by_name = self.avalon_archived_by_name.get(name) - - # if not found in archived then skip - if not archived_by_id and not archived_by_name: - return None - - entity_dict = self.entities_dict[ftrack_id] - - if archived_by_id: - # if is changeable then unarchive (nothing to check here) - if self.changeability_by_mongo_id[mongo_id]: - return mongo_id - - # TODO replace `__NOTSET__` with custom None constant - archived_parent_id = archived_by_id["data"].get( - "visualParent", "__NOTSET__" - ) - archived_parents = archived_by_id["data"].get("parents") - archived_name = archived_by_id["name"] - - if ( - archived_name != entity_dict["name"] or - 
archived_parents != entity_dict["final_entity"]["data"][ - "parents" - ] - ): - return None - - return mongo_id - - # First check if there is any that have same parents - for archived in archived_by_name: - mongo_id = str(archived["_id"]) - archived_parents = archived.get("data", {}).get("parents") - if ( - archived_parents == entity_dict["final_entity"]["data"][ - "parents" - ] - ): - return mongo_id - - # Secondly try to find more close to current ftrack entity - first_changeable = None - for archived in archived_by_name: - mongo_id = str(archived["_id"]) - if not self.changeability_by_mongo_id[mongo_id]: - continue - - if first_changeable is None: - first_changeable = mongo_id - - ftrack_parent_id = entity_dict["parent_id"] - map_ftrack_parent_id = self.ftrack_avalon_mapper.get( - ftrack_parent_id - ) - - # TODO replace `__NOTSET__` with custom None constant - archived_parent_id = archived.get("data", {}).get( - "visualParent", "__NOTSET__" - ) - if archived_parent_id is not None: - archived_parent_id = str(archived_parent_id) - - # skip if parent is archived - How this should be possible? - parent_entity = self.avalon_ents_by_id.get(archived_parent_id) - if ( - parent_entity and ( - map_ftrack_parent_id is not None and - map_ftrack_parent_id == str(parent_entity["_id"]) - ) - ): - return mongo_id - # Last return first changeable with same name (or None) - return first_changeable - - def create_avalon_project(self): - project_item = self.entities_dict[self.ft_project_id]["final_entity"] - mongo_id = ( - self.entities_dict[self.ft_project_id]["avalon_attrs"].get( - self.id_cust_attr - ) or "" - ).strip() - - try: - new_id = ObjectId(mongo_id) - except InvalidId: - new_id = ObjectId() - - project_item["_id"] = new_id - project_item["parent"] = None - project_item["schema"] = self.entity_schemas["project"] - project_item["config"]["schema"] = self.entity_schemas["config"] - project_item["config"]["template"] = self.get_avalon_project_template() - - self.ftrack_avalon_mapper[self.ft_project_id] = new_id - self.avalon_ftrack_mapper[new_id] = self.ft_project_id - - self.avalon_project_id = new_id - - self._avalon_ents_by_id[str(new_id)] = project_item - self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id) - self._avalon_ents_by_name[project_item["name"]] = str(new_id) - - self.create_list.append(project_item) - - # store mongo id to ftrack entity - entity = self.entities_dict[self.ft_project_id]["entity"] - entity["custom_attributes"][self.id_cust_attr] = str(new_id) - - def get_avalon_project_template(self): - """Get avalon template - Returns: - dictionary with templates - """ - project_name = self.entities_dict[self.ft_project_id]["name"] - templates = Anatomy(project_name).templates - return { - "workfile": templates["avalon"]["workfile"], - "work": templates["avalon"]["work"], - "publish": templates["avalon"]["publish"] - } - - def _bubble_changeability(self, unchangeable_ids): - unchangeable_queue = queue.Queue() - for entity_id in unchangeable_ids: - unchangeable_queue.put((entity_id, False)) - - processed_parents_ids = [] - subsets_to_remove = [] - while not unchangeable_queue.empty(): - entity_id, child_is_archived = unchangeable_queue.get() - # skip if already processed - if entity_id in processed_parents_ids: - continue - - entity = self.avalon_ents_by_id.get(entity_id) - # if entity is not archived but unchageable child was then skip - # - archived entities should not affect not archived? 
- if entity and child_is_archived: - continue - - # set changeability of current entity to False - self._changeability_by_mongo_id[entity_id] = False - processed_parents_ids.append(entity_id) - # if not entity then is probably archived - if not entity: - entity = self.avalon_archived_by_id.get(entity_id) - child_is_archived = True - - if not entity: - # if entity is not found then it is subset without parent - if entity_id in unchangeable_ids: - subsets_to_remove.append(entity_id) - else: - # TODO logging - What is happening here? - self.log.warning(( - "Avalon contains entities without valid parents that" - " lead to Project (should not cause errors)" - " - MongoId <{}>" - ).format(str(entity_id))) - continue - - # skip if parent is project - parent_id = entity["data"]["visualParent"] - if parent_id is None: - continue - unchangeable_queue.put((str(parent_id), child_is_archived)) - - self._delete_subsets_without_asset(subsets_to_remove) - - def _delete_subsets_without_asset(self, not_existing_parents): - subset_ids = [] - version_ids = [] - repre_ids = [] - to_delete = [] - - for parent_id in not_existing_parents: - subsets = self.subsets_by_parent_id.get(parent_id) - if not subsets: - continue - for subset in subsets: - if subset.get("type") != "subset": - continue - subset_ids.append(subset["_id"]) - - db_subsets = self.dbcon.find({ - "_id": {"$in": subset_ids}, - "type": "subset" - }) - if not db_subsets: - return - - db_versions = self.dbcon.find({ - "parent": {"$in": subset_ids}, - "type": "version" - }) - if db_versions: - version_ids = [ver["_id"] for ver in db_versions] - - db_repres = self.dbcon.find({ - "parent": {"$in": version_ids}, - "type": "representation" - }) - if db_repres: - repre_ids = [repre["_id"] for repre in db_repres] - - to_delete.extend(subset_ids) - to_delete.extend(version_ids) - to_delete.extend(repre_ids) - - self.dbcon.delete_many({"_id": {"$in": to_delete}}) - - # Probably deprecated - def _check_changeability(self, parent_id=None): - for entity in self.avalon_ents_by_parent_id[parent_id]: - mongo_id = str(entity["_id"]) - is_changeable = self._changeability_by_mongo_id.get(mongo_id) - if is_changeable is not None: - continue - - self._check_changeability(mongo_id) - is_changeable = True - for child in self.avalon_ents_by_parent_id[parent_id]: - if not self._changeability_by_mongo_id[str(child["_id"])]: - is_changeable = False - break - - if is_changeable is True: - is_changeable = (mongo_id in self.subsets_by_parent_id) - self._changeability_by_mongo_id[mongo_id] = is_changeable - - def update_entities(self): - mongo_changes_bulk = [] - for mongo_id, changes in self.updates.items(): - filter = {"_id": ObjectId(mongo_id)} - change_data = self.from_dict_to_set(changes) - mongo_changes_bulk.append(UpdateOne(filter, change_data)) - - if not mongo_changes_bulk: - # TODO LOG - return - self.dbcon.bulk_write(mongo_changes_bulk) - - def from_dict_to_set(self, data): - result = {"$set": {}} - dict_queue = queue.Queue() - dict_queue.put((None, data)) - - while not dict_queue.empty(): - _key, _data = dict_queue.get() - for key, value in _data.items(): - new_key = key - if _key is not None: - new_key = "{}.{}".format(_key, key) - - if not isinstance(value, dict): - result["$set"][new_key] = value - continue - dict_queue.put((new_key, value)) - return result - - def reload_parents(self, hierarchy_changing_ids): - parents_queue = queue.Queue() - parents_queue.put((self.ft_project_id, [], False)) - while not parents_queue.empty(): - ftrack_id, parent_parents, changed = 
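# from_dict_to_set above flattens a nested change dict into the dotted-key
# document that MongoDB's "$set" expects. A standalone restatement of the
# same logic plus a worked example:
import queue

def from_dict_to_set(data):
    result = {"$set": {}}
    pending = queue.Queue()
    pending.put((None, data))
    while not pending.empty():
        prefix, subdict = pending.get()
        for key, value in subdict.items():
            dotted = key if prefix is None else "{}.{}".format(prefix, key)
            if isinstance(value, dict):
                pending.put((dotted, value))
            else:
                result["$set"][dotted] = value
    return result

changes = {"name": "sh010", "data": {"visualParent": None, "fps": 25}}
print(from_dict_to_set(changes))
# {'$set': {'name': 'sh010', 'data.visualParent': None, 'data.fps': 25}}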
parents_queue.get() - _parents = parent_parents.copy() - if ftrack_id not in hierarchy_changing_ids and not changed: - if ftrack_id != self.ft_project_id: - _parents.append(self.entities_dict[ftrack_id]["name"]) - for child_id in self.entities_dict[ftrack_id]["children"]: - parents_queue.put((child_id, _parents, changed)) - continue - - changed = True - parents = [par for par in _parents] - hierarchy = "/".join(parents) - self.entities_dict[ftrack_id][ - "final_entity"]["data"]["parents"] = parents - self.entities_dict[ftrack_id][ - "final_entity"]["data"]["hierarchy"] = hierarchy - - _parents.append(self.entities_dict[ftrack_id]["name"]) - for child_id in self.entities_dict[ftrack_id]["children"]: - parents_queue.put((child_id, _parents, changed)) - - if ftrack_id in self.create_ftrack_ids: - mongo_id = self.ftrack_avalon_mapper[ftrack_id] - if "data" not in self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - self.updates[mongo_id]["data"]["parents"] = parents - self.updates[mongo_id]["data"]["hierarchy"] = hierarchy - - def prepare_project_changes(self): - ftrack_ent_dict = self.entities_dict[self.ft_project_id] - ftrack_entity = ftrack_ent_dict["entity"] - avalon_code = self.avalon_project["data"]["code"] - # TODO Is possible to sync if full name was changed? - # if ftrack_ent_dict["name"] != self.avalon_project["name"]: - # ftrack_entity["full_name"] = avalon_name - # self.entities_dict[self.ft_project_id]["name"] = avalon_name - # self.entities_dict[self.ft_project_id]["final_entity"][ - # "name" - # ] = avalon_name - - # TODO logging - # TODO report - # TODO May this happen? Is possible to change project code? - if ftrack_entity["name"] != avalon_code: - ftrack_entity["name"] = avalon_code - self.entities_dict[self.ft_project_id]["final_entity"]["data"][ - "code" - ] = avalon_code - self.session.commit() - sub_msg = ( - "Project code was changed back to \"{}\"".format(avalon_code) - ) - msg = ( - "It is not possible to change" - " project code after synchronization" - ) - self.report_items["warning"][msg] = sub_msg - self.log.warning(sub_msg) - - return self.compare_dict( - self.entities_dict[self.ft_project_id]["final_entity"], - self.avalon_project - ) - - def compare_dict(self, dict_new, dict_old, _ignore_keys=[]): - # _ignore_keys may be used for keys nested dict like"data.visualParent" - changes = {} - ignore_keys = [] - for key_val in _ignore_keys: - key_items = key_val.split(".") - if len(key_items) == 1: - ignore_keys.append(key_items[0]) - - for key, value in dict_new.items(): - if key in ignore_keys: - continue - - if key not in dict_old: - changes[key] = value - continue - - if isinstance(value, dict): - if not isinstance(dict_old[key], dict): - changes[key] = value - continue - - _new_ignore_keys = [] - for key_val in _ignore_keys: - key_items = key_val.split(".") - if len(key_items) <= 1: - continue - _new_ignore_keys.append(".".join(key_items[1:])) - - _changes = self.compare_dict( - value, dict_old[key], _new_ignore_keys - ) - if _changes: - changes[key] = _changes - continue - - if value != dict_old[key]: - changes[key] = value - - return changes - - def merge_dicts(self, dict_new, dict_old): - for key, value in dict_new.items(): - if key not in dict_old: - dict_old[key] = value - continue - - if isinstance(value, dict): - dict_old[key] = self.merge_dicts(value, dict_old[key]) - continue - - dict_old[key] = value - - return dict_old - - def delete_entities(self): - if not self.deleted_entities: - return - # Try to order so child is not processed before 
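# A small usage example for the two helpers above (hypothetical, assuming
# `factory` is a SyncEntitiesFactory instance). compare_dict returns only
# the keys whose values differ, preserving nesting; dotted ignore keys let
# callers exclude nested fields such as "data.visualParent":
new_doc = {"name": "sh010", "data": {"fps": 25, "visualParent": None}}
old_doc = {"name": "sh010", "data": {"fps": 24, "visualParent": None}}

changes = factory.compare_dict(new_doc, old_doc)
# -> {"data": {"fps": 25}}

changes = factory.compare_dict(new_doc, old_doc, ["data.visualParent"])
# -> {"data": {"fps": 25}} (visualParent is skipped during the comparison)

merged = factory.merge_dicts(changes, {"data": {"hierarchy": "sq01"}})
# -> {"data": {"hierarchy": "sq01", "fps": 25}} (new values win on conflict)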
parent - deleted_entities = [] - _deleted_entities = [id for id in self.deleted_entities] - - while True: - if not _deleted_entities: - break - _ready = [] - for mongo_id in _deleted_entities: - ent = self.avalon_ents_by_id[mongo_id] - vis_par = ent["data"]["visualParent"] - if ( - vis_par is not None and - str(vis_par) in self.deleted_entities - ): - continue - _ready.append(mongo_id) - - for id in _ready: - deleted_entities.append(id) - _deleted_entities.remove(id) - - delete_ids = [] - for mongo_id in deleted_entities: - # delete if they are deletable - if self.changeability_by_mongo_id[mongo_id]: - delete_ids.append(ObjectId(mongo_id)) - continue - - # check if any new created entity match same entity - # - name and parents must match - deleted_entity = self.avalon_ents_by_id[mongo_id] - name = deleted_entity["name"] - parents = deleted_entity["data"]["parents"] - similar_ent_id = None - for ftrack_id in self.create_ftrack_ids: - _ent_final = self.entities_dict[ftrack_id]["final_entity"] - if _ent_final["name"] != name: - continue - if _ent_final["data"]["parents"] != parents: - continue - - # If in create is "same" then we can "archive" current - # since will be unarchived in create method - similar_ent_id = ftrack_id - break - - # If similar entity(same name and parents) is in create - # entities list then just change from create to update - if similar_ent_id is not None: - self.create_ftrack_ids.remove(similar_ent_id) - self.update_ftrack_ids.append(similar_ent_id) - self.avalon_ftrack_mapper[mongo_id] = similar_ent_id - self.ftrack_avalon_mapper[similar_ent_id] = mongo_id - continue - - found_by_name_id = None - for ftrack_id, ent_dict in self.entities_dict.items(): - if not ent_dict.get("name"): - continue - - if name == ent_dict["name"]: - found_by_name_id = ftrack_id - break - - if found_by_name_id is not None: - # * THESE conditins are too complex to implement in first stage - # - probably not possible to solve if this happen - # if found_by_name_id in self.create_ftrack_ids: - # # reparent entity of the new one create? 
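# The ordering pass above repeatedly peels off entities whose visual parent
# is not itself scheduled for deletion, so parents always precede their
# children in deleted_entities. A standalone sketch of that idea; note that
# this sketch additionally unblocks children once their parent has been
# emitted, which a multi-level hierarchy needs in order to drain completely:
def order_parents_first(deleted_ids, ents_by_id):
    scheduled = set(deleted_ids)
    remaining = list(deleted_ids)
    ordered = []
    while remaining:
        ready = [
            mongo_id for mongo_id in remaining
            if ents_by_id[mongo_id]["data"]["visualParent"] is None
            or str(ents_by_id[mongo_id]["data"]["visualParent"]) not in scheduled
        ]
        if not ready:
            break  # guard against malformed parent links
        for mongo_id in ready:
            ordered.append(mongo_id)
            remaining.remove(mongo_id)
            scheduled.discard(mongo_id)  # emitted parents unblock children
    return ordered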
- # pass - # - # elif found_by_name_id in self.update_ftrack_ids: - # found_mongo_id = self.ftrack_avalon_mapper[found_by_name_id] - # - # ent_dict = self.entities_dict[found_by_name_id] - - # TODO report - CRITICAL entity with same name alread exists in - # different hierarchy - can't recreate entity - continue - - _vis_parent = str(deleted_entity["data"]["visualParent"]) - if _vis_parent is None: - _vis_parent = self.avalon_project_id - ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent] - self.create_ftrack_ent_from_avalon_ent( - deleted_entity, ftrack_parent_id - ) - - filter = {"_id": {"$in": delete_ids}, "type": "asset"} - self.dbcon.update_many(filter, {"$set": {"type": "archived_asset"}}) - - def create_ftrack_ent_from_avalon_ent(self, av_entity, parent_id): - new_entity = None - parent_entity = self.entities_dict[parent_id]["entity"] - - _name = av_entity["name"] - _type = av_entity["data"].get("entityType", "folder") - - self.log.debug(( - "Re-ceating deleted entity {} <{}>" - ).format(_name, _type)) - - new_entity = self.session.create(_type, { - "name": _name, - "parent": parent_entity - }) - - final_entity = {} - for k, v in av_entity.items(): - final_entity[k] = v - - if final_entity.get("type") != "asset": - final_entity["type"] = "asset" - - new_entity_id = new_entity["id"] - new_entity_data = { - "entity": new_entity, - "parent_id": parent_id, - "entity_type": _type.lower(), - "entity_type_orig": _type, - "name": _name, - "final_entity": final_entity - } - for k, v in new_entity_data.items(): - self.entities_dict[new_entity_id][k] = v - - p_chilren = self.entities_dict[parent_id]["children"] - if new_entity_id not in p_chilren: - self.entities_dict[parent_id]["children"].append(new_entity_id) - - cust_attr, hier_attrs = self.get_avalon_attr() - for _attr in cust_attr: - key = _attr["key"] - if key not in av_entity["data"]: - continue - - if key not in new_entity["custom_attributes"]: - continue - - value = av_entity["data"][key] - if not value: - continue - - new_entity["custom_attributes"][key] = value - - av_entity_id = str(av_entity["_id"]) - new_entity["custom_attributes"][self.id_cust_attr] = av_entity_id - - self.ftrack_avalon_mapper[new_entity_id] = av_entity_id - self.avalon_ftrack_mapper[av_entity_id] = new_entity_id - - self.session.commit() - - ent_path = self.get_ent_path(new_entity_id) - msg = ( - "Deleted entity was recreated because it or its children" - " contain published data" - ) - - self.report_items["info"][msg].append(ent_path) - - return new_entity_id - - def regex_duplicate_interface(self): - items = [] - if self.failed_regex or self.tasks_failed_regex: - subtitle = "Entity names contain prohibited symbols:" - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<br/><br/>NOTE: You can use Letters( a-Z )," - " Numbers( 0-9 ) and Underscore( _ )<br/><br/>
" - ) - }) - log_msgs = [] - for name, ids in self.failed_regex.items(): - error_title = { - "type": "label", - "value": "## {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<br/><br/>{}<br/><br/>'.format("<br/>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ",".join(paths))) - - for name, ids in self.tasks_failed_regex.items(): - error_title = { - "type": "label", - "value": "## Task: {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - ent_path = "/".join([ent_path, name]) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<br/><br/>{}<br/><br/>'.format("<br/>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ",".join(paths))) - - self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) - - if self.duplicates: - subtitle = "Duplicated entity names:" - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<br/><br/>NOTE: It is not allowed to use the same name" - " for multiple entities in the same project<br/><br/>
" - ) - }) - log_msgs = [] - for name, ids in self.duplicates.items(): - error_title = { - "type": "label", - "value": "## {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<br/><br/>{}<br/><br/>'.format("<br/>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ", ".join(paths))) - - self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) - - return items - - def get_avalon_attr(self, split_hierarchical=True): - custom_attributes = [] - hier_custom_attributes = [] - cust_attrs_query = ( - "select id, entity_type, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where group.name = \"avalon\"" - ) - all_avalon_attr = self.session.query(cust_attrs_query).all() - for cust_attr in all_avalon_attr: - if split_hierarchical and cust_attr["is_hierarchical"]: - hier_custom_attributes.append(cust_attr) - continue - - custom_attributes.append(cust_attr) - - if split_hierarchical: - # return tuple - return custom_attributes, hier_custom_attributes - - return custom_attributes - - def report(self): - items = [] - project_name = self.entities_dict[self.ft_project_id]["name"] - title = "Synchronization report ({}):".format(project_name) - - keys = ["error", "warning", "info"] - for key in keys: - subitems = [] - if key == "warning": - for _item in self.regex_duplicate_interface(): - subitems.append(_item) - - for msg, _items in self.report_items[key].items(): - if not _items: - continue - - subitems.append({ - "type": "label", - "value": "# {}".format(msg) - }) - if isinstance(_items, str): - _items = [_items] - subitems.append({ - "type": "label", - "value": '
<br/><br/>{}<br/><br/>'.format("<br/>
".join(_items)) - }) - - if items and subitems: - items.append(self.report_splitter) - - items.extend(subitems) - - return { - "items": items, - "title": title, - "success": False, - "message": "Synchronization Finished" - } +from pype.ftrack.lib.avalon_sync import SyncEntitiesFactory class SyncToAvalonLocal(BaseAction): @@ -2191,6 +47,10 @@ class SyncToAvalonLocal(BaseAction): os.environ.get('PYPE_STATICS_SERVER', '') ) + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.entities_factory = SyncEntitiesFactory(self.log, self.session) + def discover(self, session, entities, event): ''' Validation ''' for ent in event["data"]["selection"]: @@ -2210,28 +70,29 @@ class SyncToAvalonLocal(BaseAction): ft_project_name = in_entities[0]["project"]["full_name"] try: - entities_factory = SyncEntitiesFactory( - self.log, session, ft_project_name - ) + output = self.entities_factory.launch_setup(ft_project_name) + if output is not None: + return output + time_1 = time.time() - entities_factory.set_cutom_attributes() + self.entities_factory.set_cutom_attributes() time_2 = time.time() # This must happen before all filtering!!! - entities_factory.prepare_avalon_entities(ft_project_name) + self.entities_factory.prepare_avalon_entities(ft_project_name) time_3 = time.time() - entities_factory.filter_by_ignore_sync() + self.entities_factory.filter_by_ignore_sync() time_4 = time.time() - entities_factory.duplicity_regex_check() + self.entities_factory.duplicity_regex_check() time_5 = time.time() - entities_factory.prepare_ftrack_ent_data() + self.entities_factory.prepare_ftrack_ent_data() time_6 = time.time() - entities_factory.synchronize() + self.entities_factory.synchronize() time_7 = time.time() self.log.debug( @@ -2262,7 +123,7 @@ class SyncToAvalonLocal(BaseAction): "* Total time: {}".format(time_7 - time_start) ) - report = entities_factory.report() + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( ft_project_name @@ -2304,13 +165,13 @@ class SyncToAvalonLocal(BaseAction): report = {"items": []} try: - report = entities_factory.report() + report = self.entities_factory.report() except Exception: pass _items = report.get("items", []) if _items: - items.append(entities_factory.report_splitter) + items.append(self.entities_factory.report_splitter) items.extend(_items) self.show_interface(items, title, event) @@ -2319,12 +180,12 @@ class SyncToAvalonLocal(BaseAction): finally: try: - entities_factory.dbcon.uninstall() + self.entities_factory.dbcon.uninstall() except Exception: pass try: - entities_factory.session.close() + self.entities_factory.session.close() except Exception: pass diff --git a/pype/ftrack/actions/action_update_from_v2-2-0.py b/pype/ftrack/actions/action_update_from_v2-2-0.py index 80b920207a..dd0f1e6ea2 100644 --- a/pype/ftrack/actions/action_update_from_v2-2-0.py +++ b/pype/ftrack/actions/action_update_from_v2-2-0.py @@ -1,14 +1,6 @@ import os -import sys -import argparse -import logging -import collections -import json -import re -import ftrack_api from pype.ftrack import BaseAction -from avalon import io, inventory, schema from pype.ftrack.lib.io_nonsingleton import DbConnector @@ -134,7 +126,6 @@ class PypeUpdateFromV2_2_0(BaseAction): "title": title } - def launch(self, session, entities, event): if 'values' not in event['data']: return @@ -182,7 +173,7 @@ class PypeUpdateFromV2_2_0(BaseAction): {"type": "asset"}, {"$unset": {"silo": ""}} ) - + self.log.debug("- setting 
schema of assets to v.3") self.db_con.update_many( {"type": "asset"}, @@ -191,10 +182,8 @@ class PypeUpdateFromV2_2_0(BaseAction): return True + def register(session, plugins_presets={}): """Register plugin. Called when used as an plugin.""" - if not isinstance(session, ftrack_api.session.Session): - return - PypeUpdateFromV2_2_0(session, plugins_presets).register() diff --git a/pype/ftrack/actions/action_where_run_ask.py b/pype/ftrack/actions/action_where_run_ask.py index 7fc08c1f68..a28f32f407 100644 --- a/pype/ftrack/actions/action_where_run_ask.py +++ b/pype/ftrack/actions/action_where_run_ask.py @@ -1,7 +1,5 @@ import os -import ftrack_api from pype.ftrack import BaseAction -from ftrack_api import session as fa_session class ActionAskWhereIRun(BaseAction): diff --git a/pype/ftrack/events/action_sync_to_avalon.py b/pype/ftrack/events/action_sync_to_avalon.py index 217fef4c07..79ab1b5f7a 100644 --- a/pype/ftrack/events/action_sync_to_avalon.py +++ b/pype/ftrack/events/action_sync_to_avalon.py @@ -1,2145 +1,10 @@ import os -import collections -import re -import queue import time -import toml import traceback -from bson.objectid import ObjectId -from bson.errors import InvalidId -from pymongo import UpdateOne - -import avalon from pype.ftrack import BaseAction -from pype.ftrack.lib.io_nonsingleton import DbConnector -import ftrack_api -from ftrack_api import session as fa_session -from pypeapp import Anatomy, config - - -class SyncEntitiesFactory: - dbcon = DbConnector() - - project_query = ( - "select full_name, name, custom_attributes" - ", project_schema._task_type_schema.types.name" - " from Project where full_name is \"{}\"" - ) - entities_query = ( - "select id, name, parent_id, link" - " from TypedContext where project_id is \"{}\"" - ) - ignore_custom_attr_key = "avalon_ignore_sync" - id_cust_attr = "avalon_mongo_id" - - entity_schemas = { - "project": "avalon-core:project-2.0", - "asset": "avalon-core:asset-3.0", - "config": "avalon-core:config-1.0" - } - - report_splitter = {"type": "label", "value": "---"} - - def __init__(self, log_obj, _session, project_full_name): - self.log = log_obj - self.session = ftrack_api.Session( - server_url=_session.server_url, - api_key=_session.api_key, - api_user=_session.api_user, - auto_connect_event_hub=True - ) - - self.cancel_auto_sync = False - - self.schema_patterns = {} - self.duplicates = {} - self.failed_regex = {} - self.tasks_failed_regex = collections.defaultdict(list) - self.report_items = { - "info": collections.defaultdict(list), - "warning": collections.defaultdict(list), - "error": collections.defaultdict(list) - } - - self.create_list = [] - self.recreated_ftrack_ents = {} - self.updates = collections.defaultdict(dict) - - self._avalon_ents_by_id = None - self._avalon_ents_by_ftrack_id = None - self._avalon_ents_by_name = None - self._avalon_ents_by_parent_id = None - - self._avalon_archived_ents = None - self._avalon_archived_by_id = None - self._avalon_archived_by_parent_id = None - self._avalon_archived_by_name = None - - self._subsets_by_parent_id = None - self._changeability_by_mongo_id = None - - self.all_filtered_entities = {} - # self.all_filtered_ids = [] - self.filtered_ids = [] - self.not_selected_ids = [] - - self._ent_pats_by_ftrack_id = {} - - # Get Ftrack project - ft_project = self.session.query( - self.project_query.format(project_full_name) - ).one() - ft_project_id = ft_project["id"] - - # Skip if project is ignored - if ft_project["custom_attributes"].get( - self.ignore_custom_attr_key - ) is True: - msg = 
( - "Project \"{}\" has set `Ignore Sync` custom attribute to True" - ).format(project_full_name) - self.log.warning(msg) - return {"success": False, "message": msg} - - # Check if `avalon_mongo_id` custom attribute exist or is accessible - if self.id_cust_attr not in ft_project["custom_attributes"]: - items = [] - items.append({ - "type": "label", - "value": "# Can't access Custom attribute <{}>".format( - self.id_cust_attr - ) - }) - items.append({ - "type": "label", - "value": ( - "

- Check if user \"{}\" has permissions" - " to access the Custom attribute

" - ).format(_session.api_key) - }) - items.append({ - "type": "label", - "value": "

- Check if the Custom attribute exist

" - }) - return { - "items": items, - "title": "Synchronization failed", - "success": False, - "message": "Synchronization failed" - } - - # Find all entities in project - all_project_entities = self.session.query( - self.entities_query.format(ft_project_id) - ).all() - - # Store entities by `id` and `parent_id` - entities_dict = collections.defaultdict(lambda: { - "children": list(), - "parent_id": None, - "entity": None, - "entity_type": None, - "name": None, - "custom_attributes": {}, - "hier_attrs": {}, - "avalon_attrs": {}, - "tasks": [] - }) - - for entity in all_project_entities: - parent_id = entity["parent_id"] - entity_type = entity.entity_type - entity_type_low = entity_type.lower() - if entity_type_low == "task": - entities_dict[parent_id]["tasks"].append(entity["name"]) - continue - - entity_id = entity["id"] - entities_dict[entity_id].update({ - "entity": entity, - "parent_id": parent_id, - "entity_type": entity_type_low, - "entity_type_orig": entity_type, - "name": entity["name"] - }) - entities_dict[parent_id]["children"].append(entity_id) - - entities_dict[ft_project_id]["entity"] = ft_project - entities_dict[ft_project_id]["entity_type"] = ( - ft_project.entity_type.lower() - ) - entities_dict[ft_project_id]["entity_type_orig"] = ( - ft_project.entity_type - ) - entities_dict[ft_project_id]["name"] = ft_project["full_name"] - - self.ft_project_id = ft_project_id - self.entities_dict = entities_dict - - @property - def avalon_ents_by_id(self): - if self._avalon_ents_by_id is None: - self._avalon_ents_by_id = {} - for entity in self.avalon_entities: - self._avalon_ents_by_id[str(entity["_id"])] = entity - - return self._avalon_ents_by_id - - @property - def avalon_ents_by_ftrack_id(self): - if self._avalon_ents_by_ftrack_id is None: - self._avalon_ents_by_ftrack_id = {} - for entity in self.avalon_entities: - key = entity.get("data", {}).get("ftrackId") - if not key: - continue - self._avalon_ents_by_ftrack_id[key] = str(entity["_id"]) - - return self._avalon_ents_by_ftrack_id - - @property - def avalon_ents_by_name(self): - if self._avalon_ents_by_name is None: - self._avalon_ents_by_name = {} - for entity in self.avalon_entities: - self._avalon_ents_by_name[entity["name"]] = str(entity["_id"]) - - return self._avalon_ents_by_name - - @property - def avalon_ents_by_parent_id(self): - if self._avalon_ents_by_parent_id is None: - self._avalon_ents_by_parent_id = collections.defaultdict(list) - for entity in self.avalon_entities: - parent_id = entity["data"]["visualParent"] - if parent_id is not None: - parent_id = str(parent_id) - self._avalon_ents_by_parent_id[parent_id].append(entity) - - return self._avalon_ents_by_parent_id - - @property - def avalon_archived_ents(self): - if self._avalon_archived_ents is None: - self._avalon_archived_ents = [ - ent for ent in self.dbcon.find({"type": "archived_asset"}) - ] - return self._avalon_archived_ents - - @property - def avalon_archived_by_name(self): - if self._avalon_archived_by_name is None: - self._avalon_archived_by_name = collections.defaultdict(list) - for ent in self.avalon_archived_ents: - self._avalon_archived_by_name[ent["name"]].append(ent) - return self._avalon_archived_by_name - - @property - def avalon_archived_by_id(self): - if self._avalon_archived_by_id is None: - self._avalon_archived_by_id = { - str(ent["_id"]): ent for ent in self.avalon_archived_ents - } - return self._avalon_archived_by_id - - @property - def avalon_archived_by_parent_id(self): - if self._avalon_archived_by_parent_id is None: - 
self._avalon_archived_by_parent_id = collections.defaultdict(list) - for entity in self.avalon_archived_ents: - parent_id = entity["data"]["visualParent"] - if parent_id is not None: - parent_id = str(parent_id) - self._avalon_archived_by_parent_id[parent_id].append(entity) - - return self._avalon_archived_by_parent_id - - @property - def subsets_by_parent_id(self): - if self._subsets_by_parent_id is None: - self._subsets_by_parent_id = collections.defaultdict(list) - for subset in self.dbcon.find({"type": "subset"}): - self._subsets_by_parent_id[str(subset["parent"])].append( - subset - ) - - return self._subsets_by_parent_id - - @property - def changeability_by_mongo_id(self): - if self._changeability_by_mongo_id is None: - self._changeability_by_mongo_id = collections.defaultdict( - lambda: True - ) - self._changeability_by_mongo_id[self.avalon_project_id] = False - self._bubble_changeability(list(self.subsets_by_parent_id.keys())) - return self._changeability_by_mongo_id - - @property - def all_ftrack_names(self): - return [ - ent_dict["name"] for ent_dict in self.entities_dict.values() if ( - ent_dict.get("name") - ) - ] - - def duplicity_regex_check(self): - self.log.debug("* Checking duplicities and invalid symbols") - # Duplicity and regex check - entity_ids_by_name = {} - duplicates = [] - failed_regex = [] - task_names = {} - for ftrack_id, entity_dict in self.entities_dict.items(): - regex_check = True - name = entity_dict["name"] - entity_type = entity_dict["entity_type"] - # Tasks must be checked too - for task_name in entity_dict["tasks"]: - passed = task_names.get(task_name) - if passed is None: - passed = self.check_regex(task_name, "task") - task_names[task_name] = passed - - if not passed: - self.tasks_failed_regex[task_name].append(ftrack_id) - - if name in entity_ids_by_name: - duplicates.append(name) - else: - entity_ids_by_name[name] = [] - regex_check = self.check_regex(name, entity_type) - - entity_ids_by_name[name].append(ftrack_id) - if not regex_check: - failed_regex.append(name) - - for name in failed_regex: - self.failed_regex[name] = entity_ids_by_name[name] - - for name in duplicates: - self.duplicates[name] = entity_ids_by_name[name] - - self.filter_by_duplicate_regex() - - def check_regex(self, name, entity_type, in_schema=None): - schema_name = "asset-3.0" - if in_schema: - schema_name = in_schema - elif entity_type == "project": - schema_name = "project-2.0" - elif entity_type == "task": - schema_name = "task" - - name_pattern = self.schema_patterns.get(schema_name) - if not name_pattern: - default_pattern = "^[a-zA-Z0-9_.]*$" - schema_obj = avalon.schema._cache.get(schema_name + ".json") - if not schema_obj: - name_pattern = default_pattern - else: - name_pattern = schema_obj.get( - "properties", {}).get( - "name", {}).get( - "pattern", default_pattern - ) - self.schema_patterns[schema_name] = name_pattern - - if re.match(name_pattern, name): - return True - return False - - def filter_by_duplicate_regex(self): - filter_queue = queue.Queue() - failed_regex_msg = "{} - Entity has invalid symbol/s in name" - duplicate_msg = "Multiple entities have name \"{}\":" - - for ids in self.failed_regex.values(): - for id in ids: - ent_path = self.get_ent_path(id) - self.log.warning(failed_regex_msg.format(ent_path)) - filter_queue.put(id) - - for name, ids in self.duplicates.items(): - self.log.warning(duplicate_msg.format(name)) - for id in ids: - ent_path = self.get_ent_path(id) - self.log.warning(ent_path) - filter_queue.put(id) - - filtered_ids = [] - while not 
filter_queue.empty(): - ftrack_id = filter_queue.get() - if ftrack_id in filtered_ids: - continue - - entity_dict = self.entities_dict.pop(ftrack_id, {}) - if not entity_dict: - continue - - self.all_filtered_entities[ftrack_id] = entity_dict - parent_id = entity_dict.get("parent_id") - if parent_id and parent_id in self.entities_dict: - if ftrack_id in self.entities_dict[parent_id]["children"]: - self.entities_dict[parent_id]["children"].remove(ftrack_id) - - filtered_ids.append(ftrack_id) - for child_id in entity_dict.get("children", []): - filter_queue.put(child_id) - - # self.all_filtered_ids.extend(filtered_ids) - - for name, ids in self.tasks_failed_regex.items(): - for id in ids: - if id not in self.entities_dict: - continue - self.entities_dict[id]["tasks"].remove(name) - ent_path = self.get_ent_path(id) - self.log.warning(failed_regex_msg.format( - "/".join([ent_path, name]) - )) - - def filter_by_ignore_sync(self): - # skip filtering if `ignore_sync` attribute do not exist - if self.entities_dict[self.ft_project_id]["avalon_attrs"].get( - self.ignore_custom_attr_key, "_notset_" - ) == "_notset_": - return - - self.filter_queue = queue.Queue() - self.filter_queue.put((self.ft_project_id, False)) - while not self.filter_queue.empty(): - parent_id, remove = self.filter_queue.get() - if remove: - parent_dict = self.entities_dict.pop(parent_id, {}) - self.all_filtered_entities[parent_id] = parent_dict - self.filtered_ids.append(parent_id) - else: - parent_dict = self.entities_dict.get(parent_id, {}) - - for child_id in parent_dict.get("children", []): - # keep original `remove` value for all childs - _remove = (remove is True) - if not _remove: - if self.entities_dict[child_id]["avalon_attrs"].get( - self.ignore_custom_attr_key - ): - self.entities_dict[parent_id]["children"].remove( - child_id - ) - _remove = True - self.filter_queue.put((child_id, _remove)) - - # self.all_filtered_ids.extend(self.filtered_ids) - - def filter_by_selection(self, event): - # BUGGY!!!! 
cause that entities are in deleted list - # TODO may be working when filtering happen after preparations - # - But this part probably does not have any functional reason - # - Time of synchronization probably won't be changed much - selected_ids = [] - for entity in event["data"]["selection"]: - # Skip if project is in selection - if entity["entityType"] == "show": - return - selected_ids.append(entity["entityId"]) - - sync_ids = [self.ft_project_id] - parents_queue = queue.Queue() - children_queue = queue.Queue() - for id in selected_ids: - # skip if already filtered with ignore sync custom attribute - if id in self.filtered_ids: - continue - - parents_queue.put(id) - children_queue.put(id) - - while not parents_queue.empty(): - id = parents_queue.get() - while True: - # Stops when parent is in sync_ids - if id in self.filtered_ids or id in sync_ids or id is None: - break - sync_ids.append(id) - id = self.entities_dict[id]["parent_id"] - - while not children_queue.empty(): - parent_id = children_queue.get() - for child_id in self.entities_dict[parent_id]["children"]: - if child_id in sync_ids or child_id in self.filtered_ids: - continue - sync_ids.append(child_id) - children_queue.put(child_id) - - # separate not selected and to process entities - for key, value in self.entities_dict.items(): - if key not in sync_ids: - self.not_selected_ids.append(key) - - for id in self.not_selected_ids: - # pop from entities - value = self.entities_dict.pop(id) - # remove entity from parent's children - parent_id = value["parent_id"] - if parent_id not in sync_ids: - continue - - self.entities_dict[parent_id]["children"].remove(id) - - def set_cutom_attributes(self): - self.log.debug("* Preparing custom attributes") - # Get custom attributes and values - custom_attrs, hier_attrs = self.get_avalon_attr(True) - ent_types = self.session.query("select id, name from ObjectType").all() - ent_types_by_name = { - ent_type["name"]: ent_type["id"] for ent_type in ent_types - } - - attrs = set() - # store default values per entity type - attrs_per_entity_type = collections.defaultdict(dict) - avalon_attrs = collections.defaultdict(dict) - # store also custom attribute configuration id for future use (create) - attrs_per_entity_type_ca_id = collections.defaultdict(dict) - avalon_attrs_ca_id = collections.defaultdict(dict) - - for cust_attr in custom_attrs: - key = cust_attr["key"] - attrs.add(key) - ca_ent_type = cust_attr["entity_type"] - if key.startswith("avalon_"): - if ca_ent_type == "show": - avalon_attrs[ca_ent_type][key] = cust_attr["default"] - avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"] - else: - obj_id = cust_attr["object_type_id"] - avalon_attrs[obj_id][key] = cust_attr["default"] - avalon_attrs_ca_id[obj_id][key] = cust_attr["id"] - continue - - if ca_ent_type == "show": - attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"] - attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"] - else: - obj_id = cust_attr["object_type_id"] - attrs_per_entity_type[obj_id][key] = cust_attr["default"] - attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"] - - obj_id_ent_type_map = {} - sync_ids = [] - for entity_id, entity_dict in self.entities_dict.items(): - sync_ids.append(entity_id) - entity_type = entity_dict["entity_type"] - entity_type_orig = entity_dict["entity_type_orig"] - - if entity_type == "project": - attr_key = "show" - else: - map_key = obj_id_ent_type_map.get(entity_type_orig) - if not map_key: - # Put space between capitals - # (e.g. 
'AssetBuild' -> 'Asset Build') - map_key = re.sub( - r"(\w)([A-Z])", r"\1 \2", entity_type_orig - ) - obj_id_ent_type_map[entity_type_orig] = map_key - - # Get object id of entity type - attr_key = ent_types_by_name.get(map_key) - - # Backup soluction when id is not found by prequeried objects - if not attr_key: - query = "ObjectType where name is \"{}\"".format(map_key) - attr_key = self.session.query(query).one()["id"] - ent_types_by_name[map_key] = attr_key - - prepared_attrs = attrs_per_entity_type.get(attr_key) - prepared_avalon_attr = avalon_attrs.get(attr_key) - prepared_attrs_ca_id = attrs_per_entity_type_ca_id.get(attr_key) - prepared_avalon_attr_ca_id = avalon_attrs_ca_id.get(attr_key) - if prepared_attrs: - self.entities_dict[entity_id]["custom_attributes"] = ( - prepared_attrs.copy() - ) - if prepared_attrs_ca_id: - self.entities_dict[entity_id]["custom_attributes_id"] = ( - prepared_attrs_ca_id.copy() - ) - if prepared_avalon_attr: - self.entities_dict[entity_id]["avalon_attrs"] = ( - prepared_avalon_attr.copy() - ) - if prepared_avalon_attr_ca_id: - self.entities_dict[entity_id]["avalon_attrs_id"] = ( - prepared_avalon_attr_ca_id.copy() - ) - - # TODO query custom attributes by entity_id - entity_ids_joined = ", ".join([ - "\"{}\"".format(id) for id in sync_ids - ]) - attributes_joined = ", ".join([ - "\"{}\"".format(name) for name in attrs - ]) - - cust_attr_query = ( - "select value, entity_id from ContextCustomAttributeValue " - "where entity_id in ({}) and configuration.key in ({})" - ) - [values] = self.session._call([{ - "action": "query", - "expression": cust_attr_query.format( - entity_ids_joined, attributes_joined - ) - }]) - - for value in values["data"]: - entity_id = value["entity_id"] - key = value["configuration"]["key"] - store_key = "custom_attributes" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - self.entities_dict[entity_id][store_key][key] = value["value"] - - # process hierarchical attributes - self.set_hierarchical_attribute(hier_attrs, sync_ids) - - def set_hierarchical_attribute(self, hier_attrs, sync_ids): - # collect all hierarchical attribute keys - # and prepare default values to project - attribute_names = [] - for attr in hier_attrs: - key = attr["key"] - attribute_names.append(key) - - store_key = "hier_attrs" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - - self.entities_dict[self.ft_project_id][store_key][key] = ( - attr["default"] - ) - - # Prepare dict with all hier keys and None values - prepare_dict = {} - prepare_dict_avalon = {} - for attr in attribute_names: - if attr.startswith("avalon_"): - prepare_dict_avalon[attr] = None - else: - prepare_dict[attr] = None - - for id, entity_dict in self.entities_dict.items(): - # Skip project because has stored defaults at the moment - if entity_dict["entity_type"] == "project": - continue - entity_dict["hier_attrs"] = prepare_dict.copy() - for key, val in prepare_dict_avalon.items(): - entity_dict["avalon_attrs"][key] = val - - # Prepare values to query - entity_ids_joined = ", ".join([ - "\"{}\"".format(id) for id in sync_ids - ]) - attributes_joined = ", ".join([ - "\"{}\"".format(name) for name in attribute_names - ]) - [values] = self.session._call([{ - "action": "query", - "expression": ( - "select value, entity_id from ContextCustomAttributeValue " - "where entity_id in ({}) and configuration.key in ({})" - ).format(entity_ids_joined, attributes_joined) - }]) - - avalon_hier = [] - for value in values["data"]: - if value["value"] is None: - continue - 
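As an aside, the hierarchical inheritance this method finishes with (a few statements below) reduces to a breadth-first push-down. A minimal standalone sketch, assuming plain dict inputs (`entities`, `children_by_id`) rather than the factory's internals:

    import queue

    def push_down_hier_values(entities, children_by_id, project_id, project_values):
        # Children inherit every non-None hierarchical value from the chain
        # above them; a value set directly on a child overrides the
        # inherited one before being passed further down.
        down_queue = queue.Queue()
        down_queue.put((project_values, project_id))
        while not down_queue.empty():
            inherited, parent_id = down_queue.get()
            for child_id in children_by_id.get(parent_id, []):
                values = dict(inherited)
                for key, own_value in entities[child_id]["hier_attrs"].items():
                    if own_value is not None:
                        values[key] = own_value
                entities[child_id]["hier_attrs"].update(values)
                down_queue.put((values, child_id))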
entity_id = value["entity_id"] - key = value["configuration"]["key"] - store_key = "hier_attrs" - if key.startswith("avalon_"): - store_key = "avalon_attrs" - avalon_hier.append(key) - self.entities_dict[entity_id][store_key][key] = value["value"] - - # Get dictionary with not None hierarchical values to pull to childs - top_id = self.ft_project_id - project_values = {} - for key, value in self.entities_dict[top_id]["hier_attrs"].items(): - if value is not None: - project_values[key] = value - - for key in avalon_hier: - value = self.entities_dict[top_id]["avalon_attrs"][key] - if value is not None: - project_values[key] = value - - hier_down_queue = queue.Queue() - hier_down_queue.put((project_values, top_id)) - - while not hier_down_queue.empty(): - hier_values, parent_id = hier_down_queue.get() - for child_id in self.entities_dict[parent_id]["children"]: - _hier_values = hier_values.copy() - for name in attribute_names: - store_key = "hier_attrs" - if name.startswith("avalon_"): - store_key = "avalon_attrs" - value = self.entities_dict[child_id][store_key][name] - if value is not None: - _hier_values[name] = value - - self.entities_dict[child_id]["hier_attrs"].update(_hier_values) - hier_down_queue.put((_hier_values, child_id)) - - def remove_from_archived(self, mongo_id): - entity = self.avalon_archived_by_id.pop(mongo_id, None) - if not entity: - return - - if self._avalon_archived_ents is not None: - if entity in self._avalon_archived_ents: - self._avalon_archived_ents.remove(entity) - - if self._avalon_archived_by_name is not None: - name = entity["name"] - if name in self._avalon_archived_by_name: - name_ents = self._avalon_archived_by_name[name] - if entity in name_ents: - if len(name_ents) == 1: - self._avalon_archived_by_name.pop(name) - else: - self._avalon_archived_by_name[name].remove(entity) - - # TODO use custom None instead of __NOTSET__ - if self._avalon_archived_by_parent_id is not None: - parent_id = entity.get("data", {}).get( - "visualParent", "__NOTSET__" - ) - if parent_id is not None: - parent_id = str(parent_id) - - if parent_id in self._avalon_archived_by_parent_id: - parent_list = self._avalon_archived_by_parent_id[parent_id] - if entity not in parent_list: - self._avalon_archived_by_parent_id[parent_id].remove( - entity - ) - - def prepare_ftrack_ent_data(self): - not_set_ids = [] - for id, entity_dict in self.entities_dict.items(): - entity = entity_dict["entity"] - if entity is None: - not_set_ids.append(id) - continue - - self.entities_dict[id]["final_entity"] = {} - self.entities_dict[id]["final_entity"]["name"] = ( - entity_dict["name"] - ) - data = {} - data["ftrackId"] = entity["id"] - data["entityType"] = entity_dict["entity_type_orig"] - - for key, val in entity_dict.get("custom_attributes", []).items(): - data[key] = val - - for key, val in entity_dict.get("hier_attrs", []).items(): - data[key] = val - - if id == self.ft_project_id: - data["code"] = entity["name"] - self.entities_dict[id]["final_entity"]["data"] = data - self.entities_dict[id]["final_entity"]["type"] = "project" - - proj_schema = entity["project_schema"] - task_types = proj_schema["_task_type_schema"]["types"] - self.entities_dict[id]["final_entity"]["config"] = { - "tasks": [{"name": tt["name"]} for tt in task_types], - "apps": self.get_project_apps(data) - } - continue - - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - hierarchy = "" - if len(parents) > 0: - hierarchy = os.path.sep.join(parents) - - data["parents"] = 
parents - data["hierarchy"] = hierarchy - data["tasks"] = self.entities_dict[id].pop("tasks", []) - self.entities_dict[id]["final_entity"]["data"] = data - self.entities_dict[id]["final_entity"]["type"] = "asset" - - if not_set_ids: - self.log.debug(( - "- Debug information: Filtering bug, in entities dict are " - "empty dicts (function should not affect) <{}>" - ).format("| ".join(not_set_ids))) - for id in not_set_ids: - self.entities_dict.pop(id) - - def get_project_apps(self, proj_data): - apps = [] - missing_toml_msg = "Missing config file for application" - error_msg = ( - "Unexpected error happend during preparation of application" - ) - for app in proj_data.get("applications"): - try: - toml_path = avalon.lib.which_app(app) - # TODO report - if not toml_path: - self.log.warning(missing_toml_msg + '"{}"'.format(app)) - self.report_items["warning"][missing_toml_msg].append(app) - continue - - apps.append({ - "name": app, - "label": toml.load(toml_path)["label"] - }) - except Exception: - # TODO report - self.report_items["warning"][error_msg].append(app) - self.log.warning(( - "Error has happened during preparing application \"{}\"" - ).format(app), exc_info=True) - return apps - - def get_ent_path(self, ftrack_id): - ent_path = self._ent_pats_by_ftrack_id.get(ftrack_id) - if not ent_path: - entity = self.entities_dict[ftrack_id]["entity"] - ent_path = "/".join( - [ent["name"] for ent in entity["link"]] - ) - self._ent_pats_by_ftrack_id[ftrack_id] = ent_path - - return ent_path - - def prepare_avalon_entities(self, ft_project_name): - self.log.debug(( - "* Preparing avalon entities " - "(separate to Create, Update and Deleted groups)" - )) - # Avalon entities - self.dbcon.install() - self.dbcon.Session["AVALON_PROJECT"] = ft_project_name - avalon_project = self.dbcon.find_one({"type": "project"}) - avalon_entities = self.dbcon.find({"type": "asset"}) - self.avalon_project = avalon_project - self.avalon_entities = avalon_entities - - ftrack_avalon_mapper = {} - avalon_ftrack_mapper = {} - create_ftrack_ids = [] - update_ftrack_ids = [] - - same_mongo_id = [] - all_mongo_ids = {} - for ftrack_id, entity_dict in self.entities_dict.items(): - mongo_id = entity_dict["avalon_attrs"].get(self.id_cust_attr) - if not mongo_id: - continue - if mongo_id in all_mongo_ids: - same_mongo_id.append(mongo_id) - else: - all_mongo_ids[mongo_id] = [] - all_mongo_ids[mongo_id].append(ftrack_id) - - if avalon_project: - mongo_id = str(avalon_project["_id"]) - ftrack_avalon_mapper[self.ft_project_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = self.ft_project_id - update_ftrack_ids.append(self.ft_project_id) - else: - create_ftrack_ids.append(self.ft_project_id) - - # make it go hierarchically - prepare_queue = queue.Queue() - - for child_id in self.entities_dict[self.ft_project_id]["children"]: - prepare_queue.put(child_id) - - while not prepare_queue.empty(): - ftrack_id = prepare_queue.get() - for child_id in self.entities_dict[ftrack_id]["children"]: - prepare_queue.put(child_id) - - entity_dict = self.entities_dict[ftrack_id] - ent_path = self.get_ent_path(ftrack_id) - - mongo_id = entity_dict["avalon_attrs"].get(self.id_cust_attr) - av_ent_by_mongo_id = self.avalon_ents_by_id.get(mongo_id) - if av_ent_by_mongo_id: - av_ent_ftrack_id = av_ent_by_mongo_id.get("data", {}).get( - "ftrackId" - ) - is_right = False - else_match_better = False - if av_ent_ftrack_id and av_ent_ftrack_id == ftrack_id: - is_right = True - - elif mongo_id not in same_mongo_id: - is_right = True - - else: - 
ftrack_ids_with_same_mongo = all_mongo_ids[mongo_id] - for _ftrack_id in ftrack_ids_with_same_mongo: - if _ftrack_id == av_ent_ftrack_id: - continue - - _entity_dict = self.entities_dict[_ftrack_id] - _mongo_id = _entity_dict["avalon_attrs"][ - self.id_cust_attr - ] - _av_ent_by_mongo_id = self.avalon_ents_by_id.get( - _mongo_id - ) - _av_ent_ftrack_id = _av_ent_by_mongo_id.get( - "data", {} - ).get("ftrackId") - if _av_ent_ftrack_id == ftrack_id: - else_match_better = True - break - - if not is_right and not else_match_better: - entity = entity_dict["entity"] - ent_path_items = [ent["name"] for ent in entity["link"]] - parents = ent_path_items[1:len(ent_path_items)-1:] - av_parents = av_ent_by_mongo_id["data"]["parents"] - if av_parents == parents: - is_right = True - else: - name = entity_dict["name"] - av_name = av_ent_by_mongo_id["name"] - if name == av_name: - is_right = True - - if is_right: - self.log.debug( - "Existing (by MongoID) <{}>".format(ent_path) - ) - ftrack_avalon_mapper[ftrack_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = ftrack_id - update_ftrack_ids.append(ftrack_id) - continue - - mongo_id = self.avalon_ents_by_ftrack_id.get(ftrack_id) - if not mongo_id: - mongo_id = self.avalon_ents_by_name.get(entity_dict["name"]) - if mongo_id: - self.log.debug( - "Existing (by matching name) <{}>".format(ent_path) - ) - else: - self.log.debug( - "Existing (by FtrackID in mongo) <{}>".format(ent_path) - ) - - if mongo_id: - ftrack_avalon_mapper[ftrack_id] = mongo_id - avalon_ftrack_mapper[mongo_id] = ftrack_id - update_ftrack_ids.append(ftrack_id) - continue - - self.log.debug("New <{}>".format(ent_path)) - create_ftrack_ids.append(ftrack_id) - - deleted_entities = [] - for mongo_id in self.avalon_ents_by_id: - if mongo_id in avalon_ftrack_mapper: - continue - deleted_entities.append(mongo_id) - - av_ent = self.avalon_ents_by_id[mongo_id] - av_ent_path_items = [p for p in av_ent["data"]["parents"]] - av_ent_path_items.append(av_ent["name"]) - self.log.debug("Deleted <{}>".format("/".join(av_ent_path_items))) - - self.ftrack_avalon_mapper = ftrack_avalon_mapper - self.avalon_ftrack_mapper = avalon_ftrack_mapper - self.create_ftrack_ids = create_ftrack_ids - self.update_ftrack_ids = update_ftrack_ids - self.deleted_entities = deleted_entities - - self.log.debug(( - "Ftrack -> Avalon comparation: New <{}> " - "| Existing <{}> | Deleted <{}>" - ).format( - len(create_ftrack_ids), - len(update_ftrack_ids), - len(deleted_entities) - )) - - def filter_with_children(self, ftrack_id): - if ftrack_id not in self.entities_dict: - return - ent_dict = self.entities_dict[ftrack_id] - parent_id = ent_dict["parent_id"] - self.entities_dict[parent_id]["children"].remove(ftrack_id) - - children_queue = queue.Queue() - children_queue.put(ftrack_id) - while not children_queue.empty(): - _ftrack_id = children_queue.get() - entity_dict = self.entities_dict.pop(_ftrack_id, {"children": []}) - for child_id in entity_dict["children"]: - children_queue.put(child_id) - - def prepare_changes(self): - self.log.debug("* Preparing changes for avalon/ftrack") - hierarchy_changing_ids = [] - ignore_keys = collections.defaultdict(list) - - update_queue = queue.Queue() - for ftrack_id in self.update_ftrack_ids: - update_queue.put(ftrack_id) - - while not update_queue.empty(): - ftrack_id = update_queue.get() - if ftrack_id == self.ft_project_id: - changes = self.prepare_project_changes() - if changes: - self.updates[self.avalon_project_id] = changes - continue - - ftrack_ent_dict = self.entities_dict[ftrack_id] 
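The create/update split performed above follows a fixed precedence that is easy to lose in the surrounding bookkeeping. A condensed sketch, with hypothetical lookup dicts standing in for the cached properties:

    def classify(ftrack_id, ent, avalon_by_mongo_id, avalon_by_ftrack_id, avalon_by_name):
        # Precedence mirrors the code above: the avalon_mongo_id custom
        # attribute wins, then a stored ftrackId, then a plain name match;
        # anything left unmatched gets created from scratch.
        mongo_id = ent["avalon_attrs"].get("avalon_mongo_id")
        if mongo_id and mongo_id in avalon_by_mongo_id:
            return "update", mongo_id
        mongo_id = avalon_by_ftrack_id.get(ftrack_id) or avalon_by_name.get(ent["name"])
        if mongo_id:
            return "update", mongo_id
        return "create", None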
- - # *** check parents - parent_check = False - - ftrack_parent_id = ftrack_ent_dict["parent_id"] - avalon_id = self.ftrack_avalon_mapper[ftrack_id] - avalon_entity = self.avalon_ents_by_id[avalon_id] - avalon_parent_id = avalon_entity["data"]["visualParent"] - if avalon_parent_id is not None: - avalon_parent_id = str(avalon_parent_id) - - ftrack_parent_mongo_id = self.ftrack_avalon_mapper[ - ftrack_parent_id - ] - - # if parent is project - if (ftrack_parent_mongo_id == avalon_parent_id) or ( - ftrack_parent_id == self.ft_project_id and - avalon_parent_id is None - ): - parent_check = True - - # check name - ftrack_name = ftrack_ent_dict["name"] - avalon_name = avalon_entity["name"] - name_check = ftrack_name == avalon_name - - # IDEAL STATE: both parent and name check passed - if parent_check and name_check: - continue - - # If entity is changeable then change values of parent or name - if self.changeability_by_mongo_id[avalon_id]: - # TODO logging - if not parent_check: - if ftrack_parent_mongo_id == str(self.avalon_project_id): - new_parent_name = self.entities_dict[ - self.ft_project_id]["name"] - new_parent_id = None - else: - new_parent_name = self.avalon_ents_by_id[ - ftrack_parent_mongo_id]["name"] - new_parent_id = ObjectId(ftrack_parent_mongo_id) - - if avalon_parent_id == str(self.avalon_project_id): - old_parent_name = self.entities_dict[ - self.ft_project_id]["name"] - else: - old_parent_name = self.avalon_ents_by_id[ - ftrack_parent_mongo_id]["name"] - - self.updates[avalon_id]["data"] = { - "visualParent": new_parent_id - } - ignore_keys[ftrack_id].append("data.visualParent") - self.log.debug(( - "Avalon entity \"{}\" changed parent \"{}\" -> \"{}\"" - ).format(avalon_name, old_parent_name, new_parent_name)) - - if not name_check: - self.updates[avalon_id]["name"] = ftrack_name - ignore_keys[ftrack_id].append("name") - self.log.debug( - "Avalon entity \"{}\" was renamed to \"{}\"".format( - avalon_name, ftrack_name - ) - ) - continue - - # parents and hierarchy must be recalculated - hierarchy_changing_ids.append(ftrack_id) - - # Parent is project if avalon_parent_id is set to None - if avalon_parent_id is None: - avalon_parent_id = str(self.avalon_project_id) - - if not name_check: - ent_path = self.get_ent_path(ftrack_id) - # TODO report - # TODO logging - self.entities_dict[ftrack_id]["name"] = avalon_name - self.entities_dict[ftrack_id]["entity"]["name"] = ( - avalon_name - ) - self.entities_dict[ftrack_id]["final_entity"]["name"] = ( - avalon_name - ) - self.log.warning("Name was changed back to {} <{}>".format( - avalon_name, ent_path - )) - self._ent_pats_by_ftrack_id.pop(ftrack_id, None) - msg = ( - " It is not allowed to change" - " name of entity or it's parents" - " that already has published context" - ) - self.report_items["warning"][msg].append(ent_path) - - # skip parent oricessing if hierarchy didn't change - if parent_check: - continue - - # Logic when parenting(hierarchy) has changed and should not - old_ftrack_parent_id = self.avalon_ftrack_mapper.get( - avalon_parent_id - ) - - # If last ftrack parent id from mongo entity exist then just - # remap paren_id on entity - if old_ftrack_parent_id: - # TODO report - # TODO logging - ent_path = self.get_ent_path(ftrack_id) - msg = ( - " It is not allowed" - " to change hierarchy of entity or it's parents" - " that already has published context" - ) - self.report_items["warning"][msg].append(ent_path) - self.log.warning(( - "Entity has published context so was moved" - " back in hierarchy <{}>" - 
).format(ent_path)) - self.entities_dict[ftrack_id]["entity"]["parent_id"] = ( - old_ftrack_parent_id - ) - self.entities_dict[ftrack_id]["parent_id"] = ( - old_ftrack_parent_id - ) - self.entities_dict[old_ftrack_parent_id][ - "children" - ].append(ftrack_id) - - continue - - old_parent_ent = self.avalon_ents_by_id.get(avalon_parent_id) - if not old_parent_ent: - old_parent_ent = self.avalon_archived_by_id.get( - avalon_parent_id - ) - - # TODO report - # TODO logging - if not old_parent_ent: - self.log.warning(( - "Parent entity was not found by id" - " - Trying to find by parent name" - )) - ent_path = self.get_ent_path(ftrack_id) - - parents = avalon_entity["data"]["parents"] - parent_name = parents[-1] - matching_entity_id = None - for id, entity_dict in self.entities_dict.items(): - if entity_dict["name"] == parent_name: - matching_entity_id = id - break - - if matching_entity_id is None: - # TODO logging - # TODO report (turn off auto-sync?) - self.log.error(( - "Entity has published context but was moved in" - " hierarchy and previous parent was not found so it is" - " not possible to solve this programmatically <{}>" - ).format(ent_path)) - msg = ( - " Parent of entity can't be" - " changed due to published context and previous parent" - " was not found" - ) - self.report_items["error"][msg].append(ent_path) - self.filter_with_children(ftrack_id) - continue - - matching_ent_dict = self.entities_dict.get(matching_entity_id) - match_ent_parents = matching_ent_dict.get( - "final_entity", {}).get( - "data", {}).get( - "parents", ["__NOT_SET__"] - ) - # TODO logging - # TODO report - if ( - len(match_ent_parents) >= len(parents) or - match_ent_parents[:-1] != parents - ): - ent_path = self.get_ent_path(ftrack_id) - self.log.error(( - "Entity has published context but was moved in" - " hierarchy and previous parents were moved too it is" - " not possible to solve this programmatically <{}>" - ).format(ent_path)) - msg = ( - " Parent of entity can't be" - " changed due to published context but whole hierarchy" - " was scrambled" - ) - continue - - old_parent_ent = matching_ent_dict["final_entity"] - - parent_id = self.ft_project_id - entities_to_create = [] - # TODO logging - self.log.warning( - "Ftrack entities must be recreated because have" - " published context but were removed" - ) - - _avalon_ent = old_parent_ent - - self.updates[avalon_parent_id] = {"type": "asset"} - success = True - while True: - _vis_par = _avalon_ent["data"]["visualParent"] - _name = _avalon_ent["name"] - if _name in self.all_ftrack_names: - av_ent_path_items = _avalon_ent["data"]["parents"] - av_ent_path_items.append(_name) - av_ent_path = "/".join(av_ent_path_items) - # TODO report - # TODO logging - self.log.error(( - "Can't recreate entity in Ftrack because entity with" - " same name already exists in different hierarchy <{}>" - ).format(av_ent_path)) - msg = ( - " Parent of entity can't be" - " changed due to published context but previous parent" - " had name that exist in different hierarchy level" - ) - self.report_items["error"][msg].append(av_ent_path) - self.filter_with_children(ftrack_id) - success = False - break - - entities_to_create.append(_avalon_ent) - if _vis_par is None: - break - - _vis_par = str(_vis_par) - _mapped = self.avalon_ftrack_mapper.get(_vis_par) - if _mapped: - parent_id = _mapped - break - - _avalon_ent = self.avalon_ents_by_id.get(_vis_par) - if not _avalon_ent: - _avalon_ent = self.avalon_archived_by_id.get(_vis_par) - - if success is False: - continue - - new_entity_id = 
None - for av_entity in reversed(entities_to_create): - new_entity_id = self.create_ftrack_ent_from_avalon_ent( - av_entity, parent_id - ) - update_queue.put(new_entity_id) - - if new_entity_id: - ftrack_ent_dict["entity"]["parent_id"] = new_entity_id - - if hierarchy_changing_ids: - self.reload_parents(hierarchy_changing_ids) - - for ftrack_id in self.update_ftrack_ids: - if ftrack_id == self.ft_project_id: - continue - - avalon_id = self.ftrack_avalon_mapper[ftrack_id] - avalon_entity = self.avalon_ents_by_id[avalon_id] - - avalon_attrs = self.entities_dict[ftrack_id]["avalon_attrs"] - if ( - self.id_cust_attr not in avalon_attrs or - avalon_attrs[self.id_cust_attr] != avalon_id - ): - configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id"][self.id_cust_attr] - - _entity_key = collections.OrderedDict({ - "configuration_id": configuration_id, - "entity_id": ftrack_id - }) - - self.session.recorded_operations.push( - fa_session.ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - _entity_key, - "value", - fa_session.ftrack_api.symbol.NOT_SET, - avalon_id - ) - ) - # check rest of data - data_changes = self.compare_dict( - self.entities_dict[ftrack_id]["final_entity"], - avalon_entity, - ignore_keys[ftrack_id] - ) - if data_changes: - self.updates[avalon_id] = self.merge_dicts( - data_changes, - self.updates[avalon_id] - ) - - def synchronize(self): - self.log.debug("* Synchronization begins") - avalon_project_id = self.ftrack_avalon_mapper.get(self.ft_project_id) - if avalon_project_id: - self.avalon_project_id = ObjectId(avalon_project_id) - - # remove filtered ftrack ids from create/update list - for ftrack_id in self.all_filtered_entities: - if ftrack_id in self.create_ftrack_ids: - self.create_ftrack_ids.remove(ftrack_id) - elif ftrack_id in self.update_ftrack_ids: - self.update_ftrack_ids.remove(ftrack_id) - - self.log.debug("* Processing entities for archivation") - self.delete_entities() - - self.log.debug("* Processing new entities") - # Create not created entities - for ftrack_id in self.create_ftrack_ids: - # CHECK it is possible that entity was already created - # because is parent of another entity which was processed first - if ftrack_id in self.ftrack_avalon_mapper: - continue - self.create_avalon_entity(ftrack_id) - - if len(self.create_list) > 0: - self.dbcon.insert_many(self.create_list) - - self.session.commit() - - self.log.debug("* Processing entities for update") - self.prepare_changes() - self.update_entities() - self.session.commit() - - def create_avalon_entity(self, ftrack_id): - if ftrack_id == self.ft_project_id: - self.create_avalon_project() - return - - entity_dict = self.entities_dict[ftrack_id] - parent_ftrack_id = entity_dict["parent_id"] - avalon_parent = None - if parent_ftrack_id != self.ft_project_id: - avalon_parent = self.ftrack_avalon_mapper.get(parent_ftrack_id) - # if not avalon_parent: - # self.create_avalon_entity(parent_ftrack_id) - # avalon_parent = self.ftrack_avalon_mapper[parent_ftrack_id] - avalon_parent = ObjectId(avalon_parent) - - # avalon_archived_by_id avalon_archived_by_name - current_id = ( - entity_dict["avalon_attrs"].get(self.id_cust_attr) or "" - ).strip() - mongo_id = current_id - name = entity_dict["name"] - - # Check if exist archived asset in mongo - by ID - unarchive = False - unarchive_id = self.check_unarchivation(ftrack_id, mongo_id, name) - if unarchive_id is not None: - unarchive = True - mongo_id = unarchive_id - - item = entity_dict["final_entity"] - try: - new_id = 
ObjectId(mongo_id) - if mongo_id in self.avalon_ftrack_mapper: - new_id = ObjectId() - except InvalidId: - new_id = ObjectId() - - item["_id"] = new_id - item["parent"] = self.avalon_project_id - item["schema"] = self.entity_schemas["asset"] - item["data"]["visualParent"] = avalon_parent - - new_id_str = str(new_id) - self.ftrack_avalon_mapper[ftrack_id] = new_id_str - self.avalon_ftrack_mapper[new_id_str] = ftrack_id - - self._avalon_ents_by_id[new_id_str] = item - self._avalon_ents_by_ftrack_id[ftrack_id] = new_id_str - self._avalon_ents_by_name[item["name"]] = new_id_str - - if current_id != new_id_str: - # store mongo id to ftrack entity - configuration_id = self.entities_dict[ftrack_id][ - "avalon_attrs_id" - ][self.id_cust_attr] - _entity_key = collections.OrderedDict({ - "configuration_id": configuration_id, - "entity_id": ftrack_id - }) - - self.session.recorded_operations.push( - fa_session.ftrack_api.operation.UpdateEntityOperation( - "ContextCustomAttributeValue", - _entity_key, - "value", - fa_session.ftrack_api.symbol.NOT_SET, - new_id_str - ) - ) - - if unarchive is False: - self.create_list.append(item) - return - # If unarchive then replace entity data in database - self.dbcon.replace_one({"_id": new_id}, item) - self.remove_from_archived(mongo_id) - av_ent_path_items = item["data"]["parents"] - av_ent_path_items.append(item["name"]) - av_ent_path = "/".join(av_ent_path_items) - self.log.debug("Entity was unarchived <{}>".format(av_ent_path)) - - def check_unarchivation(self, ftrack_id, mongo_id, name): - archived_by_id = self.avalon_archived_by_id.get(mongo_id) - archived_by_name = self.avalon_archived_by_name.get(name) - - # if not found in archived then skip - if not archived_by_id and not archived_by_name: - return None - - entity_dict = self.entities_dict[ftrack_id] - - if archived_by_id: - # if is changeable then unarchive (nothing to check here) - if self.changeability_by_mongo_id[mongo_id]: - return mongo_id - - # TODO replace `__NOTSET__` with custom None constant - archived_parent_id = archived_by_id["data"].get( - "visualParent", "__NOTSET__" - ) - archived_parents = archived_by_id["data"].get("parents") - archived_name = archived_by_id["name"] - - if ( - archived_name != entity_dict["name"] or - archived_parents != entity_dict["final_entity"]["data"][ - "parents" - ] - ): - return None - - return mongo_id - - # First check if there is any that have same parents - for archived in archived_by_name: - mongo_id = str(archived["_id"]) - archived_parents = archived.get("data", {}).get("parents") - if ( - archived_parents == entity_dict["final_entity"]["data"][ - "parents" - ] - ): - return mongo_id - - # Secondly try to find more close to current ftrack entity - first_changeable = None - for archived in archived_by_name: - mongo_id = str(archived["_id"]) - if not self.changeability_by_mongo_id[mongo_id]: - continue - - if first_changeable is None: - first_changeable = mongo_id - - ftrack_parent_id = entity_dict["parent_id"] - map_ftrack_parent_id = self.ftrack_avalon_mapper.get( - ftrack_parent_id - ) - - # TODO replace `__NOTSET__` with custom None constant - archived_parent_id = archived.get("data", {}).get( - "visualParent", "__NOTSET__" - ) - if archived_parent_id is not None: - archived_parent_id = str(archived_parent_id) - - # skip if parent is archived - How this should be possible? 
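Condensed, the unarchivation lookup in this method tries three matches in order. The sketch below uses simplified parameters (a precomputed `parents` list and a `changeable` dict), not the method's real signature:

    def find_unarchive_candidate(name, parents, mongo_id, archived_by_id,
                                 archived_by_name, changeable):
        # 1) An id match wins when the document is still changeable, or is
        #    identical (same name and same parents).
        by_id = archived_by_id.get(mongo_id)
        if by_id:
            if changeable.get(mongo_id):
                return mongo_id
            if by_id["name"] == name and by_id["data"].get("parents") == parents:
                return mongo_id
            return None
        # 2) Same name and same parents beats 3) the first archived
        #    document with the same name that is safe to change.
        first_changeable = None
        for archived in archived_by_name.get(name, []):
            archived_id = str(archived["_id"])
            if archived.get("data", {}).get("parents") == parents:
                return archived_id
            if first_changeable is None and changeable.get(archived_id):
                first_changeable = archived_id
        return first_changeable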
- parent_entity = self.avalon_ents_by_id.get(archived_parent_id) - if ( - parent_entity and ( - map_ftrack_parent_id is not None and - map_ftrack_parent_id == str(parent_entity["_id"]) - ) - ): - return mongo_id - # Last return first changeable with same name (or None) - return first_changeable - - def create_avalon_project(self): - project_item = self.entities_dict[self.ft_project_id]["final_entity"] - mongo_id = ( - self.entities_dict[self.ft_project_id]["avalon_attrs"].get( - self.id_cust_attr - ) or "" - ).strip() - - try: - new_id = ObjectId(mongo_id) - except InvalidId: - new_id = ObjectId() - - project_item["_id"] = new_id - project_item["parent"] = None - project_item["schema"] = self.entity_schemas["project"] - project_item["config"]["schema"] = self.entity_schemas["config"] - project_item["config"]["template"] = self.get_avalon_project_template() - - self.ftrack_avalon_mapper[self.ft_project_id] = new_id - self.avalon_ftrack_mapper[new_id] = self.ft_project_id - - self.avalon_project_id = new_id - - self._avalon_ents_by_id[str(new_id)] = project_item - self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id) - self._avalon_ents_by_name[project_item["name"]] = str(new_id) - - self.create_list.append(project_item) - - # store mongo id to ftrack entity - entity = self.entities_dict[self.ft_project_id]["entity"] - entity["custom_attributes"][self.id_cust_attr] = str(new_id) - - def get_avalon_project_template(self): - """Get avalon template - Returns: - dictionary with templates - """ - project_name = self.entities_dict[self.ft_project_id]["name"] - templates = Anatomy(project_name).templates - return { - "workfile": templates["avalon"]["workfile"], - "work": templates["avalon"]["work"], - "publish": templates["avalon"]["publish"] - } - - def _bubble_changeability(self, unchangeable_ids): - unchangeable_queue = queue.Queue() - for entity_id in unchangeable_ids: - unchangeable_queue.put((entity_id, False)) - - processed_parents_ids = [] - subsets_to_remove = [] - while not unchangeable_queue.empty(): - entity_id, child_is_archived = unchangeable_queue.get() - # skip if already processed - if entity_id in processed_parents_ids: - continue - - entity = self.avalon_ents_by_id.get(entity_id) - # if entity is not archived but unchageable child was then skip - # - archived entities should not affect not archived? - if entity and child_is_archived: - continue - - # set changeability of current entity to False - self._changeability_by_mongo_id[entity_id] = False - processed_parents_ids.append(entity_id) - # if not entity then is probably archived - if not entity: - entity = self.avalon_archived_by_id.get(entity_id) - child_is_archived = True - - if not entity: - # if entity is not found then it is subset without parent - if entity_id in unchangeable_ids: - subsets_to_remove.append(entity_id) - else: - # TODO logging - What is happening here? 
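What `_bubble_changeability` is doing deserves a plainer statement: any asset that owns subsets, and every ancestor of such an asset, must be locked against rename and reparent. Ignoring the archived-entity special cases handled in this method, a sketch with flat dict inputs:

    import queue

    def bubble_unchangeable(subset_owner_ids, parent_of):
        # parent_of maps mongo id -> parent mongo id (None at the project).
        # Walk each subset owner up to the root, locking everything on the way.
        locked = set()
        todo = queue.Queue()
        for entity_id in subset_owner_ids:
            todo.put(entity_id)
        while not todo.empty():
            entity_id = todo.get()
            if entity_id in locked:
                continue
            locked.add(entity_id)
            parent_id = parent_of.get(entity_id)
            if parent_id is not None:
                todo.put(parent_id)
        return locked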
- self.log.warning(( - "In avalon are entities without valid parents that" - " lead to Project (should not cause errors)" - " - MongoId <{}>" - ).format(str(entity_id))) - continue - - # skip if parent is project - parent_id = entity["data"]["visualParent"] - if parent_id is None: - continue - unchangeable_queue.put((str(parent_id), child_is_archived)) - - self._delete_subsets_without_asset(subsets_to_remove) - - def _delete_subsets_without_asset(self, not_existing_parents): - subset_ids = [] - version_ids = [] - repre_ids = [] - to_delete = [] - - for parent_id in not_existing_parents: - subsets = self.subsets_by_parent_id.get(parent_id) - if not subsets: - continue - for subset in subsets: - if subset.get("type") != "subset": - continue - subset_ids.append(subset["_id"]) - - db_subsets = self.dbcon.find({ - "_id": {"$in": subset_ids}, - "type": "subset" - }) - if not db_subsets: - return - - db_versions = self.dbcon.find({ - "parent": {"$in": subset_ids}, - "type": "version" - }) - if db_versions: - version_ids = [ver["_id"] for ver in db_versions] - - db_repres = self.dbcon.find({ - "parent": {"$in": version_ids}, - "type": "representation" - }) - if db_repres: - repre_ids = [repre["_id"] for repre in db_repres] - - to_delete.extend(subset_ids) - to_delete.extend(version_ids) - to_delete.extend(repre_ids) - - self.dbcon.delete_many({"_id": {"$in": to_delete}}) - - # Probably deprecated - def _check_changeability(self, parent_id=None): - for entity in self.avalon_ents_by_parent_id[parent_id]: - mongo_id = str(entity["_id"]) - is_changeable = self._changeability_by_mongo_id.get(mongo_id) - if is_changeable is not None: - continue - - self._check_changeability(mongo_id) - is_changeable = True - for child in self.avalon_ents_by_parent_id[parent_id]: - if not self._changeability_by_mongo_id[str(child["_id"])]: - is_changeable = False - break - - if is_changeable is True: - is_changeable = (mongo_id in self.subsets_by_parent_id) - self._changeability_by_mongo_id[mongo_id] = is_changeable - - def update_entities(self): - mongo_changes_bulk = [] - for mongo_id, changes in self.updates.items(): - filter = {"_id": ObjectId(mongo_id)} - change_data = self.from_dict_to_set(changes) - mongo_changes_bulk.append(UpdateOne(filter, change_data)) - - if not mongo_changes_bulk: - # TODO LOG - return - self.dbcon.bulk_write(mongo_changes_bulk) - - def from_dict_to_set(self, data): - result = {"$set": {}} - dict_queue = queue.Queue() - dict_queue.put((None, data)) - - while not dict_queue.empty(): - _key, _data = dict_queue.get() - for key, value in _data.items(): - new_key = key - if _key is not None: - new_key = "{}.{}".format(_key, key) - - if not isinstance(value, dict): - result["$set"][new_key] = value - continue - dict_queue.put((new_key, value)) - return result - - def reload_parents(self, hierarchy_changing_ids): - parents_queue = queue.Queue() - parents_queue.put((self.ft_project_id, [], False)) - while not parents_queue.empty(): - ftrack_id, parent_parents, changed = parents_queue.get() - _parents = parent_parents.copy() - if ftrack_id not in hierarchy_changing_ids and not changed: - if ftrack_id != self.ft_project_id: - _parents.append(self.entities_dict[ftrack_id]["name"]) - for child_id in self.entities_dict[ftrack_id]["children"]: - parents_queue.put((child_id, _parents, changed)) - continue - - changed = True - parents = [par for par in _parents] - hierarchy = "/".join(parents) - self.entities_dict[ftrack_id][ - "final_entity"]["data"]["parents"] = parents - self.entities_dict[ftrack_id][ - 
"final_entity"]["data"]["hierarchy"] = hierarchy - - _parents.append(self.entities_dict[ftrack_id]["name"]) - for child_id in self.entities_dict[ftrack_id]["children"]: - parents_queue.put((child_id, _parents, changed)) - - if ftrack_id in self.create_ftrack_ids: - mongo_id = self.ftrack_avalon_mapper[ftrack_id] - if "data" not in self.updates[mongo_id]: - self.updates[mongo_id]["data"] = {} - self.updates[mongo_id]["data"]["parents"] = parents - self.updates[mongo_id]["data"]["hierarchy"] = hierarchy - - def prepare_project_changes(self): - ftrack_ent_dict = self.entities_dict[self.ft_project_id] - ftrack_entity = ftrack_ent_dict["entity"] - avalon_code = self.avalon_project["data"]["code"] - # TODO Is possible to sync if full name was changed? - # if ftrack_ent_dict["name"] != self.avalon_project["name"]: - # ftrack_entity["full_name"] = avalon_name - # self.entities_dict[self.ft_project_id]["name"] = avalon_name - # self.entities_dict[self.ft_project_id]["final_entity"][ - # "name" - # ] = avalon_name - - # TODO logging - # TODO report - # TODO May this happen? Is possible to change project code? - if ftrack_entity["name"] != avalon_code: - ftrack_entity["name"] = avalon_code - self.entities_dict[self.ft_project_id]["final_entity"]["data"][ - "code" - ] = avalon_code - self.session.commit() - sub_msg = ( - "Project code was changed back to \"{}\"".format(avalon_code) - ) - msg = ( - "It is not allowed to change" - " project code after synchronization" - ) - self.report_items["warning"][msg] = sub_msg - self.log.warning(sub_msg) - - return self.compare_dict( - self.entities_dict[self.ft_project_id]["final_entity"], - self.avalon_project - ) - - def compare_dict(self, dict_new, dict_old, _ignore_keys=[]): - # _ignore_keys may be used for keys nested dict like"data.visualParent" - changes = {} - ignore_keys = [] - for key_val in _ignore_keys: - key_items = key_val.split(".") - if len(key_items) == 1: - ignore_keys.append(key_items[0]) - - for key, value in dict_new.items(): - if key in ignore_keys: - continue - - if key not in dict_old: - changes[key] = value - continue - - if isinstance(value, dict): - if not isinstance(dict_old[key], dict): - changes[key] = value - continue - - _new_ignore_keys = [] - for key_val in _ignore_keys: - key_items = key_val.split(".") - if len(key_items) <= 1: - continue - _new_ignore_keys.append(".".join(key_items[1:])) - - _changes = self.compare_dict( - value, dict_old[key], _new_ignore_keys - ) - if _changes: - changes[key] = _changes - continue - - if value != dict_old[key]: - changes[key] = value - - return changes - - def merge_dicts(self, dict_new, dict_old): - # _ignore_keys may be used for keys nested dict like"data.visualParent" - for key, value in dict_new.items(): - if key not in dict_old: - dict_old[key] = value - continue - - if isinstance(value, dict): - dict_old[key] = self.merge_dicts(value, dict_old[key]) - continue - - dict_old[key] = value - - return dict_old - - def delete_entities(self): - if not self.deleted_entities: - return - # Try to order so child is not processed before parent - deleted_entities = [] - _deleted_entities = [id for id in self.deleted_entities] - - while True: - if not _deleted_entities: - break - _ready = [] - for mongo_id in _deleted_entities: - ent = self.avalon_ents_by_id[mongo_id] - vis_par = ent["data"]["visualParent"] - if ( - vis_par is not None and - str(vis_par) in self.deleted_entities - ): - continue - _ready.append(mongo_id) - - for id in _ready: - deleted_entities.append(id) - 
_deleted_entities.remove(id) - - delete_ids = [] - for mongo_id in deleted_entities: - # delete if they are deletable - if self.changeability_by_mongo_id[mongo_id]: - delete_ids.append(ObjectId(mongo_id)) - continue - - # check if any new created entity match same entity - # - name and parents must match - deleted_entity = self.avalon_ents_by_id[mongo_id] - name = deleted_entity["name"] - parents = deleted_entity["data"]["parents"] - similar_ent_id = None - for ftrack_id in self.create_ftrack_ids: - _ent_final = self.entities_dict[ftrack_id]["final_entity"] - if _ent_final["name"] != name: - continue - if _ent_final["data"]["parents"] != parents: - continue - - # If in create is "same" then we can "archive" current - # since will be unarchived in create method - similar_ent_id = ftrack_id - break - - # If similar entity(same name and parents) is in create - # entities list then just change from create to update - if similar_ent_id is not None: - self.create_ftrack_ids.remove(similar_ent_id) - self.update_ftrack_ids.append(similar_ent_id) - self.avalon_ftrack_mapper[mongo_id] = similar_ent_id - self.ftrack_avalon_mapper[similar_ent_id] = mongo_id - continue - - found_by_name_id = None - for ftrack_id, ent_dict in self.entities_dict.items(): - if not ent_dict.get("name"): - continue - - if name == ent_dict["name"]: - found_by_name_id = ftrack_id - break - - if found_by_name_id is not None: - # * THESE conditins are too complex to implement in first stage - # - probably not possible to solve if this happen - # if found_by_name_id in self.create_ftrack_ids: - # # reparent entity of the new one create? - # pass - # - # elif found_by_name_id in self.update_ftrack_ids: - # found_mongo_id = self.ftrack_avalon_mapper[found_by_name_id] - # - # ent_dict = self.entities_dict[found_by_name_id] - - # TODO report - CRITICAL entity with same name alread exists in - # different hierarchy - can't recreate entity - continue - - _vis_parent = str(deleted_entity["data"]["visualParent"]) - if _vis_parent is None: - _vis_parent = self.avalon_project_id - ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent] - self.create_ftrack_ent_from_avalon_ent( - deleted_entity, ftrack_parent_id - ) - - filter = {"_id": {"$in": delete_ids}, "type": "asset"} - self.dbcon.update_many(filter, {"$set": {"type": "archived_asset"}}) - - def create_ftrack_ent_from_avalon_ent(self, av_entity, parent_id): - new_entity = None - parent_entity = self.entities_dict[parent_id]["entity"] - - _name = av_entity["name"] - _type = av_entity["data"].get("entityType", "folder") - - self.log.debug(( - "Re-ceating deleted entity {} <{}>" - ).format(_name, _type)) - - new_entity = self.session.create(_type, { - "name": _name, - "parent": parent_entity - }) - - final_entity = {} - for k, v in av_entity.items(): - final_entity[k] = v - - if final_entity.get("type") != "asset": - final_entity["type"] = "asset" - - new_entity_id = new_entity["id"] - new_entity_data = { - "entity": new_entity, - "parent_id": parent_id, - "entity_type": _type.lower(), - "entity_type_orig": _type, - "name": _name, - "final_entity": final_entity - } - for k, v in new_entity_data.items(): - self.entities_dict[new_entity_id][k] = v - - p_chilren = self.entities_dict[parent_id]["children"] - if new_entity_id not in p_chilren: - self.entities_dict[parent_id]["children"].append(new_entity_id) - - cust_attr, hier_attrs = self.get_avalon_attr() - for _attr in cust_attr: - key = _attr["key"] - if key not in av_entity["data"]: - continue - - if key not in 
new_entity["custom_attributes"]: - continue - - value = av_entity["data"][key] - if not value: - continue - - new_entity["custom_attributes"][key] = value - - av_entity_id = str(av_entity["_id"]) - new_entity["custom_attributes"][self.id_cust_attr] = av_entity_id - - self.ftrack_avalon_mapper[new_entity_id] = av_entity_id - self.avalon_ftrack_mapper[av_entity_id] = new_entity_id - - self.session.commit() - - ent_path = self.get_ent_path(new_entity_id) - msg = ( - "Deleted entity was recreated because had (or his children)" - " published context" - ) - - self.report_items["info"][msg].append(ent_path) - - return new_entity_id - - def regex_duplicate_interface(self): - items = [] - if self.failed_regex or self.tasks_failed_regex: - subtitle = "Not allowed symbols in entity names:" - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<br><br>NOTE: Allowed symbols are Letters( a-Z )," - " Numbers( 0-9 ) and Underscore( _ )<br><br>
" - ) - }) - log_msgs = [] - for name, ids in self.failed_regex.items(): - error_title = { - "type": "label", - "value": "## {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<br><br>{}<br><br>'.format("<br>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ",".join(paths))) - - for name, ids in self.tasks_failed_regex.items(): - error_title = { - "type": "label", - "value": "## Task: {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - ent_path = "/".join([ent_path, name]) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<br><br>{}<br><br>'.format("<br>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ",".join(paths))) - - self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) - - if self.duplicates: - subtitle = "Duplicated entity names:" - items.append({ - "type": "label", - "value": "# {}".format(subtitle) - }) - items.append({ - "type": "label", - "value": ( - "
<br><br>NOTE: It is not allowed to have same name" - " for multiple entities in one project<br><br>
" - ) - }) - log_msgs = [] - for name, ids in self.duplicates.items(): - error_title = { - "type": "label", - "value": "## {}".format(name) - } - items.append(error_title) - paths = [] - for entity_id in ids: - ent_path = self.get_ent_path(entity_id) - paths.append(ent_path) - - error_message = { - "type": "label", - "value": '
<br><br>{}<br><br>'.format("<br>
".join(paths)) - } - items.append(error_message) - log_msgs.append("<{}> ({})".format(name, ", ".join(paths))) - - self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) - - return items - - def get_avalon_attr(self, split_hierarchical=True): - custom_attributes = [] - hier_custom_attributes = [] - cust_attrs_query = ( - "select id, entity_type, object_type_id, is_hierarchical, default" - " from CustomAttributeConfiguration" - " where group.name = \"avalon\"" - ) - all_avalon_attr = self.session.query(cust_attrs_query).all() - for cust_attr in all_avalon_attr: - if split_hierarchical and cust_attr["is_hierarchical"]: - hier_custom_attributes.append(cust_attr) - continue - - custom_attributes.append(cust_attr) - - if split_hierarchical: - # return tuple - return custom_attributes, hier_custom_attributes - - return custom_attributes - - def report(self): - items = [] - project_name = self.entities_dict[self.ft_project_id]["name"] - title = "Synchronization report ({}):".format(project_name) - - keys = ["error", "warning", "info"] - for key in keys: - subitems = [] - if key == "warning": - for _item in self.regex_duplicate_interface(): - subitems.append(_item) - - for msg, _items in self.report_items[key].items(): - if not _items: - continue - - subitems.append({ - "type": "label", - "value": "# {}".format(msg) - }) - if isinstance(_items, str): - _items = [_items] - subitems.append({ - "type": "label", - "value": '
<br><br>{}<br><br>'.format("<br>
".join(_items)) - }) - - if items and subitems: - items.append(self.report_splitter) - - items.extend(subitems) - - return { - "items": items, - "title": title, - "success": False, - "message": "Synchronization Finished" - } +from pype.ftrack.lib.avalon_sync import SyncEntitiesFactory +from pypeapp import config class SyncToAvalonServer(BaseAction): @@ -2179,13 +44,27 @@ class SyncToAvalonServer(BaseAction): "PYPE_STATICS_SERVER", "http://localhost:{}".format( config.get_presets().get("services", {}).get( - "statics_server", {} + "rest_api", {} ).get("default_port", 8021) ) ) ) - #: roles that are allowed to register this action - role_list = ["Pypeclub"] + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.entities_factory = SyncEntitiesFactory(self.log, self.session) + + def register(self): + self.session.event_hub.subscribe( + "topic=ftrack.action.discover", + self._discover, + priority=self.priority + ) + + launch_subscription = ( + "topic=ftrack.action.launch and data.actionIdentifier={0}" + ).format(self.identifier) + self.session.event_hub.subscribe(launch_subscription, self._launch) def discover(self, session, entities, event): """ Validation """ @@ -2213,8 +92,6 @@ class SyncToAvalonServer(BaseAction): for role in user["user_security_roles"]: if role["security_role"]["name"] in role_list: return True - break - return False def launch(self, session, in_entities, event): @@ -2228,28 +105,29 @@ class SyncToAvalonServer(BaseAction): ft_project_name = in_entities[0]["project"]["full_name"] try: - entities_factory = SyncEntitiesFactory( - self.log, session, ft_project_name - ) + output = self.entities_factory.launch_setup(ft_project_name) + if output is not None: + return output + time_1 = time.time() - entities_factory.set_cutom_attributes() + self.entities_factory.set_cutom_attributes() time_2 = time.time() # This must happen before all filtering!!! - entities_factory.prepare_avalon_entities(ft_project_name) + self.entities_factory.prepare_avalon_entities(ft_project_name) time_3 = time.time() - entities_factory.filter_by_ignore_sync() + self.entities_factory.filter_by_ignore_sync() time_4 = time.time() - entities_factory.duplicity_regex_check() + self.entities_factory.duplicity_regex_check() time_5 = time.time() - entities_factory.prepare_ftrack_ent_data() + self.entities_factory.prepare_ftrack_ent_data() time_6 = time.time() - entities_factory.synchronize() + self.entities_factory.synchronize() time_7 = time.time() self.log.debug( @@ -2280,7 +158,7 @@ class SyncToAvalonServer(BaseAction): "* Total time: {}".format(time_7 - time_start) ) - report = entities_factory.report() + report = self.entities_factory.report() if report and report.get("items"): default_title = "Synchronization report ({}):".format( ft_project_name @@ -2322,13 +200,13 @@ class SyncToAvalonServer(BaseAction): report = {"items": []} try: - report = entities_factory.report() + report = self.entities_factory.report() except Exception: pass _items = report.get("items", []) if _items: - items.append(entities_factory.report_splitter) + items.append(self.entities_factory.report_splitter) items.extend(_items) self.show_interface(items, title, event) @@ -2337,16 +215,16 @@ class SyncToAvalonServer(BaseAction): finally: try: - entities_factory.dbcon.uninstall() + self.entities_factory.dbcon.uninstall() except Exception: pass try: - entities_factory.session.close() + self.entities_factory.session.close() except Exception: pass + def register(session, plugins_presets={}): '''Register plugin. 
Called when used as an plugin.''' - SyncToAvalonServer(session, plugins_presets).register() diff --git a/pype/ftrack/events/event_del_avalon_id_from_new.py b/pype/ftrack/events/event_del_avalon_id_from_new.py index 3436fde252..d820e40467 100644 --- a/pype/ftrack/events/event_del_avalon_id_from_new.py +++ b/pype/ftrack/events/event_del_avalon_id_from_new.py @@ -1,6 +1,6 @@ -import ftrack_api -from pype.ftrack import BaseEvent, get_ca_mongoid -from pype.ftrack.events.event_sync_to_avalon import SyncToAvalon +from pype.ftrack.lib import BaseEvent +from pype.ftrack.lib.avalon_sync import CustAttrIdKey +from pype.ftrack.events.event_sync_to_avalon import SyncToAvalonEvent class DelAvalonIdFromNew(BaseEvent): @@ -11,7 +11,8 @@ class DelAvalonIdFromNew(BaseEvent): Priority of this event must be less than SyncToAvalon event ''' - priority = SyncToAvalon.priority - 1 + priority = SyncToAvalonEvent.priority - 1 + ignore_me = True def launch(self, session, event): created = [] @@ -28,7 +29,7 @@ class DelAvalonIdFromNew(BaseEvent): elif ( entity.get('action', None) == 'update' and - get_ca_mongoid() in entity['keys'] and + CustAttrIdKey in entity['keys'] and entity_id in created ): ftrack_entity = session.get( @@ -37,13 +38,11 @@ class DelAvalonIdFromNew(BaseEvent): ) cust_attr = ftrack_entity['custom_attributes'][ - get_ca_mongoid() + CustAttrIdKey ] if cust_attr != '': - ftrack_entity['custom_attributes'][ - get_ca_mongoid() - ] = '' + ftrack_entity['custom_attributes'][CustAttrIdKey] = '' session.commit() except Exception: @@ -53,5 +52,4 @@ class DelAvalonIdFromNew(BaseEvent): def register(session, plugins_presets): '''Register plugin. Called when used as an plugin.''' - DelAvalonIdFromNew(session, plugins_presets).register() diff --git a/pype/ftrack/events/event_sync_hier_attr.py b/pype/ftrack/events/event_sync_hier_attr.py deleted file mode 100644 index 682575b52c..0000000000 --- a/pype/ftrack/events/event_sync_hier_attr.py +++ /dev/null @@ -1,213 +0,0 @@ -import os -import sys - -from pype.ftrack.lib.io_nonsingleton import DbConnector - -import ftrack_api -from pype.ftrack import BaseEvent, lib -from bson.objectid import ObjectId - - -class SyncHierarchicalAttrs(BaseEvent): - # After sync to avalon event! 
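# [Editor's note] Both handlers above depend on ftrack's event hub
# dispatching subscribers in ascending `priority` order, which is why
# DelAvalonIdFromNew declares `priority = SyncToAvalonEvent.priority - 1`
# and the (now deleted) SyncHierarchicalAttrs below used 101 to run
# after the sync. A minimal sketch of that ordering with two
# hypothetical callbacks; credentials are read from the FTRACK_* env
# variables by ftrack_api:
import ftrack_api

session = ftrack_api.Session(auto_connect_event_hub=True)

def clear_avalon_id(event):
    """Runs first: a lower priority value means earlier execution."""

def sync_to_avalon(event):
    """Runs second, after the id attribute was already cleared."""

session.event_hub.subscribe(
    "topic=ftrack.update", clear_avalon_id, priority=99
)
session.event_hub.subscribe(
    "topic=ftrack.update", sync_to_avalon, priority=100
)
session.event_hub.wait()  # block and dispatch incoming events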
- priority = 101 - db_con = DbConnector() - ca_mongoid = lib.get_ca_mongoid() - - def launch(self, session, event): - # Filter entities and changed values if it makes sence to run script - processable = [] - processable_ent = {} - for ent in event['data']['entities']: - # Ignore entities that are not tasks or projects - if ent['entityType'].lower() not in ['task', 'show']: - continue - - action = ent.get("action") - # skip if remove (Entity does not exist in Ftrack) - if action == "remove": - continue - - # When entity was add we don't care about keys - if action != "add": - keys = ent.get('keys') - if not keys: - continue - - entity = session.get(self._get_entity_type(ent), ent['entityId']) - processable.append(ent) - - processable_ent[ent['entityId']] = { - "entity": entity, - "action": action, - "link": entity["link"] - } - - if not processable: - return True - - # Find project of entities - ft_project = None - for entity_dict in processable_ent.values(): - try: - base_proj = entity_dict['link'][0] - except Exception: - continue - ft_project = session.get(base_proj['type'], base_proj['id']) - break - - # check if project is set to auto-sync - if ( - ft_project is None or - 'avalon_auto_sync' not in ft_project['custom_attributes'] or - ft_project['custom_attributes']['avalon_auto_sync'] is False - ): - return True - - # Get hierarchical custom attributes from "avalon" group - custom_attributes = {} - query = 'CustomAttributeGroup where name is "avalon"' - all_avalon_attr = session.query(query).one() - for cust_attr in all_avalon_attr['custom_attribute_configurations']: - if 'avalon_' in cust_attr['key']: - continue - if not cust_attr['is_hierarchical']: - continue - custom_attributes[cust_attr['key']] = cust_attr - - if not custom_attributes: - return True - - self.db_con.install() - self.db_con.Session['AVALON_PROJECT'] = ft_project['full_name'] - - for ent in processable: - entity_dict = processable_ent[ent['entityId']] - - entity = entity_dict["entity"] - ent_path = "/".join([ent["name"] for ent in entity_dict['link']]) - action = entity_dict["action"] - - keys_to_process = {} - if action == "add": - # Store all custom attributes when entity was added - for key in custom_attributes: - keys_to_process[key] = entity['custom_attributes'][key] - else: - # Update only updated keys - for key in ent['keys']: - if key in custom_attributes: - keys_to_process[key] = entity['custom_attributes'][key] - - processed_keys = self.get_hierarchical_values( - keys_to_process, entity - ) - # Do the processing of values - self.update_hierarchical_attribute(entity, processed_keys, ent_path) - - self.db_con.uninstall() - - return True - - def get_hierarchical_values(self, keys_dict, entity): - # check already set values - _set_keys = [] - for key, value in keys_dict.items(): - if value is not None: - _set_keys.append(key) - - # pop set values from keys_dict - set_keys = {} - for key in _set_keys: - set_keys[key] = keys_dict.pop(key) - - # find if entity has set values and pop them out - keys_to_pop = [] - for key in keys_dict.keys(): - _val = entity["custom_attributes"][key] - if _val: - keys_to_pop.append(key) - set_keys[key] = _val - - for key in keys_to_pop: - keys_dict.pop(key) - - # if there are not keys to find value return found - if not keys_dict: - return set_keys - - # end recursion if entity is project - if entity.entity_type.lower() == "project": - for key, value in keys_dict.items(): - set_keys[key] = value - - else: - result = self.get_hierarchical_values(keys_dict, entity["parent"]) - for 
key, value in result.items(): - set_keys[key] = value - - return set_keys - - def update_hierarchical_attribute(self, entity, keys_dict, ent_path): - # TODO store all keys at once for entity - custom_attributes = entity.get('custom_attributes') - if not custom_attributes: - return - - mongoid = custom_attributes.get(self.ca_mongoid) - if not mongoid: - return - - try: - mongoid = ObjectId(mongoid) - except Exception: - return - - mongo_entity = self.db_con.find_one({'_id': mongoid}) - if not mongo_entity: - return - - changed_keys = {} - data = mongo_entity.get('data') or {} - for key, value in keys_dict.items(): - cur_value = data.get(key) - if cur_value: - if cur_value == value: - continue - changed_keys[key] = value - data[key] = value - - if not changed_keys: - return - - self.log.debug( - "{} - updated hierarchical attributes: {}".format( - ent_path, str(changed_keys) - ) - ) - - self.db_con.update_many( - {'_id': mongoid}, - {'$set': {'data': data}} - ) - - for child in entity.get('children', []): - _keys_dict = {} - for key, value in keys_dict.items(): - if key not in child.get('custom_attributes', {}): - continue - child_value = child['custom_attributes'][key] - if child_value is not None: - continue - _keys_dict[key] = value - - if not _keys_dict: - continue - child_path = "/".join([ent["name"] for ent in child['link']]) - self.update_hierarchical_attribute(child, _keys_dict, child_path) - - -def register(session, plugins_presets): - '''Register plugin. Called when used as an plugin.''' - - SyncHierarchicalAttrs(session, plugins_presets).register() diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index db8ca845a6..8d25b5b801 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -1,51 +1,536 @@ +import os +import collections +import copy +import queue +import time +import atexit +import traceback + +from bson.objectid import ObjectId +from pymongo import UpdateOne + +from avalon import schema + +from pype.ftrack.lib import avalon_sync +from pype.ftrack.lib.avalon_sync import ( + CustAttrIdKey, CustAttrAutoSync, EntitySchemas +) import ftrack_api -from pype.ftrack import BaseEvent, lib +from pype.ftrack import BaseEvent + +from pype.ftrack.lib.io_nonsingleton import DbConnector -class SyncToAvalon(BaseEvent): +class SyncToAvalonEvent(BaseEvent): - priority = 100 + dbcon = DbConnector() - ignore_entityType = [ - 'assetversion', 'job', 'user', 'reviewsessionobject', 'timer', - 'socialfeed', 'socialnotification', 'timelog' + ignore_entTypes = [ + "socialfeed", "socialnotification", "note", + "assetversion", "job", "user", "reviewsessionobject", "timer", + "timelog", "auth_userrole", "appointment" ] + ignore_ent_types = ["Milestone"] + ignore_keys = ["statusid", "thumbid"] + + project_query = ( + "select full_name, name, custom_attributes" + ", project_schema._task_type_schema.types.name" + " from Project where id is \"{}\"" + ) + + entities_query_by_id = ( + "select id, name, parent_id, link, custom_attributes from TypedContext" + " where project_id is \"{}\" and id in ({})" + ) + entities_name_query_by_name = ( + "select id, name from TypedContext" + " where project_id is \"{}\" and name in ({})" + ) + created_entities = [] + + def __init__(self, session, plugins_presets={}): + '''Expects a ftrack_api.Session instance''' + self.set_process_session(session) + super().__init__(session, plugins_presets) + + @property + def cur_project(self): + if self._cur_project is None: + found_id 
= None + for ent_info in self._cur_event["data"]["entities"]: + if found_id is not None: + break + parents = ent_info.get("parents") or [] + for parent in parents: + if parent.get("entityType") == "show": + found_id = parent.get("entityId") + break + if found_id: + self._cur_project = self.process_session.query( + self.project_query.format(found_id) + ).one() + return self._cur_project + + @property + def avalon_cust_attrs(self): + if self._avalon_cust_attrs is None: + self._avalon_cust_attrs = avalon_sync.get_avalon_attr( + self.process_session + ) + return self._avalon_cust_attrs + + @property + def avalon_entities(self): + if self._avalon_ents is None: + self.dbcon.install() + self.dbcon.Session["AVALON_PROJECT"] = ( + self.cur_project["full_name"] + ) + avalon_project = self.dbcon.find_one({"type": "project"}) + avalon_entities = list(self.dbcon.find({"type": "asset"})) + self._avalon_ents = (avalon_project, avalon_entities) + return self._avalon_ents + + @property + def avalon_ents_by_name(self): + if self._avalon_ents_by_name is None: + self._avalon_ents_by_name = {} + proj, ents = self.avalon_entities + for ent in ents: + self._avalon_ents_by_name[ent["name"]] = ent + return self._avalon_ents_by_name + + @property + def avalon_ents_by_id(self): + if self._avalon_ents_by_id is None: + self._avalon_ents_by_id = {} + proj, ents = self.avalon_entities + self._avalon_ents_by_id[proj["_id"]] = proj + for ent in ents: + self._avalon_ents_by_id[ent["_id"]] = ent + return self._avalon_ents_by_id + + @property + def avalon_ents_by_parent_id(self): + if self._avalon_ents_by_parent_id is None: + self._avalon_ents_by_parent_id = collections.defaultdict(list) + proj, ents = self.avalon_entities + for ent in ents: + vis_par = ent["data"]["visualParent"] + if vis_par is None: + vis_par = proj["_id"] + self._avalon_ents_by_parent_id[vis_par].append(ent) + return self._avalon_ents_by_parent_id + + @property + def avalon_ents_by_ftrack_id(self): + if self._avalon_ents_by_ftrack_id is None: + self._avalon_ents_by_ftrack_id = {} + proj, ents = self.avalon_entities + ftrack_id = proj["data"]["ftrackId"] + self._avalon_ents_by_ftrack_id[ftrack_id] = proj + for ent in ents: + ftrack_id = ent["data"].get("ftrackId") + if ftrack_id is None: + continue + self._avalon_ents_by_ftrack_id[ftrack_id] = ent + return self._avalon_ents_by_ftrack_id + + @property + def avalon_subsets_by_parents(self): + if self._avalon_subsets_by_parents is None: + self._avalon_subsets_by_parents = collections.defaultdict(list) + self.dbcon.install() + self.dbcon.Session["AVALON_PROJECT"] = ( + self.cur_project["full_name"] + ) + for subset in self.dbcon.find({"type": "subset"}): + self._avalon_subsets_by_parents[subset["parent"]].append( + subset + ) + return self._avalon_subsets_by_parents + + @property + def avalon_archived_by_id(self): + if self._avalon_archived_by_id is None: + self._avalon_archived_by_id = {} + self.dbcon.install() + self.dbcon.Session["AVALON_PROJECT"] = ( + self.cur_project["full_name"] + ) + for asset in self.dbcon.find({"type": "archived_asset"}): + self._avalon_archived_by_id[asset["_id"]] = asset + return self._avalon_archived_by_id + + @property + def avalon_archived_by_name(self): + if self._avalon_archived_by_name is None: + self._avalon_archived_by_name = {} + for asset in self.avalon_archived_by_id.values(): + self._avalon_archived_by_name[asset["name"]] = asset + return self._avalon_archived_by_name + + @property + def changeability_by_mongo_id(self): + """Return info about changeability of entity 
and it's parents.""" + if self._changeability_by_mongo_id is None: + self._changeability_by_mongo_id = collections.defaultdict( + lambda: True + ) + avalon_project, avalon_entities = self.avalon_entities + self._changeability_by_mongo_id[avalon_project["_id"]] = False + self._bubble_changeability( + list(self.avalon_subsets_by_parents.keys()) + ) + + return self._changeability_by_mongo_id + + @property + def avalon_custom_attributes(self): + """Return info about changeability of entity and it's parents.""" + if self._avalon_custom_attributes is None: + self._avalon_custom_attributes = avalon_sync.get_avalon_attr( + self.process_session + ) + return self._avalon_custom_attributes + + def remove_cached_by_key(self, key, values): + if self._avalon_ents is None: + return + + if not isinstance(values, (list, tuple)): + values = [values] + + def get_found_data(entity): + if not entity: + return None + return { + "ftrack_id": entity["data"]["ftrackId"], + "parent_id": entity["data"]["visualParent"], + "_id": entity["_id"], + "name": entity["name"], + "entity": entity + } + + if key == "id": + key = "_id" + elif key == "ftrack_id": + key = "data.ftrackId" + + found_data = {} + project, entities = self._avalon_ents + key_items = key.split(".") + for value in values: + ent = None + if key == "_id": + if self._avalon_ents_by_id is not None: + ent = self._avalon_ents_by_id.get(value) + + elif key == "name": + if self._avalon_ents_by_name is not None: + ent = self._avalon_ents_by_name.get(value) + + elif key == "data.ftrackId": + if self._avalon_ents_by_ftrack_id is not None: + ent = self._avalon_ents_by_ftrack_id.get(value) + + if ent is None: + for _ent in entities: + _temp = _ent + for item in key_items: + _temp = _temp[item] + + if _temp == value: + ent = _ent + break + + found_data[value] = get_found_data(ent) + + for value in values: + data = found_data[value] + if not data: + # TODO logging + self.log.warning( + "Didn't found entity by key/value \"{}\" / \"{}\"".format( + key, value + ) + ) + continue + + ftrack_id = data["ftrack_id"] + parent_id = data["parent_id"] + mongo_id = data["_id"] + name = data["name"] + entity = data["entity"] + + project, ents = self._avalon_ents + ents.remove(entity) + self._avalon_ents = project, ents + + if self._avalon_ents_by_ftrack_id is not None: + self._avalon_ents_by_ftrack_id.pop(ftrack_id, None) + + if self._avalon_ents_by_parent_id is not None: + self._avalon_ents_by_parent_id[parent_id].remove(entity) + + if self._avalon_ents_by_id is not None: + self._avalon_ents_by_id.pop(mongo_id, None) + + if self._avalon_ents_by_name is not None: + self._avalon_ents_by_name.pop(name, None) + + if self._avalon_archived_by_id is not None: + self._avalon_archived_by_id[mongo_id] = entity + + if mongo_id in self.task_changes_by_avalon_id: + self.task_changes_by_avalon_id.pop(mongo_id) + + def _bubble_changeability(self, unchangeable_ids): + unchangeable_queue = queue.Queue() + for entity_id in unchangeable_ids: + unchangeable_queue.put((entity_id, False)) + + processed_parents_ids = [] + while not unchangeable_queue.empty(): + entity_id, child_is_archived = unchangeable_queue.get() + # skip if already processed + if entity_id in processed_parents_ids: + continue + + entity = self.avalon_ents_by_id.get(entity_id) + # if entity is not archived but unchageable child was then skip + # - archived entities should not affect not archived? 
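# [Editor's note] _bubble_changeability above walks visualParent links
# upwards so that an asset with published subsets locks its entire
# ancestor chain against renames, moves and deletion. A self-contained
# sketch of the same walk, assuming parents are available as a plain
# id -> parent-id mapping (all names here are hypothetical):
import collections
import queue

def bubble_unchangeable(start_ids, parent_of):
    """Mark `start_ids` and all of their ancestors as unchangeable."""
    changeable = collections.defaultdict(lambda: True)
    todo = queue.Queue()
    for entity_id in start_ids:
        todo.put(entity_id)
    processed = set()
    while not todo.empty():
        entity_id = todo.get()
        if entity_id in processed:
            continue  # every ancestor needs to be visited only once
        processed.add(entity_id)
        changeable[entity_id] = False
        parent_id = parent_of.get(entity_id)
        if parent_id is not None:  # None marks the project root
            todo.put(parent_id)
    return changeable

# Example: bubble_unchangeable(["shot_a"], {"shot_a": "seq_1", "seq_1": None})
# leaves both "shot_a" and "seq_1" marked False while any other id still
# defaults to True.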
+ if entity and child_is_archived: + continue + + # set changeability of current entity to False + self._changeability_by_mongo_id[entity_id] = False + processed_parents_ids.append(entity_id) + # if not entity then is probably archived + if not entity: + entity = self.avalon_archived_by_id.get(entity_id) + child_is_archived = True + + if not entity: + # if entity is not found then it is subset without parent + if entity_id in unchangeable_ids: + _subset_ids = [ + str(sub["_id"]) for sub in + self.avalon_subsets_by_parents[entity_id] + ] + joined_subset_ids = "| ".join(_subset_ids) + self.log.warning(( + "Parent <{}> for subsets <{}> does not exist" + ).format(str(entity_id), joined_subset_ids)) + else: + self.log.warning(( + "In avalon are entities without valid parents that" + " lead to Project (should not cause errors)" + " - MongoId <{}>" + ).format(str(entity_id))) + continue + + # skip if parent is project + parent_id = entity["data"]["visualParent"] + if parent_id is None: + continue + unchangeable_queue.put((parent_id, child_is_archived)) + + def reset_variables(self): + """Reset variables so each event callback has clear env.""" + self._cur_project = None + + self._avalon_cust_attrs = None + + self._avalon_ents = None + self._avalon_ents_by_id = None + self._avalon_ents_by_parent_id = None + self._avalon_ents_by_ftrack_id = None + self._avalon_ents_by_name = None + self._avalon_subsets_by_parents = None + self._changeability_by_mongo_id = None + self._avalon_archived_by_id = None + self._avalon_archived_by_name = None + + self.task_changes_by_avalon_id = {} + + self._avalon_custom_attributes = None + self._ent_types_by_name = None + + self.ftrack_ents_by_id = {} + self.obj_id_ent_type_map = {} + self.ftrack_recreated_mapping = {} + + self.ftrack_added = {} + self.ftrack_moved = {} + self.ftrack_renamed = {} + self.ftrack_updated = {} + self.ftrack_removed = {} + + self.moved_in_avalon = [] + self.renamed_in_avalon = [] + self.hier_cust_attrs_changes = collections.defaultdict(list) + + self.duplicated = [] + self.regex_failed = [] + + self.regex_schemas = {} + self.updates = collections.defaultdict(dict) + + self.report_items = { + "info": collections.defaultdict(list), + "warning": collections.defaultdict(list), + "error": collections.defaultdict(list) + } + + def set_process_session(self, session): + try: + self.process_session.close() + except Exception: + pass + self.process_session = ftrack_api.Session( + server_url=session.server_url, + api_key=session.api_key, + api_user=session.api_user, + auto_connect_event_hub=True + ) + atexit.register(lambda: self.process_session.close()) + + def filter_updated(self, updates): + filtered_updates = {} + for ftrack_id, ent_info in updates.items(): + changed_keys = [k for k in (ent_info.get("keys") or [])] + changes = { + k: v for k, v in (ent_info.get("changes") or {}).items() + } + + entity_type = ent_info["entity_type"] + if entity_type == "Task": + if "name" in changed_keys: + ent_info["keys"] = ["name"] + ent_info["changes"] = {"name": changes.pop("name")} + filtered_updates[ftrack_id] = ent_info + continue + + for _key in self.ignore_keys: + if _key in changed_keys: + changed_keys.remove(_key) + changes.pop(_key, None) + + if not changed_keys: + continue + + # Remove custom attributes starting with `avalon_` from changes + # - these custom attributes are not synchronized + avalon_keys = [] + for key in changes: + if key.startswith("avalon_"): + avalon_keys.append(key) + + for _key in avalon_keys: + changed_keys.remove(_key) + 
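# [Editor's note] set_process_session above keeps a second, private
# ftrack session for the event handler's own writes and rebuilds it
# whenever a commit fails. A minimal sketch of that recreate-with-cleanup
# pattern, using only documented ftrack_api.Session constructor arguments:
import atexit
import ftrack_api

def recreate_session(old_session):
    """Clone connection settings from `old_session` into a fresh one."""
    try:
        old_session.close()
    except Exception:
        pass  # the old session may already be broken; closing is best-effort
    new_session = ftrack_api.Session(
        server_url=old_session.server_url,
        api_key=old_session.api_key,
        api_user=old_session.api_user,
        auto_connect_event_hub=True
    )
    # Make sure the replacement is closed when the process exits.
    atexit.register(new_session.close)
    return new_session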
changes.pop(_key, None) + + if not changed_keys: + continue + + ent_info["keys"] = changed_keys + ent_info["changes"] = changes + filtered_updates[ftrack_id] = ent_info + + return filtered_updates + + def get_ent_path(self, ftrack_id): + entity = self.ftrack_ents_by_id.get(ftrack_id) + if not entity: + entity = self.process_session.query( + self.entities_query_by_id.format( + self.cur_project["id"], ftrack_id + ) + ).first() + if entity: + self.ftrack_ents_by_id[ftrack_id] = entity + else: + return "unknown hierarchy" + return "/".join([ent["name"] for ent in entity["link"]]) def launch(self, session, event): - ca_mongoid = lib.get_ca_mongoid() - # If mongo_id textfield has changed: RETURN! - # - infinite loop - for ent in event['data']['entities']: - if ent.get('keys') is not None: - if ca_mongoid in ent['keys']: - return + # Try to commit and if any error happen then recreate session + try: + self.process_session.commit() + except Exception: + self.set_process_session(session) - entities = self._get_entities(session, event, self.ignore_entityType) - ft_project = None - # get project - for entity in entities: - try: - base_proj = entity['link'][0] - except Exception: + # Reset object values for each launch + self.reset_variables() + self._cur_event = event + + entities_by_action = { + "remove": {}, + "update": {}, + "move": {}, + "add": {} + } + + entities_info = event["data"]["entities"] + found_actions = set() + for ent_info in entities_info: + entityType = ent_info["entityType"] + if entityType in self.ignore_entTypes: continue - ft_project = session.get(base_proj['type'], base_proj['id']) - break - for ent_info in event['data']['entities']: + entity_type = ent_info.get("entity_type") + if not entity_type or entity_type in self.ignore_ent_types: + continue + + action = ent_info["action"] + ftrack_id = ent_info["entityId"] + if isinstance(ftrack_id, list): + self.log.warning(( + "BUG REPORT: Entity info has `entityId` as `list` \"{}\"" + ).format(ent_info)) + if len(ftrack_id) == 0: + continue + ftrack_id = ftrack_id[0] + + if action == "move": + ent_keys = ent_info["keys"] + # Seprate update info from move action + if len(ent_keys) > 1: + _ent_info = ent_info.copy() + for ent_key in ent_keys: + if ent_key == "parent_id": + _ent_info["changes"].pop(ent_key, None) + _ent_info["keys"].remove(ent_key) + else: + ent_info["changes"].pop(ent_key, None) + ent_info["keys"].remove(ent_key) + + entities_by_action["update"][ftrack_id] = _ent_info + + found_actions.add(action) + entities_by_action[action][ftrack_id] = ent_info + + found_actions = list(found_actions) + if not found_actions: + return True + + # Check if auto sync was turned on/off + updated = entities_by_action["update"] + for ftrack_id, ent_info in updated.items(): # filter project - if ent_info.get("entityType") != "show": + if ent_info["entityType"] != "show": continue - if ent_info.get("action") != "update": + changes = ent_info["changes"] + if CustAttrAutoSync not in changes: continue - changes = ent_info.get("changes") or {} - if 'avalon_auto_sync' not in changes: - continue - - auto_sync = changes['avalon_auto_sync']["new"] + auto_sync = changes[CustAttrAutoSync]["new"] if auto_sync == "1": # Trigger sync to avalon action if auto sync was turned on + ft_project = self.cur_project self.log.debug(( "Auto sync was turned on for project <{}>." " Triggering syncToAvalon action." 
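# [Editor's note] filter_updated above trims every update event down to
# the keys the sync cares about: the ignored keys ("statusid", "thumbid")
# and any "avalon_*" custom attribute are dropped, and entities left with
# no keys are skipped entirely. A simplified, hedged sketch of that step
# (it omits the Task special case where only "name" changes are kept;
# the function name is hypothetical):
IGNORE_KEYS = {"statusid", "thumbid"}

def filter_changes(ent_info):
    """Return a trimmed copy of one entity-update info, or None."""
    keys = [
        key for key in (ent_info.get("keys") or [])
        if key not in IGNORE_KEYS and not key.startswith("avalon_")
    ]
    if not keys:
        return None  # nothing relevant changed for this entity
    changes = {
        key: value
        for key, value in (ent_info.get("changes") or {}).items()
        if key in keys
    }
    return dict(ent_info, keys=keys, changes=changes)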
@@ -54,11 +539,6 @@ class SyncToAvalon(BaseEvent): "entityId": ft_project["id"], "entityType": "show" }] - # Stop event so sync hierarchical won't be affected - # - other event should not be affected since auto-sync - # is in all cases single data event - event.stop() - # Trigger action self.trigger_action( action_name="sync.to.avalon.server", event=event, @@ -67,98 +547,1843 @@ class SyncToAvalon(BaseEvent): # Exit for both cases return True - # check if project is set to auto-sync + # Filter updated data by changed keys + updated = self.filter_updated(updated) + + # skip most of events where nothing has changed for avalon if ( - ft_project is None or - 'avalon_auto_sync' not in ft_project['custom_attributes'] or - ft_project['custom_attributes']['avalon_auto_sync'] is False + len(found_actions) == 1 and + found_actions[0] == "update" and + not updated + ): + return True + + ft_project = self.cur_project + # Check if auto-sync custom attribute exists + if CustAttrAutoSync not in ft_project["custom_attributes"]: + # TODO should we sent message to someone? + self.log.error(( + "Custom attribute \"{}\" is not created or user \"{}\" used" + " for Event server don't have permissions to access it!" + ).format(CustAttrAutoSync, self.session.api_user)) + return True + + # Skip if auto-sync is not set + auto_sync = ft_project["custom_attributes"][CustAttrAutoSync] + if auto_sync is not True: + return True + + debug_msg = "" + debug_msg += "Updated: {}".format(len(updated)) + debug_action_map = { + "add": "Created", + "remove": "Removed", + "move": "Moved" + } + for action, infos in entities_by_action.items(): + if action == "update": + continue + _action = debug_action_map[action] + debug_msg += "| {}: {}".format(_action, len(infos)) + + self.log.debug("Project changes <{}>: {}".format( + ft_project["full_name"], debug_msg + )) + # Get ftrack entities - find all ftrack ids first + ftrack_ids = [] + for ftrack_id in updated: + ftrack_ids.append(ftrack_id) + + for action, ftrack_ids in entities_by_action.items(): + # skip updated (already prepared) and removed (not exist in ftrack) + if action == "remove": + continue + + for ftrack_id in ftrack_ids: + if ftrack_id not in ftrack_ids: + ftrack_ids.append(ftrack_id) + + if ftrack_ids: + joined_ids = ", ".join(["\"{}\"".format(id) for id in ftrack_ids]) + ftrack_entities = self.process_session.query( + self.entities_query_by_id.format(ft_project["id"], joined_ids) + ).all() + for entity in ftrack_entities: + self.ftrack_ents_by_id[entity["id"]] = entity + + # Filter updates where name is changing + for ftrack_id, ent_info in updated.items(): + ent_keys = ent_info["keys"] + # Seprate update info from rename + if "name" not in ent_keys: + continue + + _ent_info = copy.deepcopy(ent_info) + for ent_key in ent_keys: + if ent_key == "name": + ent_info["changes"].pop(ent_key, None) + ent_info["keys"].remove(ent_key) + else: + _ent_info["changes"].pop(ent_key, None) + _ent_info["keys"].remove(ent_key) + + self.ftrack_renamed[ftrack_id] = _ent_info + + self.ftrack_removed = entities_by_action["remove"] + self.ftrack_moved = entities_by_action["move"] + self.ftrack_added = entities_by_action["add"] + self.ftrack_updated = updated + + self.log.debug("Synchronization begins") + try: + time_1 = time.time() + # 1.) Process removed - may affect all other actions + self.process_removed() + time_2 = time.time() + # 2.) Process renamed - may affect added + self.process_renamed() + time_3 = time.time() + # 3.) 
Process added - moved entity may be moved to new entity + self.process_added() + time_4 = time.time() + # 4.) Process moved + self.process_moved() + time_5 = time.time() + # 5.) Process updated + self.process_updated() + time_6 = time.time() + # 6.) Process changes in hierarchy or hier custom attribues + self.process_hier_cleanup() + if self.updates: + self.update_entities() + time_7 = time.time() + + time_removed = time_2 - time_1 + time_renamed = time_3 - time_2 + time_added = time_4 - time_3 + time_moved = time_5 - time_4 + time_updated = time_6 - time_5 + time_cleanup = time_7 - time_6 + time_total = time_7 - time_1 + self.log.debug("Process time: {} <{}, {}, {}, {}, {}, {}>".format( + time_total, time_removed, time_renamed, time_added, time_moved, + time_updated, time_cleanup + )) + + except Exception: + msg = "An error has happened during synchronization" + self.report_items["error"][msg].append(( + str(traceback.format_exc()).replace("\n", "
") + ).replace(" ", " ")) + + self.report() + return True + + def process_removed(self): + if not self.ftrack_removed: + return + ent_infos = self.ftrack_removed + removable_ids = [] + recreate_ents = [] + removed_names = [] + for ftrack_id, removed in ent_infos.items(): + entity_type = removed["entity_type"] + parent_id = removed["parentId"] + removed_name = removed["changes"]["name"]["old"] + if entity_type == "Task": + avalon_ent = self.avalon_ents_by_ftrack_id.get(parent_id) + if not avalon_ent: + self.log.debug(( + "Parent entity of task was not found in avalon <{}>" + ).format(self.get_ent_path(parent_id))) + continue + + mongo_id = avalon_ent["_id"] + if mongo_id not in self.task_changes_by_avalon_id: + self.task_changes_by_avalon_id[mongo_id] = ( + avalon_ent["data"]["tasks"] + ) + + if removed_name in self.task_changes_by_avalon_id[mongo_id]: + self.task_changes_by_avalon_id[mongo_id].remove( + removed_name + ) + + continue + + avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) + if not avalon_ent: + continue + mongo_id = avalon_ent["_id"] + if self.changeability_by_mongo_id[mongo_id]: + removable_ids.append(mongo_id) + removed_names.append(removed_name) + else: + recreate_ents.append(avalon_ent) + + if removable_ids: + # TODO logging + self.log.debug("Assets marked as archived <{}>".format( + ", ".join(removed_names) + )) + self.dbcon.update_many( + {"_id": {"$in": removable_ids}, "type": "asset"}, + {"$set": {"type": "archived_asset"}} + ) + self.remove_cached_by_key("id", removable_ids) + + if recreate_ents: + # sort removed entities by parents len + # - length of parents determine hierarchy level + recreate_ents = sorted( + recreate_ents, + key=(lambda item: len( + (item.get("data", {}).get("parents") or []) + )) + ) + # TODO logging + # TODO report + recreate_msg = ( + "Deleted entity was recreated||Entity was recreated because" + " it or its children contain published data" + ) + proj, ents = self.avalon_entities + for avalon_entity in recreate_ents: + old_ftrack_id = avalon_entity["data"]["ftrackId"] + vis_par = avalon_entity["data"]["visualParent"] + if vis_par is None: + vis_par = proj["_id"] + parent_ent = self.avalon_ents_by_id[vis_par] + parent_ftrack_id = parent_ent["data"]["ftrackId"] + parent_ftrack_ent = self.ftrack_ents_by_id.get( + parent_ftrack_id + ) + if not parent_ftrack_ent: + if parent_ent["type"].lower() == "project": + parent_ftrack_ent = self.cur_project + else: + parent_ftrack_ent = self.process_session.query( + self.entities_query_by_id.format( + self.cur_project["id"], parent_ftrack_id + ) + ).one() + entity_type = avalon_entity["data"]["entityType"] + new_entity = self.process_session.create(entity_type, { + "name": avalon_entity["name"], + "parent": parent_ftrack_ent + }) + try: + self.process_session.commit() + except Exception: + # TODO logging + # TODO report + self.process_session.rolback() + ent_path_items = [self.cur_project["full_name"]] + ent_path_items.extend([ + par for par in avalon_entity["data"]["parents"] + ]) + ent_path_items.append(avalon_entity["name"]) + ent_path = "/".join(ent_path_items) + + error_msg = "Couldn't recreate entity in Ftrack" + report_msg = ( + "{}||Trying to recreate because it or its children" + " contain published data" + ).format(error_msg) + self.report_items["warning"][report_msg].append(ent_path) + self.log.warning( + "{}. Process session commit failed! 
<{}>".format( + error_msg, ent_path + ), + exc_info=True + ) + continue + + new_entity_id = new_entity["id"] + avalon_entity["data"]["ftrackId"] = new_entity_id + + for key, val in avalon_entity["data"].items(): + if not val: + continue + if key not in new_entity["custom_attributes"]: + continue + + new_entity["custom_attributes"][key] = val + + new_entity["custom_attributes"][CustAttrIdKey] = ( + str(avalon_entity["_id"]) + ) + ent_path = self.get_ent_path(new_entity_id) + + try: + self.process_session.commit() + except Exception: + # TODO logging + # TODO report + self.process_session.rolback() + error_msg = ( + "Couldn't update custom attributes after recreation" + " of entity in Ftrack" + ) + report_msg = ( + "{}||Entity was recreated because it or its children" + " contain published data" + ).format(error_msg) + self.report_items["warning"][report_msg].append(ent_path) + self.log.warning( + "{}. Process session commit failed! <{}>".format( + error_msg, ent_path + ), + exc_info=True + ) + continue + + self.report_items["info"][recreate_msg].append(ent_path) + + self.ftrack_recreated_mapping[old_ftrack_id] = new_entity_id + self.process_session.commit() + + found_idx = None + for idx, _entity in enumerate(self._avalon_ents): + if _entity["_id"] == avalon_entity["_id"]: + found_idx = idx + break + + if found_idx is None: + continue + + # Prepare updates dict for mongo update + if "data" not in self.updates[avalon_entity["_id"]]: + self.updates[avalon_entity["_id"]]["data"] = {} + + self.updates[avalon_entity["_id"]]["data"]["ftrackId"] = ( + new_entity_id + ) + # Update cached entities + self._avalon_ents[found_idx] = avalon_entity + + if self._avalon_ents_by_id is not None: + mongo_id = avalon_entity["_id"] + self._avalon_ents_by_id[mongo_id] = avalon_entity + + if self._avalon_ents_by_parent_id is not None: + vis_par = avalon_entity["data"]["visualParent"] + children = self._avalon_ents_by_parent_id[vis_par] + found_idx = None + for idx, _entity in enumerate(children): + if _entity["_id"] == avalon_entity["_id"]: + found_idx = idx + break + children[found_idx] = avalon_entity + self._avalon_ents_by_parent_id[vis_par] = children + + if self._avalon_ents_by_ftrack_id is not None: + self._avalon_ents_by_ftrack_id.pop(old_ftrack_id) + self._avalon_ents_by_ftrack_id[new_entity_id] = ( + avalon_entity + ) + + if self._avalon_ents_by_name is not None: + name = avalon_entity["name"] + self._avalon_ents_by_name[name] = avalon_entity + + # Check if entities with same name can be synchronized + if not removed_names: + return + + self.check_names_synchronizable(removed_names) + + def check_names_synchronizable(self, names): + """Check if entities with specific names are importable. + + This check should happend after removing entity or renaming entity. + When entity was removed or renamed then it's name is possible to sync. + """ + joined_passed_names = ", ".join( + ["\"{}\"".format(name) for name in names] + ) + same_name_entities = self.process_session.query( + self.entities_name_query_by_name.format( + self.cur_project["id"], joined_passed_names + ) + ).all() + if not same_name_entities: + return + + entities_by_name = collections.defaultdict(list) + for entity in same_name_entities: + entities_by_name[entity["name"]].append(entity) + + synchronizable_ents = [] + self.log.debug(( + "Deleting of entities should allow to synchronize another entities" + " with same name." 
+ )) + for name, ents in entities_by_name.items(): + if len(ents) != 1: + self.log.debug(( + "Name \"{}\" still have more than one entity <{}>" + ).format( + name, "| ".join( + [self.get_ent_path(ent["id"]) for ent in ents] + ) + )) + continue + + entity = ents[0] + ent_path = self.get_ent_path(entity["id"]) + # TODO logging + self.log.debug( + "Checking if can synchronize entity <{}>".format(ent_path) + ) + # skip if already synchronized + ftrack_id = entity["id"] + if ftrack_id in self.avalon_ents_by_ftrack_id: + # TODO logging + self.log.debug( + "- Entity is already synchronized (skipping) <{}>".format( + ent_path + ) + ) + continue + + parent_id = entity["parent_id"] + if parent_id not in self.avalon_ents_by_ftrack_id: + # TODO logging + self.log.debug(( + "- Entity's parent entity doesn't seems to" + " be synchronized (skipping) <{}>" + ).format(ent_path)) + continue + + synchronizable_ents.append(entity) + + if not synchronizable_ents: + return + + synchronizable_ents = sorted( + synchronizable_ents, + key=(lambda entity: len(entity["link"])) + ) + + children_queue = queue.Queue() + for entity in synchronizable_ents: + parent_avalon_ent = self.avalon_ents_by_ftrack_id[ + entity["parent_id"] + ] + self.create_entity_in_avalon(entity, parent_avalon_ent) + + for child in entity["children"]: + if child.entity_type.lower() == "task": + continue + children_queue.put(child) + + while not children_queue.empty(): + entity = children_queue.get() + ftrack_id = entity["id"] + name = entity["name"] + ent_by_ftrack_id = self.avalon_ents_by_ftrack_id.get(ftrack_id) + if ent_by_ftrack_id: + raise Exception(( + "This is bug, parent was just synchronized to avalon" + " but entity is already in database {}" + ).format(dict(entity))) + + # Entity has duplicated name with another entity + # - may be renamed: in that case renaming method will handle that + duplicate_ent = self.avalon_ents_by_name.get(name) + if duplicate_ent: + continue + + passed_regex = avalon_sync.check_regex( + name, "asset", schema_patterns=self.regex_schemas + ) + if not passed_regex: + continue + + parent_id = entity["parent_id"] + parent_avalon_ent = self.avalon_ents_by_ftrack_id[parent_id] + + self.create_entity_in_avalon(entity, parent_avalon_ent) + + for child in entity["children"]: + if child.entity_type.lower() == "task": + continue + children_queue.put(child) + + def create_entity_in_avalon(self, ftrack_ent, parent_avalon): + proj, ents = self.avalon_entities + + # Parents, Hierarchy + ent_path_items = [ent["name"] for ent in ftrack_ent["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + hierarchy = "" + if len(parents) > 0: + hierarchy = os.path.sep.join(parents) + + # TODO logging + self.log.debug( + "Trying to synchronize entity <{}>".format( + "/".join(ent_path_items) + ) + ) + + # Tasks + tasks = [] + for child in ftrack_ent["children"]: + if child.entity_type.lower() != "task": + continue + tasks.append(child["name"]) + + # Visual Parent + vis_par = None + if parent_avalon["type"].lower() != "project": + vis_par = parent_avalon["_id"] + + mongo_id = ObjectId() + name = ftrack_ent["name"] + final_entity = { + "_id": mongo_id, + "name": name, + "type": "asset", + "schema": EntitySchemas["asset"], + "parent": proj["_id"], + "data": { + "ftrackId": ftrack_ent["id"], + "entityType": ftrack_ent.entity_type, + "parents": parents, + "hierarchy": hierarchy, + "tasks": tasks, + "visualParent": vis_par + } + } + cust_attrs = self.get_cust_attr_values(ftrack_ent) + for key, val in cust_attrs.items(): + if 
key.startswith("avalon_"): + continue + final_entity["data"][key] = val + + _mongo_id_str = cust_attrs.get(CustAttrIdKey) + if _mongo_id_str: + try: + _mongo_id = ObjectId(_mongo_id_str) + if _mongo_id not in self.avalon_ents_by_id: + mongo_id = _mongo_id + final_entity["_id"] = mongo_id + + except Exception: + pass + + ent_path_items = [self.cur_project["full_name"]] + ent_path_items.extend([par for par in parents]) + ent_path_items.append(name) + ent_path = "/".join(ent_path_items) + + try: + schema.validate(final_entity) + except Exception: + # TODO logging + # TODO report + error_msg = ( + "Schema validation failed for new entity (This is a bug)" + ) + error_traceback = ( + str(traceback.format_exc()).replace("\n", "
") + ).replace(" ", " ") + + item_msg = ent_path + "
" + error_traceback + self.report_items["error"][error_msg].append(item_msg) + self.log.error( + "{}: \"{}\"".format(error_msg, str(final_entity)), + exc_info=True + ) + return None + + replaced = False + archived = self.avalon_archived_by_name.get(name) + if archived: + archived_id = archived["_id"] + if ( + archived["data"]["parents"] == parents or + self.changeability_by_mongo_id[archived_id] + ): + # TODO logging + self.log.debug( + "Entity was unarchived instead of creation <{}>".format( + ent_path + ) + ) + mongo_id = archived_id + final_entity["_id"] = mongo_id + self.dbcon.replace_one({"_id": mongo_id}, final_entity) + replaced = True + + if not replaced: + self.dbcon.insert_one(final_entity) + # TODO logging + self.log.debug("Entity was synchronized <{}>".format(ent_path)) + + mongo_id_str = str(mongo_id) + if mongo_id_str != ftrack_ent["custom_attributes"][CustAttrIdKey]: + ftrack_ent["custom_attributes"][CustAttrIdKey] = mongo_id_str + try: + self.process_session.commit() + except Exception: + self.process_session.rolback() + # TODO logging + # TODO report + error_msg = "Failed to store MongoID to entity's custom attribute" + report_msg = ( + "{}||SyncToAvalon action may solve this issue" + ).format(error_msg) + + self.report_items["warning"][report_msg].append(ent_path) + self.log.error( + "{}: \"{}\"".format(error_msg, ent_path), + exc_info=True + ) + + # modify cached data + # Skip if self._avalon_ents is not set(maybe never happen) + if self._avalon_ents is None: + return final_entity + + if self._avalon_ents is not None: + proj, ents = self._avalon_ents + ents.append(final_entity) + self._avalon_ents = (proj, ents) + + if self._avalon_ents_by_id is not None: + self._avalon_ents_by_id[mongo_id] = final_entity + + if self._avalon_ents_by_parent_id is not None: + self._avalon_ents_by_parent_id[vis_par].append(final_entity) + + if self._avalon_ents_by_ftrack_id is not None: + self._avalon_ents_by_ftrack_id[ftrack_ent["id"]] = final_entity + + if self._avalon_ents_by_name is not None: + self._avalon_ents_by_name[ftrack_ent["name"]] = final_entity + + return final_entity + + def get_cust_attr_values(self, entity, keys=None): + output = {} + custom_attrs, hier_attrs = self.avalon_custom_attributes + not_processed_keys = True + if keys: + not_processed_keys = [k for k in keys] + # Notmal custom attributes + processed_keys = [] + for attr in custom_attrs: + if not not_processed_keys: + break + key = attr["key"] + if key in processed_keys: + continue + + if key not in entity["custom_attributes"]: + continue + + if keys: + if key not in keys: + continue + else: + not_processed_keys.remove(key) + + output[key] = entity["custom_attributes"][key] + processed_keys.append(key) + + if not not_processed_keys: + return output + + # Hierarchical cust attrs + hier_keys = [] + defaults = {} + for attr in hier_attrs: + key = attr["key"] + if keys and key not in keys: + continue + hier_keys.append(key) + defaults[key] = attr["default"] + + hier_values = avalon_sync.get_hierarchical_attributes( + self.process_session, entity, hier_keys, defaults + ) + for key, val in hier_values.items(): + output[key] = val + + return output + + def process_renamed(self): + if not self.ftrack_renamed: + return + + ent_infos = self.ftrack_renamed + renamed_tasks = {} + not_found = {} + changeable_queue = queue.Queue() + for ftrack_id, ent_info in ent_infos.items(): + entity_type = ent_info["entity_type"] + new_name = ent_info["changes"]["name"]["new"] + old_name = ent_info["changes"]["name"]["old"] + if entity_type 
== "Task": + parent_id = ent_info["parentId"] + renamed_tasks[parent_id] = { + "new": new_name, + "old": old_name, + "ent_info": ent_info + } + continue + + ent_path = self.get_ent_path(ftrack_id) + avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) + if not avalon_ent: + # TODO logging + self.log.debug(( + "Can't change the name (Entity is not is avalon) <{}>" + ).format(ent_path)) + not_found[ftrack_id] = ent_info + continue + + if new_name == avalon_ent["name"]: + # TODO logging + self.log.debug(( + "Avalon entity already has the same name <{}>" + ).format(ent_path)) + continue + + mongo_id = avalon_ent["_id"] + if self.changeability_by_mongo_id[mongo_id]: + changeable_queue.put((ftrack_id, avalon_ent, new_name)) + else: + ftrack_ent = self.ftrack_ents_by_id[ftrack_id] + ftrack_ent["name"] = avalon_ent["name"] + try: + self.process_session.commit() + # TODO logging + # TODO report + error_msg = "Entity renamed back" + report_msg = ( + "{}||It is not possible to change" + " the name of an entity or it's parents, " + " if it already contained published data." + ).format(error_msg) + self.report_items["info"][report_msg].append(ent_path) + self.log.warning("{} <{}>".format(error_msg, ent_path)) + + except Exception: + self.process_session.rollback() + # TODO report + # TODO logging + error_msg = ( + "Couldn't rename the entity back to its original name" + ) + report_msg = ( + "{}||Renamed because it is not possible to" + " change the name of an entity or it's parents, " + " if it already contained published data." + ).format(error_msg) + error_traceback = ( + str(traceback.format_exc()).replace("\n", "
") + ).replace(" ", " ") + + item_msg = ent_path + "
" + error_traceback + self.report_items["warning"][report_msg].append(item_msg) + self.log.warning( + "{}: \"{}\"".format(error_msg, ent_path), + exc_info=True + ) + + old_names = [] + # Process renaming in Avalon DB + while not changeable_queue.empty(): + ftrack_id, avalon_ent, new_name = changeable_queue.get() + mongo_id = avalon_ent["_id"] + old_name = avalon_ent["name"] + + _entity_type = "asset" + if entity_type == "Project": + _entity_type = "project" + + passed_regex = avalon_sync.check_regex( + new_name, _entity_type, schema_patterns=self.regex_schemas + ) + if not passed_regex: + self.regex_failed.append(ftrack_id) + continue + + # if avalon does not have same name then can be changed + same_name_avalon_ent = self.avalon_ents_by_name.get(new_name) + if not same_name_avalon_ent: + old_val = self._avalon_ents_by_name.pop(old_name) + old_val["name"] = new_name + self._avalon_ents_by_name[new_name] = old_val + self.updates[mongo_id] = {"name": new_name} + self.renamed_in_avalon.append(mongo_id) + + old_names.append(old_name) + if new_name in old_names: + old_names.remove(new_name) + + # TODO logging + ent_path = self.get_ent_path(ftrack_id) + self.log.debug( + "Name of entity will be changed to \"{}\" <{}>".format( + new_name, ent_path + ) + ) + continue + + # Check if same name is in changable_queue + # - it's name may be changed in next iteration + same_name_ftrack_id = same_name_avalon_ent["data"]["ftrackId"] + same_is_unprocessed = False + for item in list(changeable_queue.queue): + if same_name_ftrack_id == item[0]: + same_is_unprocessed = True + break + + if same_is_unprocessed: + changeable_queue.put((ftrack_id, avalon_ent, new_name)) + continue + + self.duplicated.append(ftrack_id) + + if old_names: + self.check_names_synchronizable(old_names) + + for parent_id, task_change in renamed_tasks.items(): + avalon_ent = self.avalon_ents_by_ftrack_id.get(parent_id) + ent_info = task_change["ent_info"] + if not avalon_ent: + not_found[ent_info["entityId"]] = ent_info + continue + + new_name = task_change["new"] + old_name = task_change["old"] + passed_regex = avalon_sync.check_regex( + new_name, "task", schema_patterns=self.regex_schemas + ) + if not passed_regex: + ftrack_id = ent_info["enityId"] + self.regex_failed.append(ftrack_id) + continue + + mongo_id = avalon_ent["_id"] + if mongo_id not in self.task_changes_by_avalon_id: + self.task_changes_by_avalon_id[mongo_id] = ( + avalon_ent["data"]["tasks"] + ) + + if old_name in self.task_changes_by_avalon_id[mongo_id]: + self.task_changes_by_avalon_id[mongo_id].remove(old_name) + else: + parent_ftrack_ent = self.ftrack_ents_by_id.get(parent_id) + if not parent_ftrack_ent: + parent_ftrack_ent = self.process_session.query( + self.entities_query_by_id.format( + self.cur_project["id"], parent_id + ) + ).first() + + if parent_ftrack_ent: + self.ftrack_ents_by_id[parent_id] = parent_ftrack_ent + child_names = [] + for child in parent_ftrack_ent["children"]: + if child.entity_type.lower() != "task": + continue + child_names.append(child["name"]) + + tasks = [task for task in ( + self.task_changes_by_avalon_id[mongo_id] + )] + for task in tasks: + if task not in child_names: + self.task_changes_by_avalon_id[mongo_id].remove( + task + ) + + if new_name not in self.task_changes_by_avalon_id[mongo_id]: + self.task_changes_by_avalon_id[mongo_id].append(new_name) + + # not_found are not processed since all not found are + # not found because they are not synchronizable + + def process_added(self): + ent_infos = self.ftrack_added + if not 
ent_infos: + return + + cust_attrs, hier_attrs = self.avalon_cust_attrs + entity_type_conf_ids = {} + # Skip if already exit in avalon db or tasks entities + # - happen when was created by any sync event/action + pop_out_ents = [] + new_tasks_by_parent = collections.defaultdict(list) + _new_ent_infos = {} + for ftrack_id, ent_info in ent_infos.items(): + if self.avalon_ents_by_ftrack_id.get(ftrack_id): + pop_out_ents.append(ftrack_id) + self.log.warning( + "Added entity is already synchronized <{}>".format( + self.get_ent_path(ftrack_id) + ) + ) + continue + + entity_type = ent_info["entity_type"] + if entity_type == "Task": + parent_id = ent_info["parentId"] + new_tasks_by_parent[parent_id].append(ent_info) + pop_out_ents.append(ftrack_id) + continue + + name = ( + ent_info + .get("changes", {}) + .get("name", {}) + .get("new") + ) + avalon_ent_by_name = self.avalon_ents_by_name.get(name) + avalon_ent_by_name_ftrack_id = ( + avalon_ent_by_name + .get("data", {}) + .get("ftrackId") + ) + if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None: + ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) + if not ftrack_ent: + ftrack_ent = self.process_session.query( + self.entities_query_by_id.format( + self.cur_project["id"], ftrack_id + ) + ).one() + self.ftrack_ents_by_id[ftrack_id] = ftrack_ent + + ent_path_items = [ent["name"] for ent in ftrack_ent["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + + avalon_ent_parents = ( + avalon_ent_by_name.get("data", {}).get("parents") + ) + if parents == avalon_ent_parents: + self.dbcon.update_one({ + "_id": avalon_ent_by_name["_id"] + }, { + "$set": { + "data.ftrackId": ftrack_id, + "data.entityType": entity_type + } + }) + + avalon_ent_by_name["data"]["ftrackId"] = ftrack_id + avalon_ent_by_name["data"]["entityType"] = entity_type + + self._avalon_ents_by_ftrack_id[ftrack_id] = ( + avalon_ent_by_name + ) + if self._avalon_ents_by_parent_id: + found = None + for _parent_id_, _entities_ in ( + self._avalon_ents_by_parent_id.items() + ): + for _idx_, entity in enumerate(_entities_): + if entity["_id"] == avalon_ent_by_name["_id"]: + found = (_parent_id_, _idx_) + break + + if found: + break + + if found: + _parent_id_, _idx_ = found + self._avalon_ents_by_parent_id[_parent_id_][ + _idx_] = avalon_ent_by_name + + if self._avalon_ents_by_id: + self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = ( + avalon_ent_by_name + ) + + if self._avalon_ents_by_name: + self._avalon_ents_by_name[name] = avalon_ent_by_name + + if self._avalon_ents: + found = None + project, entities = self._avalon_ents + for _idx_, _ent_ in enumerate(entities): + if _ent_["_id"] != avalon_ent_by_name["_id"]: + continue + found = _idx_ + break + + if found is not None: + entities[found] = avalon_ent_by_name + self._avalon_ents = project, entities + + pop_out_ents.append(ftrack_id) + continue + + configuration_id = entity_type_conf_ids.get(entity_type) + if not configuration_id: + for attr in cust_attrs: + key = attr["key"] + if key != CustAttrIdKey: + continue + + if attr["entity_type"] != ent_info["entityType"]: + continue + + if ( + ent_info["entityType"] == "task" and + attr["object_type_id"] != ent_info["objectTypeId"] + ): + continue + + configuration_id = attr["id"] + entity_type_conf_ids[entity_type] = configuration_id + break + + _entity_key = collections.OrderedDict({ + "configuration_id": configuration_id, + "entity_id": ftrack_id + }) + + self.process_session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + 
"ContextCustomAttributeValue", + _entity_key, + "value", + ftrack_api.symbol.NOT_SET, + "" + ) + ) + + try: + # Commit changes of mongo_id to empty string + self.process_session.commit() + self.log.debug("Committing unset of mongo id values") + except Exception: + self.process_session.rollback() + # TODO logging + msg = ( + "Could not set value of Custom attribute, where mongo id" + " is stored, to empty string. Ftrack ids: \"{}\"" + ).format(", ".join(ent_infos.keys())) + self.log.warning(msg, exc_info=True) + + for ftrack_id in pop_out_ents: + ent_infos.pop(ftrack_id) + + # sort by parents length (same as by hierarchy level) + _ent_infos = sorted( + ent_infos.values(), + key=(lambda ent_info: len(ent_info.get("parents", []))) + ) + to_sync_by_id = collections.OrderedDict() + for ent_info in _ent_infos: + ft_id = ent_info["entityId"] + to_sync_by_id[ft_id] = self.ftrack_ents_by_id[ft_id] + + # cache regex success (for tasks) + for ftrack_id, entity in to_sync_by_id.items(): + if entity.entity_type.lower() == "project": + raise Exception(( + "Project can't be created with event handler!" + " This is a bug" + )) + parent_id = entity["parent_id"] + parent_avalon = self.avalon_ents_by_ftrack_id.get(parent_id) + if not parent_avalon: + # TODO logging + self.log.debug(( + "Skipping synchronization of entity" + " because parent was not found in Avalon DB <{}>" + ).format(self.get_ent_path(ftrack_id))) + continue + + is_synchronizable = True + name = entity["name"] + passed_regex = avalon_sync.check_regex( + name, "asset", schema_patterns=self.regex_schemas + ) + if not passed_regex: + self.regex_failed.append(ftrack_id) + is_synchronizable = False + + if name in self.avalon_ents_by_name: + self.duplicated.append(ftrack_id) + is_synchronizable = False + + if not is_synchronizable: + continue + + self.create_entity_in_avalon(entity, parent_avalon) + + for parent_id, ent_infos in new_tasks_by_parent.items(): + avalon_ent = self.avalon_ents_by_ftrack_id.get(parent_id) + if not avalon_ent: + # TODO logging + self.log.debug(( + "Skipping synchronization of task" + " because parent was not found in Avalon DB <{}>" + ).format(self.get_ent_path(parent_id))) + continue + + mongo_id = avalon_ent["_id"] + if mongo_id not in self.task_changes_by_avalon_id: + self.task_changes_by_avalon_id[mongo_id] = ( + avalon_ent["data"]["tasks"] + ) + + for ent_info in ent_infos: + new_name = ent_info["changes"]["name"]["new"] + passed_regex = avalon_sync.check_regex( + new_name, "task", schema_patterns=self.regex_schemas + ) + if not passed_regex: + self.regex_failed.append(ent_info["entityId"]) + continue + + if new_name not in self.task_changes_by_avalon_id[mongo_id]: + self.task_changes_by_avalon_id[mongo_id].append(new_name) + + def process_moved(self): + if not self.ftrack_moved: + return + + ftrack_moved = {k: v for k, v in sorted( + self.ftrack_moved.items(), + key=(lambda line: len( + (line[1].get("data", {}).get("parents") or []) + )) + )} + + for ftrack_id, ent_info in ftrack_moved.items(): + avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) + if not avalon_ent: + continue + + new_parent_id = ent_info["changes"]["parent_id"]["new"] + old_parent_id = ent_info["changes"]["parent_id"]["old"] + + mongo_id = avalon_ent["_id"] + if self.changeability_by_mongo_id[mongo_id]: + par_av_ent = self.avalon_ents_by_ftrack_id.get(new_parent_id) + if not par_av_ent: + # TODO logging + # TODO report + ent_path_items = [self.cur_project["full_name"]] + ent_path_items.extend(avalon_ent["data"]["parents"]) + 
ent_path_items.append(avalon_ent["name"]) + ent_path = "/".join(ent_path_items) + + error_msg = ( + "New parent of entity is not synchronized to avalon" + ) + report_msg = ( + "{}||Parent in Avalon can't be changed. That" + " may cause issues. Please fix parent or move entity" + " under valid entity." + ).format(error_msg) + + self.report_items["warning"][report_msg].append(ent_path) + self.log.warning("{} <{}>".format(error_msg, ent_path)) + continue + + # THIS MUST HAPPEN AFTER CREATING NEW ENTITIES !!!! + # - because the entity may be moved under a newly created entity + if "data" not in self.updates[mongo_id]: + self.updates[mongo_id]["data"] = {} + + vis_par_id = None + if par_av_ent["type"].lower() != "project": + vis_par_id = par_av_ent["_id"] + self.updates[mongo_id]["data"]["visualParent"] = vis_par_id + self.moved_in_avalon.append(mongo_id) + + # TODO logging + ent_path_items = [self.cur_project["full_name"]] + ent_path_items.extend(par_av_ent["data"]["parents"]) + ent_path_items.append(par_av_ent["name"]) + ent_path_items.append(avalon_ent["name"]) + ent_path = "/".join(ent_path_items) + self.log.debug(( + "Parent of entity ({}) was changed in avalon <{}>" + ).format(str(mongo_id), ent_path) + ) + + else: + avalon_ent = self.avalon_ents_by_id[mongo_id] + avalon_parent_id = avalon_ent["data"]["visualParent"] + if avalon_parent_id is None: + avalon_parent_id = avalon_ent["parent"] + + avalon_parent = self.avalon_ents_by_id[avalon_parent_id] + parent_id = avalon_parent["data"]["ftrackId"] + + # For cases when parent was deleted at the same time + if parent_id in self.ftrack_recreated_mapping: + parent_id = ( + self.ftrack_recreated_mapping[parent_id] + ) + + ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) + if not ftrack_ent: + ftrack_ent = self.process_session.query( + self.entities_query_by_id.format( + self.cur_project["id"], ftrack_id + ) + ).one() + self.ftrack_ents_by_id[ftrack_id] = ftrack_ent + + if parent_id == ftrack_ent["parent_id"]: + continue + + ftrack_ent["parent_id"] = parent_id + try: + self.process_session.commit() + # TODO logging + # TODO report + msg = "Entity was moved back" + report_msg = ( + "{}||Entity can't be moved when" + " it or its children contain published data" + ).format(msg) + ent_path = self.get_ent_path(ftrack_id) + self.report_items["info"][report_msg].append(ent_path) + self.log.warning("{} <{}>".format(msg, ent_path)) + + except Exception: + self.process_session.rollback() + # TODO logging + # TODO report + error_msg = ( + "Couldn't move the entity back to its original parent" + ) + report_msg = ( + "{}||Moved back because it is not possible to" + " move an entity or its parents" + " if they already contain published data." + ).format(error_msg) + error_traceback = ( + str(traceback.format_exc()).replace("\n", "<br>
") + ).replace(" ", "&nbsp;") + + item_msg = ent_path + "<br>
" + error_traceback + self.report_items["warning"][report_msg].append(item_msg) + self.log.warning( + "{}: \"{}\"".format(error_msg, ent_path), + exc_info=True + ) + + def process_updated(self): + # Only custom attributes changes should get here + if not self.ftrack_updated: + return + + ent_infos = self.ftrack_updated + ftrack_mongo_mapping = {} + not_found_ids = [] + for ftrack_id, ent_info in ent_infos.items(): + avalon_ent = self.avalon_ents_by_ftrack_id.get(ftrack_id) + if not avalon_ent: + not_found_ids.append(ftrack_id) + continue + + ftrack_mongo_mapping[ftrack_id] = avalon_ent["_id"] + + for ftrack_id in not_found_ids: + ent_infos.pop(ftrack_id) + + if not ent_infos: + return + + cust_attrs, hier_attrs = self.avalon_cust_attrs + cust_attrs_by_obj_id = collections.defaultdict(dict) + for cust_attr in cust_attrs: + key = cust_attr["key"] + if key.startswith("avalon_"): + continue + + ca_ent_type = cust_attr["entity_type"] + + if ca_ent_type == "show": + cust_attrs_by_obj_id[ca_ent_type][key] = cust_attr + + elif ca_ent_type == "task": + obj_id = cust_attr["object_type_id"] + cust_attrs_by_obj_id[obj_id][key] = cust_attr + + hier_attrs_keys = [attr["key"] for attr in hier_attrs] + + for ftrack_id, ent_info in ent_infos.items(): + mongo_id = ftrack_mongo_mapping[ftrack_id] + entType = ent_info["entityType"] + ent_path = self.get_ent_path(ftrack_id) + if entType == "show": + ent_cust_attrs = cust_attrs_by_obj_id.get("show") + else: + obj_type_id = ent_info["objectTypeId"] + ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id) + + if ent_cust_attrs is None: + self.log.warning(( + "BUG REPORT: Entity has ent type without" + " custom attributes <{}> \"{}\"" + ).format(entType, ent_info)) + continue + + for key, values in ent_info["changes"].items(): + if key in hier_attrs_keys: + self.hier_cust_attrs_changes[key].append(ftrack_id) + continue + + if key not in ent_cust_attrs: + continue + + if "data" not in self.updates[mongo_id]: + self.updates[mongo_id]["data"] = {} + value = values["new"] + self.updates[mongo_id]["data"][key] = value + self.log.debug( + "Setting data value of \"{}\" to \"{}\" <{}>".format( + key, value, ent_path + ) + ) + + if entType != "show" or key != "applications": + continue + + # Store apps to project't config + apps_str = ent_info["changes"]["applications"]["new"] + cust_attr_apps = [app for app in apps_str.split(", ") if app] + + proj_apps, warnings = ( + avalon_sync.get_project_apps(cust_attr_apps) + ) + if "config" not in self.updates[mongo_id]: + self.updates[mongo_id]["config"] = {} + self.updates[mongo_id]["config"]["apps"] = proj_apps + + for msg, items in warnings.items(): + if not msg or not items: + continue + self.report_items["warning"][msg] = items + + def process_hier_cleanup(self): + if ( + not self.moved_in_avalon and + not self.renamed_in_avalon and + not self.hier_cust_attrs_changes and + not self.task_changes_by_avalon_id ): return - # check if project have Custom Attribute 'avalon_mongo_id' - if ca_mongoid not in ft_project['custom_attributes']: - message = ( - "Custom attribute '{}' for 'Project' is not created" - " or don't have set permissions for API" - ).format(ca_mongoid) - self.log.warning(message) - self.show_message(event, message, False) - return + parent_changes = [] + hier_cust_attrs_ids = [] + hier_cust_attrs_keys = [] + all_keys = False + for mongo_id in self.moved_in_avalon: + parent_changes.append(mongo_id) + hier_cust_attrs_ids.append(mongo_id) + all_keys = True - # get avalon project if possible - import_entities = [] + 
for mongo_id in self.renamed_in_avalon: + if mongo_id not in parent_changes: + parent_changes.append(mongo_id) - custom_attributes = lib.get_avalon_attr(session) - - avalon_project = lib.get_avalon_project(ft_project) - if avalon_project is None: - import_entities.append(ft_project) - - for entity in entities: - if entity.entity_type.lower() in ['task']: - entity = entity['parent'] - - if 'custom_attributes' not in entity: + for key, ftrack_ids in self.hier_cust_attrs_changes.items(): + if key.startswith("avalon_"): continue - if ca_mongoid not in entity['custom_attributes']: + for ftrack_id in ftrack_ids: + avalon_ent = self.avalon_ents_by_ftrack_id[ftrack_id] + mongo_id = avalon_ent["_id"] + if mongo_id in hier_cust_attrs_ids: + continue + hier_cust_attrs_ids.append(mongo_id) + if not all_keys and key not in hier_cust_attrs_keys: + hier_cust_attrs_keys.append(key) - message = ( - "Custom attribute '{}' for '{}' is not created" - " or don't have set permissions for API" - ).format(ca_mongoid, entity.entity_type) + # Tasks preparation **** + for mongo_id, tasks in self.task_changes_by_avalon_id.items(): + avalon_ent = self.avalon_ents_by_id[mongo_id] + if "data" not in self.updates[mongo_id]: + self.updates[mongo_id]["data"] = {} - self.log.warning(message) - self.show_message(event, message, False) - return + self.updates[mongo_id]["data"]["tasks"] = tasks - if entity not in import_entities: - import_entities.append(entity) + # Parents preparation *** + mongo_to_ftrack_parents = {} + missing_ftrack_ents = {} + for mongo_id in parent_changes: + avalon_ent = self.avalon_ents_by_id[mongo_id] + ftrack_id = avalon_ent["data"]["ftrackId"] + if ftrack_id not in self.ftrack_ents_by_id: + missing_ftrack_ents[ftrack_id] = mongo_id + continue + ftrack_ent = self.ftrack_ents_by_id[ftrack_id] + mongo_to_ftrack_parents[mongo_id] = len(ftrack_ent["link"]) - if len(import_entities) < 1: + if missing_ftrack_ents: + joined_ids = ", ".join( + ["\"{}\"".format(id) for id in missing_ftrack_ents.keys()] + ) + entities = self.process_session.query( + self.entities_query_by_id.format( + self.cur_project["id"], joined_ids + ) + ).all() + for entity in entities: + ftrack_id = entity["id"] + self.ftrack_ents_by_id[ftrack_id] = entity + mongo_id = missing_ftrack_ents[ftrack_id] + mongo_to_ftrack_parents[mongo_id] = len(entity["link"]) + + stored_parents_by_mongo = {} + # sort by hierarchy level + mongo_to_ftrack_parents = [k for k, v in sorted( + mongo_to_ftrack_parents.items(), + key=(lambda item: item[1]) + )] + self.log.debug( + "Updating parents and hierarchy because of name/parenting changes" + ) + for mongo_id in mongo_to_ftrack_parents: + avalon_ent = self.avalon_ents_by_id[mongo_id] + vis_par = avalon_ent["data"]["visualParent"] + if vis_par in stored_parents_by_mongo: + parents = [par for par in stored_parents_by_mongo[vis_par]] + if vis_par is not None: + parent_ent = self.avalon_ents_by_id[vis_par] + parents.append(parent_ent["name"]) + stored_parents_by_mongo[mongo_id] = parents + continue + + ftrack_id = avalon_ent["data"]["ftrackId"] + ftrack_ent = self.ftrack_ents_by_id[ftrack_id] + ent_path_items = [ent["name"] for ent in ftrack_ent["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + stored_parents_by_mongo[mongo_id] = parents + + for mongo_id, parents in stored_parents_by_mongo.items(): + avalon_ent = self.avalon_ents_by_id[mongo_id] + cur_par = avalon_ent["data"]["parents"] + if cur_par == parents: + continue + + hierarchy = "" + if len(parents) > 0: + hierarchy = 
os.path.sep.join(parents) + + if "data" not in self.updates[mongo_id]: + self.updates[mongo_id]["data"] = {} + self.updates[mongo_id]["data"]["parents"] = parents + self.updates[mongo_id]["data"]["hierarchy"] = hierarchy + + # Skip custom attributes if didn't change + if not hier_cust_attrs_ids: + # TODO logging + self.log.debug( + "Hierarchical attributes were not changed. Skipping" + ) + self.update_entities() return - try: - for entity in import_entities: - result = lib.import_to_avalon( - session=session, - entity=entity, - ft_project=ft_project, - av_project=avalon_project, - custom_attributes=custom_attributes + cust_attrs, hier_attrs = self.avalon_cust_attrs + + # Hierarchical custom attributes preparation *** + if all_keys: + hier_cust_attrs_keys = [ + attr["key"] for attr in hier_attrs if ( + not attr["key"].startswith("avalon_") ) - if 'errors' in result and len(result['errors']) > 0: - session.commit() - lib.show_errors(self, event, result['errors']) - - return - - if avalon_project is None: - if 'project' in result: - avalon_project = result['project'] - - except Exception as e: - # reset session to clear it - session.rollback() - - message = str(e) - title = 'Hey You! Unknown Error has been raised! (*look below*)' - ftrack_message = ( - 'SyncToAvalon event ended with unexpected error' - ' please check log file or contact Administrator' - ' for more information.' - ) - items = [ - {'type': 'label', 'value': '# Fatal Error'}, - {'type': 'label', 'value': '
<p>{}</p>
'.format(ftrack_message)} ] - self.show_interface(items, title, event=event) - self.log.error( - 'Fatal error during sync: {}'.format(message), exc_info=True + + mongo_ftrack_mapping = {} + cust_attrs_ftrack_ids = [] + # ftrack_parenting = collections.defaultdict(list) + entities_dict = collections.defaultdict(dict) + + children_queue = queue.Queue() + parent_queue = queue.Queue() + + for mongo_id in hier_cust_attrs_ids: + avalon_ent = self.avalon_ents_by_id[mongo_id] + parent_queue.put(avalon_ent) + ftrack_id = avalon_ent["data"]["ftrackId"] + if ftrack_id not in entities_dict: + entities_dict[ftrack_id] = { + "children": [], + "parent_id": None, + "hier_attrs": {} + } + + mongo_ftrack_mapping[mongo_id] = ftrack_id + cust_attrs_ftrack_ids.append(ftrack_id) + children_ents = self.avalon_ents_by_parent_id.get(mongo_id) or [] + for children_ent in children_ents: + _ftrack_id = children_ent["data"]["ftrackId"] + if _ftrack_id in entities_dict: + continue + + entities_dict[_ftrack_id] = { + "children": [], + "parent_id": None, + "hier_attrs": {} + } + # if _ftrack_id not in ftrack_parenting[ftrack_id]: + # ftrack_parenting[ftrack_id].append(_ftrack_id) + entities_dict[_ftrack_id]["parent_id"] = ftrack_id + if _ftrack_id not in entities_dict[ftrack_id]["children"]: + entities_dict[ftrack_id]["children"].append(_ftrack_id) + children_queue.put(children_ent) + + while not children_queue.empty(): + avalon_ent = children_queue.get() + mongo_id = avalon_ent["_id"] + ftrack_id = avalon_ent["data"]["ftrackId"] + if ftrack_id in cust_attrs_ftrack_ids: + continue + + mongo_ftrack_mapping[mongo_id] = ftrack_id + cust_attrs_ftrack_ids.append(ftrack_id) + + children_ents = self.avalon_ents_by_parent_id.get(mongo_id) or [] + for children_ent in children_ents: + _ftrack_id = children_ent["data"]["ftrackId"] + if _ftrack_id in entities_dict: + continue + + entities_dict[_ftrack_id] = { + "children": [], + "parent_id": None, + "hier_attrs": {} + } + entities_dict[_ftrack_id]["parent_id"] = ftrack_id + if _ftrack_id not in entities_dict[ftrack_id]["children"]: + entities_dict[ftrack_id]["children"].append(_ftrack_id) + children_queue.put(children_ent) + + while not parent_queue.empty(): + avalon_ent = parent_queue.get() + if avalon_ent["type"].lower() == "project": + continue + + ftrack_id = avalon_ent["data"]["ftrackId"] + + vis_par = avalon_ent["data"]["visualParent"] + if vis_par is None: + vis_par = avalon_ent["parent"] + + parent_ent = self.avalon_ents_by_id[vis_par] + parent_ftrack_id = parent_ent["data"]["ftrackId"] + if parent_ftrack_id not in entities_dict: + entities_dict[parent_ftrack_id] = { + "children": [], + "parent_id": None, + "hier_attrs": {} + } + + if ftrack_id not in entities_dict[parent_ftrack_id]["children"]: + entities_dict[parent_ftrack_id]["children"].append(ftrack_id) + + entities_dict[ftrack_id]["parent_id"] = parent_ftrack_id + + if parent_ftrack_id in cust_attrs_ftrack_ids: + continue + mongo_ftrack_mapping[vis_par] = parent_ftrack_id + cust_attrs_ftrack_ids.append(parent_ftrack_id) + # if ftrack_id not in ftrack_parenting[parent_ftrack_id]: + # ftrack_parenting[parent_ftrack_id].append(ftrack_id) + + parent_queue.put(parent_ent) + + # Prepare values to query + entity_ids_joined = ", ".join([ + "\"{}\"".format(id) for id in cust_attrs_ftrack_ids + ]) + attributes_joined = ", ".join([ + "\"{}\"".format(name) for name in hier_cust_attrs_keys + ]) + + queries = [{ + "action": "query", + "expression": ( + "select value, entity_id from CustomAttributeValue " + "where entity_id in ({}) 
and configuration.key in ({})" + ).format(entity_ids_joined, attributes_joined) + }] + + if hasattr(self.process_session, "call"): + [values] = self.process_session.call(queries) + else: + [values] = self.process_session._call(queries) + + ftrack_project_id = self.cur_project["id"] + + for attr in hier_attrs: + key = attr["key"] + if key not in hier_cust_attrs_keys: + continue + entities_dict[ftrack_project_id]["hier_attrs"][key] = ( + attr["default"] ) - return + # PREPARE DATA BEFORE THIS + avalon_hier = [] + for value in values["data"]: + if value["value"] is None: + continue + entity_id = value["entity_id"] + key = value["configuration"]["key"] + entities_dict[entity_id]["hier_attrs"][key] = value["value"] + + # Get dictionary with not None hierarchical values to pull to childs + project_values = {} + for key, value in ( + entities_dict[ftrack_project_id]["hier_attrs"].items() + ): + if value is not None: + project_values[key] = value + + for key in avalon_hier: + value = entities_dict[ftrack_project_id]["avalon_attrs"][key] + if value is not None: + project_values[key] = value + + hier_down_queue = queue.Queue() + hier_down_queue.put((project_values, ftrack_project_id)) + + while not hier_down_queue.empty(): + hier_values, parent_id = hier_down_queue.get() + for child_id in entities_dict[parent_id]["children"]: + _hier_values = hier_values.copy() + for name in hier_cust_attrs_keys: + value = entities_dict[child_id]["hier_attrs"].get(name) + if value is not None: + _hier_values[name] = value + + entities_dict[child_id]["hier_attrs"].update(_hier_values) + hier_down_queue.put((_hier_values, child_id)) + + ftrack_mongo_mapping = {} + for mongo_id, ftrack_id in mongo_ftrack_mapping.items(): + ftrack_mongo_mapping[ftrack_id] = mongo_id + + for ftrack_id, data in entities_dict.items(): + mongo_id = ftrack_mongo_mapping[ftrack_id] + avalon_ent = self.avalon_ents_by_id[mongo_id] + ent_path = self.get_ent_path(ftrack_id) + # TODO logging + self.log.debug( + "Updating hierarchical attributes <{}>".format(ent_path) + ) + for key, value in data["hier_attrs"].items(): + if ( + key in avalon_ent["data"] and + avalon_ent["data"][key] == value + ): + continue + + self.log.debug("- {}: {}".format(key, value)) + if "data" not in self.updates[mongo_id]: + self.updates[mongo_id]["data"] = {} + + self.updates[mongo_id]["data"][key] = value + + self.update_entities() + + def update_entities(self): + mongo_changes_bulk = [] + for mongo_id, changes in self.updates.items(): + filter = {"_id": mongo_id} + change_data = avalon_sync.from_dict_to_set(changes) + mongo_changes_bulk.append(UpdateOne(filter, change_data)) + + if not mongo_changes_bulk: + return + + self.dbcon.bulk_write(mongo_changes_bulk) + self.updates = collections.defaultdict(dict) + + @property + def duplicated_report(self): + if not self.duplicated: + return [] + + ft_project = self.cur_project + duplicated_names = [] + for ftrack_id in self.duplicated: + ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) + if not ftrack_ent: + ftrack_ent = self.process_session.query( + self.entities_query_by_id.format( + ft_project["id"], ftrack_id + ) + ).one() + self.ftrack_ents_by_id[ftrack_id] = ftrack_ent + name = ftrack_ent["name"] + if name not in duplicated_names: + duplicated_names.append(name) + + joined_names = ", ".join( + ["\"{}\"".format(name) for name in duplicated_names] + ) + ft_ents = self.process_session.query( + self.entities_name_query_by_name.format( + ft_project["id"], joined_names + ) + ).all() + + ft_ents_by_name = 
collections.defaultdict(list) + for ft_ent in ft_ents: + name = ft_ent["name"] + ft_ents_by_name[name].append(ft_ent) + + if not ft_ents_by_name: + return [] + + subtitle = "Duplicated entity names:" + items = [] + items.append({ + "type": "label", + "value": "# {}".format(subtitle) + }) + items.append({ + "type": "label", + "value": ( + "
<p>NOTE: It is not allowed to use the same name" + " for multiple entities in the same project</p>
" + ) + }) + + for name, ents in ft_ents_by_name.items(): + items.append({ + "type": "label", + "value": "## {}".format(name) + }) + paths = [] + for ent in ents: + ftrack_id = ent["id"] + ent_path = "/".join([_ent["name"] for _ent in ent["link"]]) + avalon_ent = self.avalon_ents_by_id.get(ftrack_id) + + if avalon_ent: + additional = " (synchronized)" + if avalon_ent["name"] != name: + additional = " (synchronized as {})".format( + avalon_ent["name"] + ) + ent_path += additional + paths.append(ent_path) + + items.append({ + "type": "label", + "value": '
<p>{}</p>
'.format("<br>
".join(paths)) + }) + + return items + + @property + def regex_report(self): + if not self.regex_failed: + return [] + + subtitle = "Entity names contain prohibited symbols:" + items = [] + items.append({ + "type": "label", + "value": "# {}".format(subtitle) + }) + items.append({ + "type": "label", + "value": ( + "
<p>NOTE: You can use Letters( a-Z )," + " Numbers( 0-9 ) and Underscore( _ )</p>
" + ) + }) + + ft_project = self.cur_project + for ftrack_id in self.regex_failed: + ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) + if not ftrack_ent: + ftrack_ent = self.process_session.query( + self.entities_query_by_id.format( + ft_project["id"], ftrack_id + ) + ).one() + self.ftrack_ents_by_id[ftrack_id] = ftrack_ent + + name = ftrack_ent["name"] + ent_path_items = [_ent["name"] for _ent in ftrack_ent["link"][:-1]] + ent_path_items.append("{}".format(name)) + ent_path = "/".join(ent_path_items) + items.append({ + "type": "label", + "value": "
<p>{} - {}</p>
".format(name, ent_path) + }) + + return items + + def report(self): + msg_len = len(self.duplicated) + len(self.regex_failed) + for msgs in self.report_items.values(): + msg_len += len(msgs) + + if msg_len == 0: + return + + items = [] + project_name = self.cur_project["full_name"] + title = "Synchronization report ({}):".format(project_name) + + keys = ["error", "warning", "info"] + for key in keys: + subitems = [] + if key == "warning": + subitems.extend(self.duplicated_report) + subitems.extend(self.regex_report) + + for _msg, _items in self.report_items[key].items(): + if not _items: + continue + + msg_items = _msg.split("||") + msg = msg_items[0] + subitems.append({ + "type": "label", + "value": "# {}".format(msg) + }) + + if len(msg_items) > 1: + for note in msg_items[1:]: + subitems.append({ + "type": "label", + "value": "
<p>NOTE: {}</p>
".format(note) + }) + + if isinstance(_items, str): + _items = [_items] + subitems.append({ + "type": "label", + "value": '
<p>{}</p>
'.format("<br>
".join(_items)) + }) + + if items and subitems: + items.append(self.report_splitter) + + items.extend(subitems) + + self.show_interface( + items=items, + title=title, + event=self._cur_event + ) + return True def register(session, plugins_presets): '''Register plugin. Called when used as an plugin.''' - SyncToAvalon(session, plugins_presets).register() + SyncToAvalonEvent(session, plugins_presets).register() diff --git a/pype/ftrack/events/event_thumbnail_updates.py b/pype/ftrack/events/event_thumbnail_updates.py index 47909da055..5421aa7543 100644 --- a/pype/ftrack/events/event_thumbnail_updates.py +++ b/pype/ftrack/events/event_thumbnail_updates.py @@ -1,4 +1,3 @@ -import ftrack_api from pype.ftrack import BaseEvent @@ -26,28 +25,34 @@ class ThumbnailEvents(BaseEvent): # Update task thumbnail from published version # if (entity['entityType'] == 'assetversion' and # entity['action'] == 'encoded'): - if ( - entity['entityType'] == 'assetversion' - and 'thumbid' in (entity.get('keys') or []) + elif ( + entity['entityType'] == 'assetversion' and + entity['action'] != 'remove' and + 'thumbid' in (entity.get('keys') or []) ): version = session.get('AssetVersion', entity['entityId']) + if not version: + continue + thumbnail = version.get('thumbnail') - if thumbnail: - parent = version['asset']['parent'] - task = version['task'] - parent['thumbnail_id'] = version['thumbnail_id'] - if parent.entity_type.lower() == "project": - name = parent["full_name"] - else: - name = parent["name"] - msg = '>>> Updating thumbnail for shot [ {} ]'.format(name) + if not thumbnail: + continue - if task: - task['thumbnail_id'] = version['thumbnail_id'] - msg += " and task [ {} ]".format(task["name"]) + parent = version['asset']['parent'] + task = version['task'] + parent['thumbnail_id'] = version['thumbnail_id'] + if parent.entity_type.lower() == "project": + name = parent["full_name"] + else: + name = parent["name"] + msg = '>>> Updating thumbnail for shot [ {} ]'.format(name) - self.log.info(msg) + if task: + task['thumbnail_id'] = version['thumbnail_id'] + msg += " and task [ {} ]".format(task["name"]) + + self.log.info(msg) try: session.commit() @@ -57,5 +62,4 @@ class ThumbnailEvents(BaseEvent): def register(session, plugins_presets): '''Register plugin. 
Called when used as an plugin.''' - ThumbnailEvents(session, plugins_presets).register() diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py index fe15eb1e20..eaacfd959a 100644 --- a/pype/ftrack/events/event_user_assigment.py +++ b/pype/ftrack/events/event_user_assigment.py @@ -1,12 +1,15 @@ -import ftrack_api -from pype.ftrack import BaseEvent, lib -from pype.ftrack.lib.io_nonsingleton import DbConnector -from bson.objectid import ObjectId -from pypeapp import config -from pypeapp import Anatomy -import subprocess import os import re +import subprocess + +from pype.ftrack import BaseEvent +from pype.ftrack.lib.avalon_sync import CustAttrIdKey +from pype.ftrack.lib.io_nonsingleton import DbConnector + +from bson.objectid import ObjectId + +from pypeapp import config +from pypeapp import Anatomy class UserAssigmentEvent(BaseEvent): @@ -36,7 +39,6 @@ class UserAssigmentEvent(BaseEvent): """ db_con = DbConnector() - ca_mongoid = lib.get_ca_mongoid() def error(self, *err): for e in err: @@ -105,7 +107,7 @@ class UserAssigmentEvent(BaseEvent): self.db_con.Session['AVALON_PROJECT'] = task['project']['full_name'] avalon_entity = None - parent_id = parent['custom_attributes'].get(self.ca_mongoid) + parent_id = parent['custom_attributes'].get(CustAttrIdKey) if parent_id: parent_id = ObjectId(parent_id) avalon_entity = self.db_con.find_one({ @@ -205,7 +207,9 @@ class UserAssigmentEvent(BaseEvent): # formatting work dir is easiest part as we can use whole path work_dir = anatomy.format(data)['avalon']['work'] # we also need publish but not whole - publish = anatomy.format_all(data)['partial']['avalon']['publish'] + filled_all = anatomy.format_all(data) + publish = filled_all['avalon']['publish'] + # now find path to {asset} m = re.search("(^.+?{})".format(data['asset']), publish) diff --git a/pype/ftrack/events/event_version_to_task_statuses.py b/pype/ftrack/events/event_version_to_task_statuses.py index 81398373bb..0d2a3130c0 100644 --- a/pype/ftrack/events/event_version_to_task_statuses.py +++ b/pype/ftrack/events/event_version_to_task_statuses.py @@ -1,73 +1,134 @@ -import ftrack_api from pype.ftrack import BaseEvent +from pypeapp import config class VersionToTaskStatus(BaseEvent): + # Presets usage + default_status_mapping = {} + def launch(self, session, event): '''Propagates status from version to task when changed''' # start of event procedure ---------------------------------- for entity in event['data'].get('entities', []): - # Filter non-assetversions - if ( - entity['entityType'] == 'assetversion' and - 'statusid' in (entity.get('keys') or []) - ): + # Filter AssetVersions + if entity["entityType"] != "assetversion": + continue - version = session.get('AssetVersion', entity['entityId']) - try: - version_status = session.get( - 'Status', entity['changes']['statusid']['new'] - ) - except Exception: + # Skip if statusid not in keys (in changes) + keys = entity.get("keys") + if not keys or "statusid" not in keys: + continue + + # Get the new status id of the version + version_status_id = ( + entity + .get("changes", {}) + .get("statusid", {}) + .get("new") + ) + + # Just check that `new` is set to any value + if not version_status_id: + continue + + try: + version_status = session.get("Status", version_status_id) + except Exception: + self.log.warning( + "Trouble querying status id [ {} ]".format( + version_status_id + ), + exc_info=True + ) + continue + + if not version_status: + continue + + version_status_orig = version_status["name"] + + # Load status 
mapping from presets + status_mapping = ( + config.get_presets() + .get("ftrack", {}) + .get("ftrack_config", {}) + .get("status_version_to_task") + ) or self.default_status_mapping + + # Skip if mapping is empty + if not status_mapping: + continue + + # Lower version status name and check if it has a mapping + version_status = version_status_orig.lower() + new_status_names = [] + mapped = status_mapping.get(version_status) + if mapped: + new_status_names.extend(list(mapped)) + + new_status_names.append(version_status) + + self.log.debug( + "Processing AssetVersion status change: [ {} ]".format( + version_status_orig + ) + ) + + # Lower all names from presets + new_status_names = [name.lower() for name in new_status_names] + + # Get entities necessary for processing + version = session.get("AssetVersion", entity["entityId"]) + task = version.get("task") + if not task: + continue + + project_schema = task["project"]["project_schema"] + # Get all available statuses for Task + statuses = project_schema.get_statuses("Task", task["type_id"]) + # map lowered status name to its object + stat_names_low = { + status["name"].lower(): status for status in statuses + } + + new_status = None + for status_name in new_status_names: + if status_name not in stat_names_low: continue - task_status = version_status - task = version['task'] - self.log.info('>>> version status: [ {} ]'.format( - version_status['name'])) - status_to_set = None - # Filter to versions with status change to "render complete" - if version_status['name'].lower() == 'reviewed': - status_to_set = 'Change requested' + # store object of found status + new_status = stat_names_low[status_name] + self.log.debug("Status to set: [ {} ]".format( + new_status["name"] + )) + break - if version_status['name'].lower() == 'approved': - status_to_set = 'Complete' + # Skip if no status name was found for the particular entity + if not new_status: + self.log.warning( + "None of the statuses from presets could be set: {}".format( + str(new_status_names) + ) + ) + continue - self.log.info( - '>>> status to set: [ {} ]'.format(status_to_set)) + # Get full path to task for logging + ent_path = "/".join([ent["name"] for ent in task["link"]]) - if status_to_set is not None: - query = 'Status where name is "{}"'.format(status_to_set) - try: - task_status = session.query(query).one() - except Exception: - self.log.info( - '!!! status was not found in Ftrack [ {} ]'.format( - status_to_set - )) - continue - - # Proceed if the task status was set - if task_status is not None: - # Get path to task - path = task['name'] - for p in task['ancestors']: - path = p['name'] + '/' + path - - # Setting task status - try: - task['status'] = task_status - session.commit() - except Exception as e: - session.rollback() - self.log.warning('!!! 
[ {} ] status couldnt be set:\ - [ {} ]'.format(path, e)) - session.rollback() - else: - self.log.info('>>> [ {} ] updated to [ {} ]'.format( - path, task_status['name'])) + # Setting task status + try: + task["status"] = new_status + session.commit() + self.log.debug("[ {} ] Status updated to [ {} ]".format( + ent_path, new_status['name'] + )) + except Exception: + session.rollback() + self.log.warning( + "[ {} ] Status couldn't be set".format(ent_path), + exc_info=True + ) def register(session, plugins_presets): diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py index 56a301e8f2..b09b0bc84e 100644 --- a/pype/ftrack/ftrack_server/event_server_cli.py +++ b/pype/ftrack/ftrack_server/event_server_cli.py @@ -7,11 +7,9 @@ import socket import argparse import atexit import time -from urllib.parse import urlparse import ftrack_api from pype.ftrack.lib import credentials -from pype.ftrack.ftrack_server import FtrackServer from pype.ftrack.ftrack_server.lib import ( ftrack_events_mongo_settings, check_ftrack_url ) @@ -67,9 +65,8 @@ def validate_credentials(url, user, api): except Exception as e: print( 'ERROR: Can\'t log into Ftrack with used credentials:' - ' Ftrack server: "{}" // Username: {} // API key: {}'.format( - url, user, api - )) + ' Ftrack server: "{}" // Username: {} // API key: {}' + .format(url, user, api)) return False print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format( @@ -147,9 +144,9 @@ def legacy_server(ftrack_url): ).format(str(max_fail_count), str(wait_time_after_max_fail))) subproc_failed_count += 1 elif (( - datetime.datetime.now() - subproc_last_failed - ).seconds > wait_time_after_max_fail): - subproc_failed_count = 0 + datetime.datetime.now() - subproc_last_failed + ).seconds > wait_time_after_max_fail): + subproc_failed_count = 0 # If thread failed test Ftrack and Mongo connection elif subproc.poll() is not None: @@ -277,9 +274,9 @@ def main_loop(ftrack_url): ).format(str(max_fail_count), str(wait_time_after_max_fail))) storer_failed_count += 1 elif (( - datetime.datetime.now() - storer_last_failed - ).seconds > wait_time_after_max_fail): - storer_failed_count = 0 + datetime.datetime.now() - storer_last_failed + ).seconds > wait_time_after_max_fail): + storer_failed_count = 0 # If thread failed test Ftrack and Mongo connection elif not storer_thread.isAlive(): @@ -313,13 +310,13 @@ def main_loop(ftrack_url): processor_failed_count += 1 elif (( - datetime.datetime.now() - processor_last_failed - ).seconds > wait_time_after_max_fail): - processor_failed_count = 0 + datetime.datetime.now() - processor_last_failed + ).seconds > wait_time_after_max_fail): + processor_failed_count = 0 # If thread failed test Ftrack and Mongo connection elif not processor_thread.isAlive(): - if storer_thread.mongo_error: + if processor_thread.mongo_error: raise Exception( "Exiting because have issue with acces to MongoDB" ) diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index 748937c7bd..fefba580e0 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -1,10 +1,32 @@ import os +import sys +import logging +import getpass +import atexit +import tempfile +import threading +import datetime +import time +import queue +import pymongo + import requests +import ftrack_api +import ftrack_api.session +import ftrack_api.cache +import ftrack_api.operation +import ftrack_api._centralized_storage_scenario +import ftrack_api.event +from ftrack_api.logging import 
LazyLogMessage as L try: from urllib.parse import urlparse, parse_qs except ImportError: from urlparse import urlparse, parse_qs +from pypeapp import Logger + +from pype.ftrack.lib.custom_db_connector import DbConnector + def ftrack_events_mongo_settings(): host = None @@ -49,7 +71,9 @@ def ftrack_events_mongo_settings(): def get_ftrack_event_mongo_info(): - host, port, database, username, password, collection, auth_db = ftrack_events_mongo_settings() + host, port, database, username, password, collection, auth_db = ( + ftrack_events_mongo_settings() + ) user_pass = "" if username and password: user_pass = "{}:{}@".format(username, password) @@ -97,3 +121,334 @@ def check_ftrack_url(url, log_errors=True): print('DEBUG: Ftrack server {} is accessible.'.format(url)) return url + + +class StorerEventHub(ftrack_api.event.hub.EventHub): + def __init__(self, *args, **kwargs): + self.sock = kwargs.pop("sock") + super(StorerEventHub, self).__init__(*args, **kwargs) + + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "heartbeat": + # Reply with heartbeat. + self.sock.sendall(b"storer") + return self._send_packet(self._code_name_mapping['heartbeat']) + + elif code_name == "connect": + event = ftrack_api.event.base.Event( + topic="pype.storer.started", + data={}, + source={ + "id": self.id, + "user": {"username": self._api_user} + } + ) + self._event_queue.put(event) + + return super(StorerEventHub, self)._handle_packet( + code, packet_identifier, path, data + ) + + +class ProcessEventHub(ftrack_api.event.hub.EventHub): + url, database, table_name = get_ftrack_event_mongo_info() + + is_table_created = False + pypelog = Logger().get_logger("Session Processor") + + def __init__(self, *args, **kwargs): + self.dbcon = DbConnector( + mongo_url=self.url, + database_name=self.database, + table_name=self.table_name + ) + self.sock = kwargs.pop("sock") + super(ProcessEventHub, self).__init__(*args, **kwargs) + + def prepare_dbcon(self): + try: + self.dbcon.install() + self.dbcon._database.list_collection_names() + except pymongo.errors.AutoReconnect: + self.pypelog.error( + "Mongo server \"{}\" is not responding, exiting.".format( + os.environ["AVALON_MONGO"] + ) + ) + sys.exit(0) + + except pymongo.errors.OperationFailure: + self.pypelog.error(( + "Error with Mongo access, probably permissions." + "Check if exist database with name \"{}\"" + " and collection \"{}\" inside." + ).format(self.database, self.table_name)) + self.sock.sendall(b"MongoError") + sys.exit(0) + + def wait(self, duration=None): + """Overriden wait + + Event are loaded from Mongo DB when queue is empty. Handled event is + set as processed in Mongo DB. + """ + started = time.time() + self.prepare_dbcon() + while True: + try: + event = self._event_queue.get(timeout=0.1) + except queue.Empty: + if not self.load_events(): + time.sleep(0.5) + else: + try: + self._handle(event) + self.dbcon.update_one( + {"id": event["id"]}, + {"$set": {"pype_data.is_processed": True}} + ) + except pymongo.errors.AutoReconnect: + self.pypelog.error(( + "Mongo server \"{}\" is not responding, exiting." + ).format(os.environ["AVALON_MONGO"])) + sys.exit(0) + # Additional special processing of events. 
+ if event['topic'] == 'ftrack.meta.disconnected': + break + + if duration is not None: + if (time.time() - started) > duration: + break + + def load_events(self): + """Load not processed events sorted by stored date""" + ago_date = datetime.datetime.now() - datetime.timedelta(days=3) + result = self.dbcon.delete_many({ + "pype_data.stored": {"$lte": ago_date}, + "pype_data.is_processed": True + }) + + not_processed_events = self.dbcon.find( + {"pype_data.is_processed": False} + ).sort( + [("pype_data.stored", pymongo.ASCENDING)] + ) + + found = False + for event_data in not_processed_events: + new_event_data = { + k: v for k, v in event_data.items() + if k not in ["_id", "pype_data"] + } + try: + event = ftrack_api.event.base.Event(**new_event_data) + except Exception: + self.logger.exception(L( + 'Failed to convert payload into event: {0}', + event_data + )) + continue + found = True + self._event_queue.put(event) + + return found + + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which skip events and extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "event": + return + if code_name == "heartbeat": + self.sock.sendall(b"processor") + return self._send_packet(self._code_name_mapping["heartbeat"]) + + return super()._handle_packet(code, packet_identifier, path, data) + + +class UserEventHub(ftrack_api.event.hub.EventHub): + def __init__(self, *args, **kwargs): + self.sock = kwargs.pop("sock") + super(UserEventHub, self).__init__(*args, **kwargs) + + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "heartbeat": + # Reply with heartbeat. + self.sock.sendall(b"hearbeat") + return self._send_packet(self._code_name_mapping['heartbeat']) + + elif code_name == "connect": + event = ftrack_api.event.base.Event( + topic="pype.storer.started", + data={}, + source={ + "id": self.id, + "user": {"username": self._api_user} + } + ) + self._event_queue.put(event) + + return super(UserEventHub, self)._handle_packet( + code, packet_identifier, path, data + ) + + +class SocketSession(ftrack_api.session.Session): + '''An isolated session for interaction with an ftrack server.''' + def __init__( + self, server_url=None, api_key=None, api_user=None, auto_populate=True, + plugin_paths=None, cache=None, cache_key_maker=None, + auto_connect_event_hub=None, schema_cache_path=None, + plugin_arguments=None, sock=None, Eventhub=None + ): + super(ftrack_api.session.Session, self).__init__() + self.logger = logging.getLogger( + __name__ + '.' + self.__class__.__name__ + ) + self._closed = False + + if server_url is None: + server_url = os.environ.get('FTRACK_SERVER') + + if not server_url: + raise TypeError( + 'Required "server_url" not specified. Pass as argument or set ' + 'in environment variable FTRACK_SERVER.' + ) + + self._server_url = server_url + + if api_key is None: + api_key = os.environ.get( + 'FTRACK_API_KEY', + # Backwards compatibility + os.environ.get('FTRACK_APIKEY') + ) + + if not api_key: + raise TypeError( + 'Required "api_key" not specified. Pass as argument or set in ' + 'environment variable FTRACK_API_KEY.' + ) + + self._api_key = api_key + + if api_user is None: + api_user = os.environ.get('FTRACK_API_USER') + if not api_user: + try: + api_user = getpass.getuser() + except Exception: + pass + + if not api_user: + raise TypeError( + 'Required "api_user" not specified. 
Pass as argument, set in ' + 'environment variable FTRACK_API_USER or one of the standard ' + 'environment variables used by Python\'s getpass module.' + ) + + self._api_user = api_user + + # Currently pending operations. + self.recorded_operations = ftrack_api.operation.Operations() + self.record_operations = True + + self.cache_key_maker = cache_key_maker + if self.cache_key_maker is None: + self.cache_key_maker = ftrack_api.cache.StringKeyMaker() + + # Enforce always having a memory cache at top level so that the same + # in-memory instance is returned from session. + self.cache = ftrack_api.cache.LayeredCache([ + ftrack_api.cache.MemoryCache() + ]) + + if cache is not None: + if callable(cache): + cache = cache(self) + + if cache is not None: + self.cache.caches.append(cache) + + self._managed_request = None + self._request = requests.Session() + self._request.auth = ftrack_api.session.SessionAuthentication( + self._api_key, self._api_user + ) + + self.auto_populate = auto_populate + + # Fetch server information and in doing so also check credentials. + self._server_information = self._fetch_server_information() + + # Now check compatibility of server based on retrieved information. + self.check_server_compatibility() + + # Construct event hub and load plugins. + if Eventhub is None: + Eventhub = ftrack_api.event.hub.EventHub + self._event_hub = Eventhub( + self._server_url, + self._api_user, + self._api_key, + sock=sock + ) + + self._auto_connect_event_hub_thread = None + if auto_connect_event_hub in (None, True): + # Connect to event hub in background thread so as not to block main + # session usage waiting for event hub connection. + self._auto_connect_event_hub_thread = threading.Thread( + target=self._event_hub.connect + ) + self._auto_connect_event_hub_thread.daemon = True + self._auto_connect_event_hub_thread.start() + + # To help with migration from auto_connect_event_hub default changing + # from True to False. + self._event_hub._deprecation_warning_auto_connect = ( + auto_connect_event_hub is None + ) + + # Register to auto-close session on exit. + atexit.register(self.close) + + self._plugin_paths = plugin_paths + if self._plugin_paths is None: + self._plugin_paths = os.environ.get( + 'FTRACK_EVENT_PLUGIN_PATH', '' + ).split(os.pathsep) + + self._discover_plugins(plugin_arguments=plugin_arguments) + + # TODO: Make schemas read-only and non-mutable (or at least without + # rebuilding types)? 
+ if schema_cache_path is not False: + if schema_cache_path is None: + schema_cache_path = os.environ.get( + 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir() + ) + + schema_cache_path = os.path.join( + schema_cache_path, 'ftrack_api_schema_cache.json' + ) + + self.schemas = self._load_schemas(schema_cache_path) + self.types = self._build_entity_type_classes(self.schemas) + + ftrack_api._centralized_storage_scenario.register(self) + + self._configure_locations() + self.event_hub.publish( + ftrack_api.event.base.Event( + topic='ftrack.api.session.ready', + data=dict( + session=self + ) + ), + synchronous=True + ) diff --git a/pype/ftrack/ftrack_server/session_processor.py b/pype/ftrack/ftrack_server/session_processor.py deleted file mode 100644 index 86a9775dce..0000000000 --- a/pype/ftrack/ftrack_server/session_processor.py +++ /dev/null @@ -1,292 +0,0 @@ -import logging -import os -import atexit -import datetime -import tempfile -import threading -import time -import requests -import queue -import pymongo - -import ftrack_api -import ftrack_api.session -import ftrack_api.cache -import ftrack_api.operation -import ftrack_api._centralized_storage_scenario -import ftrack_api.event -from ftrack_api.logging import LazyLogMessage as L - -from pype.ftrack.lib.custom_db_connector import DbConnector -from pype.ftrack.ftrack_server.lib import get_ftrack_event_mongo_info -from pypeapp import Logger - -log = Logger().get_logger("Session processor") - - -class ProcessEventHub(ftrack_api.event.hub.EventHub): - url, database, table_name = get_ftrack_event_mongo_info() - - is_table_created = False - - def __init__(self, *args, **kwargs): - self.dbcon = DbConnector( - mongo_url=self.url, - database_name=self.database, - table_name=self.table_name - ) - self.sock = kwargs.pop("sock") - super(ProcessEventHub, self).__init__(*args, **kwargs) - - def prepare_dbcon(self): - try: - self.dbcon.install() - self.dbcon._database.collection_names() - except pymongo.errors.AutoReconnect: - log.error("Mongo server \"{}\" is not responding, exiting.".format( - os.environ["AVALON_MONGO"] - )) - sys.exit(0) - - except pymongo.errors.OperationFailure: - log.error(( - "Error with Mongo access, probably permissions." - "Check if exist database with name \"{}\"" - " and collection \"{}\" inside." - ).format(self.database, self.table_name)) - self.sock.sendall(b"MongoError") - sys.exit(0) - - def wait(self, duration=None): - """Overriden wait - - Event are loaded from Mongo DB when queue is empty. Handled event is - set as processed in Mongo DB. - """ - started = time.time() - self.prepare_dbcon() - while True: - try: - event = self._event_queue.get(timeout=0.1) - except queue.Empty: - if not self.load_events(): - time.sleep(0.5) - else: - try: - self._handle(event) - self.dbcon.update_one( - {"id": event["id"]}, - {"$set": {"pype_data.is_processed": True}} - ) - except pymongo.errors.AutoReconnect: - log.error(( - "Mongo server \"{}\" is not responding, exiting." - ).format(os.environ["AVALON_MONGO"])) - sys.exit(0) - # Additional special processing of events. 
- if event['topic'] == 'ftrack.meta.disconnected': - break - - if duration is not None: - if (time.time() - started) > duration: - break - - def load_events(self): - """Load not processed events sorted by stored date""" - ago_date = datetime.datetime.now() - datetime.timedelta(days=3) - result = self.dbcon.delete_many({ - "pype_data.stored": {"$lte": ago_date}, - "pype_data.is_processed": True - }) - - not_processed_events = self.dbcon.find( - {"pype_data.is_processed": False} - ).sort( - [("pype_data.stored", pymongo.ASCENDING)] - ) - - found = False - for event_data in not_processed_events: - new_event_data = { - k: v for k, v in event_data.items() - if k not in ["_id", "pype_data"] - } - try: - event = ftrack_api.event.base.Event(**new_event_data) - except Exception: - self.logger.exception(L( - 'Failed to convert payload into event: {0}', - event_data - )) - continue - found = True - self._event_queue.put(event) - - return found - - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which skip events and extend heartbeat""" - code_name = self._code_name_mapping[code] - if code_name == "event": - return - if code_name == "heartbeat": - self.sock.sendall(b"processor") - return self._send_packet(self._code_name_mapping["heartbeat"]) - - return super()._handle_packet(code, packet_identifier, path, data) - - -class ProcessSession(ftrack_api.session.Session): - '''An isolated session for interaction with an ftrack server.''' - def __init__( - self, server_url=None, api_key=None, api_user=None, auto_populate=True, - plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=None, schema_cache_path=None, - plugin_arguments=None, sock=None - ): - super(ftrack_api.session.Session, self).__init__() - self.logger = logging.getLogger( - __name__ + '.' + self.__class__.__name__ - ) - self._closed = False - - if server_url is None: - server_url = os.environ.get('FTRACK_SERVER') - - if not server_url: - raise TypeError( - 'Required "server_url" not specified. Pass as argument or set ' - 'in environment variable FTRACK_SERVER.' - ) - - self._server_url = server_url - - if api_key is None: - api_key = os.environ.get( - 'FTRACK_API_KEY', - # Backwards compatibility - os.environ.get('FTRACK_APIKEY') - ) - - if not api_key: - raise TypeError( - 'Required "api_key" not specified. Pass as argument or set in ' - 'environment variable FTRACK_API_KEY.' - ) - - self._api_key = api_key - - if api_user is None: - api_user = os.environ.get('FTRACK_API_USER') - if not api_user: - try: - api_user = getpass.getuser() - except Exception: - pass - - if not api_user: - raise TypeError( - 'Required "api_user" not specified. Pass as argument, set in ' - 'environment variable FTRACK_API_USER or one of the standard ' - 'environment variables used by Python\'s getpass module.' - ) - - self._api_user = api_user - - # Currently pending operations. - self.recorded_operations = ftrack_api.operation.Operations() - self.record_operations = True - - self.cache_key_maker = cache_key_maker - if self.cache_key_maker is None: - self.cache_key_maker = ftrack_api.cache.StringKeyMaker() - - # Enforce always having a memory cache at top level so that the same - # in-memory instance is returned from session. 
- self.cache = ftrack_api.cache.LayeredCache([ - ftrack_api.cache.MemoryCache() - ]) - - if cache is not None: - if callable(cache): - cache = cache(self) - - if cache is not None: - self.cache.caches.append(cache) - - self._managed_request = None - self._request = requests.Session() - self._request.auth = ftrack_api.session.SessionAuthentication( - self._api_key, self._api_user - ) - - self.auto_populate = auto_populate - - # Fetch server information and in doing so also check credentials. - self._server_information = self._fetch_server_information() - - # Now check compatibility of server based on retrieved information. - self.check_server_compatibility() - - # Construct event hub and load plugins. - self._event_hub = ProcessEventHub( - self._server_url, - self._api_user, - self._api_key, - sock=sock - ) - - self._auto_connect_event_hub_thread = None - if auto_connect_event_hub in (None, True): - # Connect to event hub in background thread so as not to block main - # session usage waiting for event hub connection. - self._auto_connect_event_hub_thread = threading.Thread( - target=self._event_hub.connect - ) - self._auto_connect_event_hub_thread.daemon = True - self._auto_connect_event_hub_thread.start() - - # To help with migration from auto_connect_event_hub default changing - # from True to False. - self._event_hub._deprecation_warning_auto_connect = ( - auto_connect_event_hub is None - ) - - # Register to auto-close session on exit. - atexit.register(self.close) - - self._plugin_paths = plugin_paths - if self._plugin_paths is None: - self._plugin_paths = os.environ.get( - 'FTRACK_EVENT_PLUGIN_PATH', '' - ).split(os.pathsep) - - self._discover_plugins(plugin_arguments=plugin_arguments) - - # TODO: Make schemas read-only and non-mutable (or at least without - # rebuilding types)? - if schema_cache_path is not False: - if schema_cache_path is None: - schema_cache_path = os.environ.get( - 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir() - ) - - schema_cache_path = os.path.join( - schema_cache_path, 'ftrack_api_schema_cache.json' - ) - - self.schemas = self._load_schemas(schema_cache_path) - self.types = self._build_entity_type_classes(self.schemas) - - ftrack_api._centralized_storage_scenario.register(self) - - self._configure_locations() - self.event_hub.publish( - ftrack_api.event.base.Event( - topic='ftrack.api.session.ready', - data=dict( - session=self - ) - ), - synchronous=True - ) diff --git a/pype/ftrack/ftrack_server/session_storer.py b/pype/ftrack/ftrack_server/session_storer.py deleted file mode 100644 index b3201c9e4d..0000000000 --- a/pype/ftrack/ftrack_server/session_storer.py +++ /dev/null @@ -1,257 +0,0 @@ -import logging -import os -import atexit -import tempfile -import threading -import requests - -import ftrack_api -import ftrack_api.session -import ftrack_api.cache -import ftrack_api.operation -import ftrack_api._centralized_storage_scenario -import ftrack_api.event -from ftrack_api.logging import LazyLogMessage as L - - -class StorerEventHub(ftrack_api.event.hub.EventHub): - def __init__(self, *args, **kwargs): - self.sock = kwargs.pop("sock") - super(StorerEventHub, self).__init__(*args, **kwargs) - - def _handle_packet(self, code, packet_identifier, path, data): - """Override `_handle_packet` which extend heartbeat""" - if self._code_name_mapping[code] == "heartbeat": - # Reply with heartbeat. 
- self.sock.sendall(b"storer") - return self._send_packet(self._code_name_mapping['heartbeat']) - - return super(StorerEventHub, self)._handle_packet( - code, packet_identifier, path, data - ) - - -class StorerSession(ftrack_api.session.Session): - '''An isolated session for interaction with an ftrack server.''' - def __init__( - self, server_url=None, api_key=None, api_user=None, auto_populate=True, - plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=None, schema_cache_path=None, - plugin_arguments=None, sock=None - ): - '''Initialise session. - - *server_url* should be the URL of the ftrack server to connect to - including any port number. If not specified attempt to look up from - :envvar:`FTRACK_SERVER`. - - *api_key* should be the API key to use for authentication whilst - *api_user* should be the username of the user in ftrack to record - operations against. If not specified, *api_key* should be retrieved - from :envvar:`FTRACK_API_KEY` and *api_user* from - :envvar:`FTRACK_API_USER`. - - If *auto_populate* is True (the default), then accessing entity - attributes will cause them to be automatically fetched from the server - if they are not already. This flag can be changed on the session - directly at any time. - - *plugin_paths* should be a list of paths to search for plugins. If not - specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`. - - *cache* should be an instance of a cache that fulfils the - :class:`ftrack_api.cache.Cache` interface and will be used as the cache - for the session. It can also be a callable that will be called with the - session instance as sole argument. The callable should return ``None`` - if a suitable cache could not be configured, but session instantiation - can continue safely. - - .. note:: - - The session will add the specified cache to a pre-configured layered - cache that specifies the top level cache as a - :class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary - to construct a separate memory cache for typical behaviour. Working - around this behaviour or removing the memory cache can lead to - unexpected behaviour. - - *cache_key_maker* should be an instance of a key maker that fulfils the - :class:`ftrack_api.cache.KeyMaker` interface and will be used to - generate keys for objects being stored in the *cache*. If not specified, - a :class:`~ftrack_api.cache.StringKeyMaker` will be used. - - If *auto_connect_event_hub* is True then embedded event hub will be - automatically connected to the event server and allow for publishing and - subscribing to **non-local** events. If False, then only publishing and - subscribing to **local** events will be possible until the hub is - manually connected using :meth:`EventHub.connect - `. - - .. note:: - - The event hub connection is performed in a background thread to - improve session startup time. If a registered plugin requires a - connected event hub then it should check the event hub connection - status explicitly. Subscribing to events does *not* require a - connected event hub. - - Enable schema caching by setting *schema_cache_path* to a folder path. - If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to - determine the path to store cache in. If the environment variable is - also not specified then a temporary directory will be used. Set to - `False` to disable schema caching entirely. - - *plugin_arguments* should be an optional mapping (dict) of keyword - arguments to pass to plugin register functions upon discovery. 
If a - discovered plugin has a signature that is incompatible with the passed - arguments, the discovery mechanism will attempt to reduce the passed - arguments to only those that the plugin accepts. Note that a warning - will be logged in this case. - - ''' - super(ftrack_api.session.Session, self).__init__() - self.logger = logging.getLogger( - __name__ + '.' + self.__class__.__name__ - ) - self._closed = False - - if server_url is None: - server_url = os.environ.get('FTRACK_SERVER') - - if not server_url: - raise TypeError( - 'Required "server_url" not specified. Pass as argument or set ' - 'in environment variable FTRACK_SERVER.' - ) - - self._server_url = server_url - - if api_key is None: - api_key = os.environ.get( - 'FTRACK_API_KEY', - # Backwards compatibility - os.environ.get('FTRACK_APIKEY') - ) - - if not api_key: - raise TypeError( - 'Required "api_key" not specified. Pass as argument or set in ' - 'environment variable FTRACK_API_KEY.' - ) - - self._api_key = api_key - - if api_user is None: - api_user = os.environ.get('FTRACK_API_USER') - if not api_user: - try: - api_user = getpass.getuser() - except Exception: - pass - - if not api_user: - raise TypeError( - 'Required "api_user" not specified. Pass as argument, set in ' - 'environment variable FTRACK_API_USER or one of the standard ' - 'environment variables used by Python\'s getpass module.' - ) - - self._api_user = api_user - - # Currently pending operations. - self.recorded_operations = ftrack_api.operation.Operations() - self.record_operations = True - - self.cache_key_maker = cache_key_maker - if self.cache_key_maker is None: - self.cache_key_maker = ftrack_api.cache.StringKeyMaker() - - # Enforce always having a memory cache at top level so that the same - # in-memory instance is returned from session. - self.cache = ftrack_api.cache.LayeredCache([ - ftrack_api.cache.MemoryCache() - ]) - - if cache is not None: - if callable(cache): - cache = cache(self) - - if cache is not None: - self.cache.caches.append(cache) - - self._managed_request = None - self._request = requests.Session() - self._request.auth = ftrack_api.session.SessionAuthentication( - self._api_key, self._api_user - ) - - self.auto_populate = auto_populate - - # Fetch server information and in doing so also check credentials. - self._server_information = self._fetch_server_information() - - # Now check compatibility of server based on retrieved information. - self.check_server_compatibility() - - # Construct event hub and load plugins. - self._event_hub = StorerEventHub( - self._server_url, - self._api_user, - self._api_key, - sock=sock - ) - - self._auto_connect_event_hub_thread = None - if auto_connect_event_hub in (None, True): - # Connect to event hub in background thread so as not to block main - # session usage waiting for event hub connection. - self._auto_connect_event_hub_thread = threading.Thread( - target=self._event_hub.connect - ) - self._auto_connect_event_hub_thread.daemon = True - self._auto_connect_event_hub_thread.start() - - # To help with migration from auto_connect_event_hub default changing - # from True to False. - self._event_hub._deprecation_warning_auto_connect = ( - auto_connect_event_hub is None - ) - - # Register to auto-close session on exit. 
- atexit.register(self.close) - - self._plugin_paths = plugin_paths - if self._plugin_paths is None: - self._plugin_paths = os.environ.get( - 'FTRACK_EVENT_PLUGIN_PATH', '' - ).split(os.pathsep) - - self._discover_plugins(plugin_arguments=plugin_arguments) - - # TODO: Make schemas read-only and non-mutable (or at least without - # rebuilding types)? - if schema_cache_path is not False: - if schema_cache_path is None: - schema_cache_path = os.environ.get( - 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir() - ) - - schema_cache_path = os.path.join( - schema_cache_path, 'ftrack_api_schema_cache.json' - ) - - self.schemas = self._load_schemas(schema_cache_path) - self.types = self._build_entity_type_classes(self.schemas) - - ftrack_api._centralized_storage_scenario.register(self) - - self._configure_locations() - self.event_hub.publish( - ftrack_api.event.base.Event( - topic='ftrack.api.session.ready', - data=dict( - session=self - ) - ), - synchronous=True - ) diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py index d0a2868743..8e217870ba 100644 --- a/pype/ftrack/ftrack_server/socket_thread.py +++ b/pype/ftrack/ftrack_server/socket_thread.py @@ -1,7 +1,6 @@ import os import sys import time -import signal import socket import threading import subprocess @@ -10,7 +9,9 @@ from pypeapp import Logger class SocketThread(threading.Thread): """Thread that checks subprocess of storer or processor of events""" + MAX_TIMEOUT = 35 + def __init__(self, name, port, filepath): super(SocketThread, self).__init__() self.log = Logger().get_logger("SocketThread", "Event Thread") @@ -26,6 +27,8 @@ class SocketThread(threading.Thread): self.mongo_error = False + self._temp_data = {} + def stop(self): self._is_running = False @@ -50,8 +53,7 @@ class SocketThread(threading.Thread): ) self.subproc = subprocess.Popen( - ["python", self.filepath, "-port", str(self.port)], - stdout=subprocess.PIPE + [sys.executable, self.filepath, "-port", str(self.port)] ) # Listen for incoming connections @@ -81,8 +83,9 @@ class SocketThread(threading.Thread): try: if not self._is_running: break + data = None try: - data = connection.recv(16) + data = self.get_data_from_con(connection) time_con = time.time() except socket.timeout: @@ -99,10 +102,7 @@ class SocketThread(threading.Thread): self._is_running = False break - if data: - if data == b"MongoError": - self.mongo_error = True - connection.sendall(data) + self._handle_data(connection, data) except Exception as exc: self.log.error( @@ -115,9 +115,15 @@ class SocketThread(threading.Thread): if self.subproc.poll() is None: self.subproc.terminate() - lines = self.subproc.stdout.readlines() - if lines: - print("*** Socked Thread stdout ***") - for line in lines: - os.write(1, line) self.finished = True + + def get_data_from_con(self, connection): + return connection.recv(16) + + def _handle_data(self, connection, data): + if not data: + return + + if data == b"MongoError": + self.mongo_error = True + connection.sendall(data)
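The SocketThread above supervises each event sub-process over a plain TCP socket on localhost: it launches the script with sys.executable, waits for the child to connect, and gives up when nothing arrives within MAX_TIMEOUT seconds or when the child reports b"MongoError". The sub_event_* scripts changed below all implement the child side of that handshake. A minimal sketch of it, assuming a SocketThread is already listening on the port (connect_to_parent is an illustrative name, not part of the codebase):

    import socket
    import sys

    def connect_to_parent(port, identifier):
        # SocketThread binds to localhost and expects exactly one client.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(("localhost", port))
        # The first payload announces which sub-process checked in; any
        # later payload (e.g. b"MongoError") is inspected by _handle_data.
        sock.sendall(identifier)
        return sock

    if __name__ == "__main__":
        # The port arrives as the last CLI argument, matching the
        # subprocess.Popen call above.
        connect_to_parent(int(sys.argv[-1]), b"CreatedProcess")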
diff --git a/pype/ftrack/ftrack_server/sub_event_processor.py b/pype/ftrack/ftrack_server/sub_event_processor.py index 6ada787223..9c971ca916 100644 --- a/pype/ftrack/ftrack_server/sub_event_processor.py +++ b/pype/ftrack/ftrack_server/sub_event_processor.py @@ -1,12 +1,9 @@ -import os import sys -import datetime import signal import socket -import pymongo from ftrack_server import FtrackServer -from pype.ftrack.ftrack_server.session_processor import ProcessSession +from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub from pypeapp import Logger log = Logger().get_logger("Event processor") @@ -24,12 +21,14 @@ def main(args): sock.sendall(b"CreatedProcess") try: - session = ProcessSession(auto_connect_event_hub=True, sock=sock) - server = FtrackServer('event') + session = SocketSession( + auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub + ) + server = FtrackServer("event") log.debug("Launched Ftrack Event processor") server.run_server(session) - except Exception as exc: + except Exception: log.error("Event server crashed. See traceback below", exc_info=True) finally:
diff --git a/pype/ftrack/ftrack_server/sub_event_storer.py b/pype/ftrack/ftrack_server/sub_event_storer.py index aaaf63accd..dfe8e21654 100644 --- a/pype/ftrack/ftrack_server/sub_event_storer.py +++ b/pype/ftrack/ftrack_server/sub_event_storer.py @@ -5,16 +5,24 @@ import signal import socket import pymongo +import ftrack_api from ftrack_server import FtrackServer -from pype.ftrack.ftrack_server.lib import get_ftrack_event_mongo_info +from pype.ftrack.ftrack_server.lib import ( + get_ftrack_event_mongo_info, + SocketSession, + StorerEventHub +) from pype.ftrack.lib.custom_db_connector import DbConnector -from session_storer import StorerSession from pypeapp import Logger log = Logger().get_logger("Event storer") -url, database, table_name = get_ftrack_event_mongo_info() +class SessionFactory: + session = None + + +url, database, table_name = get_ftrack_event_mongo_info() dbcon = DbConnector( mongo_url=url, database_name=database, @@ -24,10 +32,11 @@ dbcon = DbConnector( # ignore_topics = ["ftrack.meta.connected"] ignore_topics = [] + def install_db(): try: dbcon.install() - dbcon._database.collection_names() + dbcon._database.list_collection_names() except pymongo.errors.AutoReconnect: log.error("Mongo server \"{}\" is not responding, exiting.".format( os.environ["AVALON_MONGO"] @@ -49,7 +58,7 @@ def launch(event): try: # dbcon.insert_one(event_data) - dbcon.update({"id": event_id}, event_data, upsert=True) + dbcon.replace_one({"id": event_id}, event_data, upsert=True) log.debug("Event: {} stored".format(event_id)) except pymongo.errors.AutoReconnect: @@ -65,10 +74,75 @@ def launch(event): ) +def trigger_sync(event): + session = SessionFactory.session + if session is None: + log.warning("Session is not set. Can't trigger Sync to avalon action.") + return True + + source_id = event.get("source", {}).get("id") + if not source_id or source_id != session.event_hub.id: + return + + projects = session.query("Project").all() + if not projects: + return True + + query = { + "pype_data.is_processed": False, + "topic": "ftrack.action.launch", + "data.actionIdentifier": "sync.to.avalon.server" + } + set_dict = { + "$set": {"pype_data.is_processed": True} + } + dbcon.update_many(query, set_dict) + + selections = [] + for project in projects: + if project["status"] != "active": + continue + + auto_sync = project["custom_attributes"].get("avalon_auto_sync") + if not auto_sync: + continue + + selections.append({ + "entityId": project["id"], + "entityType": "show" + }) + + if not selections: + return + + user = session.query( + "User where username is \"{}\"".format(session.api_user) + ).one() + user_data = { + "username": user["username"], + "id": user["id"] + } + + for selection in selections: + event_data = { + "actionIdentifier": "sync.to.avalon.server", + "selection": [selection] + } + session.event_hub.publish( + ftrack_api.event.base.Event( + topic="ftrack.action.launch", + data=event_data, + source=dict(user=user_data) + ), + on_error="ignore" + ) + + def register(session): '''Registers the event, subscribing to the discover and launch topics.''' install_db() session.event_hub.subscribe("topic=*", launch) + session.event_hub.subscribe("topic=pype.storer.started", trigger_sync) def main(args): @@ -84,7 +158,10 @@ def main(args): sock.sendall(b"CreatedStore") try: - session = StorerSession(auto_connect_event_hub=True, sock=sock) + session = SocketSession( + auto_connect_event_hub=True, sock=sock, Eventhub=StorerEventHub + ) + SessionFactory.session = session register(session) server = FtrackServer("event") log.debug("Launched Ftrack Event storer")
diff --git a/pype/ftrack/ftrack_server/sub_legacy_server.py b/pype/ftrack/ftrack_server/sub_legacy_server.py index 31f38d0404..8b7bab5e2e 100644 --- a/pype/ftrack/ftrack_server/sub_legacy_server.py +++ b/pype/ftrack/ftrack_server/sub_legacy_server.py @@ -1,4 +1,3 @@ -import os import sys import time import datetime @@ -7,7 +6,6 @@ import threading from ftrack_server import FtrackServer import ftrack_api -from ftrack_api.event.hub import EventHub from pypeapp import Logger log = Logger().get_logger("Event Server Legacy") @@ -37,7 +35,10 @@ class TimerChecker(threading.Thread): if not self.session.event_hub.connected: if not connected: - if (datetime.datetime.now() - start).seconds > self.max_time_out: + if ( + (datetime.datetime.now() - start).seconds > + self.max_time_out + ): log.error(( "Exiting event server. Session was not connected" " to ftrack server in {} seconds."
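The hunk above only rewraps TimerChecker's timeout test, which aborts the legacy server when the event hub fails to connect in time. A minimal sketch of the same wait-with-deadline pattern (wait_for and is_connected are illustrative names, not part of the codebase); the sketch uses timedelta.total_seconds(), since the .seconds attribute compared above holds only the seconds component of the delta and wraps around after a day:

    import datetime
    import time

    def wait_for(is_connected, max_time_out, interval=1):
        # Poll a condition until it holds or the deadline passes, the way
        # TimerChecker polls session.event_hub.connected.
        start = datetime.datetime.now()
        while not is_connected():
            elapsed = (datetime.datetime.now() - start).total_seconds()
            if elapsed > max_time_out:
                return False
            time.sleep(interval)
        return True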
@@ -61,7 +62,7 @@ def main(args): check_thread = None try: - server = FtrackServer('event') + server = FtrackServer("event") session = ftrack_api.Session(auto_connect_event_hub=True) check_thread = TimerChecker(server, session) diff --git a/pype/ftrack/ftrack_server/sub_user_server.py b/pype/ftrack/ftrack_server/sub_user_server.py new file mode 100644 index 0000000000..f0d39447a8 --- /dev/null +++ b/pype/ftrack/ftrack_server/sub_user_server.py @@ -0,0 +1,56 @@ +import sys +import signal +import socket + +import traceback + +from ftrack_server import FtrackServer +from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub + +from pypeapp import Logger + +log = Logger().get_logger("FtrackUserServer") + + +def main(args): + port = int(args[-1]) + + # Create a TCP/IP socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + # Connect the socket to the port where the server is listening + server_address = ("localhost", port) + log.debug( + "User Ftrack Server connected to {} port {}".format(*server_address) + ) + sock.connect(server_address) + sock.sendall(b"CreatedUser") + + try: + session = SocketSession( + auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub + ) + server = FtrackServer("action") + log.debug("Launched User Ftrack Server") + server.run_server(session=session) + except Exception: + traceback.print_exception(*sys.exc_info()) + + finally: + log.debug("Closing socket") + sock.close() + return 1 + + +if __name__ == "__main__": + # Register interrupt signal + def signal_handler(sig, frame): + log.info( + "Process was forced to stop. Process ended." + ) + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + sys.exit(main(sys.argv)) diff --git a/pype/ftrack/lib/__init__.py b/pype/ftrack/lib/__init__.py index 6198230e57..9af9ded943 100644 --- a/pype/ftrack/lib/__init__.py +++ b/pype/ftrack/lib/__init__.py @@ -1,4 +1,4 @@ -from .avalon_sync import * +from . 
import avalon_sync from .credentials import * from .ftrack_app_handler import * from .ftrack_event_handler import * diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index 0baf99d2cf..f08dc73c19 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -1,369 +1,82 @@ import os import re -import json -from pype.lib import get_avalon_database -from bson.objectid import ObjectId +import queue +import collections + +from pype.ftrack.lib.io_nonsingleton import DbConnector + import avalon import avalon.api -from avalon import schema -from avalon.vendor import toml, jsonschema -from pypeapp import Logger, Anatomy, config +from avalon.vendor import toml +from pypeapp import Logger, Anatomy + +from bson.objectid import ObjectId +from bson.errors import InvalidId +from pymongo import UpdateOne +import ftrack_api -ValidationError = jsonschema.ValidationError log = Logger().get_logger(__name__) -def get_ca_mongoid(): - # returns name of Custom attribute that stores mongo_id - return 'avalon_mongo_id' +# Current schemas for avalon types +EntitySchemas = { + "project": "avalon-core:project-2.0", + "asset": "avalon-core:asset-3.0", + "config": "avalon-core:config-1.0" +} + +# name of Custom attribute that stores mongo_id from avalon db +CustAttrIdKey = "avalon_mongo_id" +CustAttrAutoSync = "avalon_auto_sync" -def import_to_avalon( - session, entity, ft_project, av_project, custom_attributes -): - database = get_avalon_database() - project_name = ft_project['full_name'] - output = {} - errors = [] +def check_regex(name, entity_type, in_schema=None, schema_patterns=None): + schema_name = "asset-3.0" + if in_schema: + schema_name = in_schema + elif entity_type == "project": + schema_name = "project-2.0" + elif entity_type == "task": + schema_name = "task" - entity_type = entity.entity_type - ent_path = "/".join([ent["name"] for ent in entity['link']]) - - log.debug("{} [{}] - Processing".format(ent_path, entity_type)) - - ca_mongoid = get_ca_mongoid() - # Validate if entity has custom attribute avalon_mongo_id - if ca_mongoid not in entity['custom_attributes']: - msg = ( - 'Custom attribute "{}" for "{}" is not created' - ' or don\'t have set permissions for API' - ).format(ca_mongoid, entity['name']) - log.error(msg) - errors.append({'Custom attribute error': msg}) - output['errors'] = errors - return output - - # Validate if entity name match REGEX in schema - avalon_check_name(entity) - - entity_type = entity.entity_type - # Project //////////////////////////////////////////////////////////////// - if entity_type in ['Project']: - type = 'project' - - proj_config = get_project_config(entity) - schema.validate(proj_config) - - av_project_code = None - if av_project is not None and 'code' in av_project['data']: - av_project_code = av_project['data']['code'] - ft_project_code = ft_project['name'] - - if av_project is None: - log.debug("{} - Creating project".format(project_name)) - item = { - 'schema': "avalon-core:project-2.0", - 'type': type, - 'name': project_name, - 'data': dict(), - 'config': proj_config, - 'parent': None, - } - schema.validate(item) - - database[project_name].insert_one(item) - - av_project = database[project_name].find_one( - {'type': type} - ) - - elif ( - av_project['name'] != project_name or - ( - av_project_code is not None and - av_project_code != ft_project_code - ) - ): - msg = ( - 'You can\'t change {0} "{1}" to "{2}"' - ', avalon wouldn\'t work properly!' - '\n{0} was changed back!' 
- ) - if av_project['name'] != project_name: - entity['full_name'] = av_project['name'] - errors.append( - {'Changed name error': msg.format( - 'Project name', av_project['name'], project_name - )} - ) - - if ( - av_project_code is not None and - av_project_code != ft_project_code - ): - log.warning(( - "{0} - Project code" - " is different in Avalon (\"{1}\")" - " that in Ftrack (\"{2}\")!" - " Trying to change it back in Ftrack to \"{1}\"." - ).format( - ent_path, str(av_project_code), str(ft_project_code) - )) - - entity['name'] = av_project_code - errors.append( - {'Changed name error': msg.format( - 'Project code', av_project_code, ft_project_code - )} - ) - - try: - session.commit() - log.info(( - "{} - Project code was changed back to \"{}\"" - ).format(ent_path, str(av_project_code))) - except Exception: - log.error( - ( - "{} - Couldn't change project code back to \"{}\"." - ).format(ent_path, str(av_project_code)), - exc_info=True - ) - - output['errors'] = errors - return output + name_pattern = None + if schema_patterns is not None: + name_pattern = schema_patterns.get(schema_name) + if not name_pattern: + default_pattern = "^[a-zA-Z0-9_.]*$" + schema_obj = avalon.schema._cache.get(schema_name + ".json") + if not schema_obj: + name_pattern = default_pattern else: - # not override existing templates! - templates = av_project['config'].get('template', None) - if templates is not None: - for key, value in proj_config['template'].items(): - if ( - key in templates and - templates[key] is not None and - templates[key] != value - ): - proj_config['template'][key] = templates[key] - - projectId = av_project['_id'] - - data = get_data( - entity, session, custom_attributes - ) - - cur_data = av_project.get('data') or {} - - enter_data = {} - for k, v in cur_data.items(): - enter_data[k] = v - for k, v in data.items(): - enter_data[k] = v - - log.debug("{} - Updating data".format(ent_path)) - database[project_name].update_many( - {'_id': ObjectId(projectId)}, - {'$set': { - 'name': project_name, - 'config': proj_config, - 'data': data - }} - ) - - entity['custom_attributes'][ca_mongoid] = str(projectId) - session.commit() - - output['project'] = av_project - - return output - - # Asset - ///////////////////////////////////////////////////////////// - if av_project is None: - result = import_to_avalon( - session, ft_project, ft_project, av_project, custom_attributes - ) - - if 'errors' in result: - output['errors'] = result['errors'] - return output - - elif 'project' not in result: - msg = 'During project import went something wrong' - errors.append({'Unexpected error': msg}) - output['errors'] = errors - return output - - av_project = result['project'] - output['project'] = result['project'] - - projectId = av_project['_id'] - data = get_data( - entity, session, custom_attributes - ) - - name = entity['name'] - - avalon_asset = None - # existence of this custom attr is already checked - if ca_mongoid not in entity['custom_attributes']: - msg = ( - "Entity type \"{}\" don't have created custom attribute \"{}\"" - " or user \"{}\" don't have permissions to read or change it." 
- ).format(entity_type, ca_mongoid, session.api_user) - - log.error(msg) - errors.append({'Missing Custom attribute': msg}) - output['errors'] = errors - return output - - mongo_id = entity['custom_attributes'][ca_mongoid] - mongo_id = mongo_id.replace(' ', '').replace('\n', '') - try: - ObjectId(mongo_id) - except Exception: - mongo_id = '' - - if mongo_id != '': - avalon_asset = database[project_name].find_one( - {'_id': ObjectId(mongo_id)} - ) - - if avalon_asset is None: - avalon_asset = database[project_name].find_one( - {'type': 'asset', 'name': name} - ) - if avalon_asset is None: - item = { - 'schema': "avalon-core:asset-3.0", - 'name': name, - 'parent': ObjectId(projectId), - 'type': 'asset', - 'data': data - } - schema.validate(item) - mongo_id = database[project_name].insert_one(item).inserted_id - log.debug("{} - Created in project \"{}\"".format( - ent_path, project_name - )) - # Raise error if it seems to be different ent. with same name - elif avalon_asset['data']['parents'] != data['parents']: - msg = ( - "{} - In Avalon DB already exists entity with name \"{}\"" - "\n- \"{}\"" - ).format(ent_path, name, "/".join(db_asset_path_items)) - log.error(msg) - errors.append({'Entity name duplication': msg}) - output['errors'] = errors - return output - - # Store new ID (in case that asset was removed from DB) - else: - mongo_id = avalon_asset['_id'] - else: - if avalon_asset['name'] != entity['name']: - if changeability_check_childs(entity) is False: - msg = ( - '{} - You can\'t change name "{}" to "{}"' - ', avalon wouldn\'t work properly!' - '\n\nName was changed back!' - '\n\nCreate new entity if you want to change name.' - ).format(ent_path, avalon_asset['name'], entity['name']) - - log.warning(msg) - entity['name'] = avalon_asset['name'] - session.commit() - errors.append({'Changed name error': msg}) - - if avalon_asset['data']['parents'] != data['parents']: - old_path = '/'.join(avalon_asset['data']['parents']) - new_path = '/'.join(data['parents']) - - msg = ( - 'You can\'t move with entities.' - '\nEntity "{}" was moved from "{}" to "{}"' - '\n\nAvalon won\'t work properly, {}!' 
+ name_pattern = schema_obj.get( + "properties", {}).get( + "name", {}).get( + "pattern", default_pattern ) + if schema_patterns is not None: + schema_patterns[schema_name] = name_pattern - moved_back = False - if 'visualParent' in avalon_asset['data']: - asset_parent_id = avalon_asset['data']['visualParent'] or avalon_asset['parent'] - - asset_parent = database[project_name].find_one( - {'_id': ObjectId(asset_parent_id)} - ) - ft_parent_id = asset_parent['data']['ftrackId'] - try: - entity['parent_id'] = ft_parent_id - session.commit() - msg = msg.format( - avalon_asset['name'], old_path, new_path, - 'entity was moved back' - ) - log.warning(msg) - moved_back = True - - except Exception: - moved_back = False - - if moved_back is False: - msg = msg.format( - avalon_asset['name'], old_path, new_path, - 'please move it back' - ) - log.error(msg) - - errors.append({'Hierarchy change error': msg}) - - if len(errors) > 0: - output['errors'] = errors - return output - - avalon_asset = database[project_name].find_one( - {'_id': ObjectId(mongo_id)} - ) - - cur_data = avalon_asset.get('data') or {} - - enter_data = {} - for k, v in cur_data.items(): - enter_data[k] = v - for k, v in data.items(): - enter_data[k] = v - - database[project_name].update_many( - {'_id': ObjectId(mongo_id)}, - {'$set': { - 'name': name, - 'data': enter_data, - 'parent': ObjectId(projectId) - }}) - log.debug("{} - Updated data (in project \"{}\")".format( - ent_path, project_name - )) - entity['custom_attributes'][ca_mongoid] = str(mongo_id) - session.commit() - - return output + if re.match(name_pattern, name): + return True + return False -def get_avalon_attr(session, split_hierarchical=False): +def get_avalon_attr(session, split_hierarchical=True): custom_attributes = [] hier_custom_attributes = [] cust_attrs_query = ( - "select id, entity_type, object_type_id, is_hierarchical" + "select id, entity_type, object_type_id, is_hierarchical, default" " from CustomAttributeConfiguration" " where group.name = \"avalon\"" ) all_avalon_attr = session.query(cust_attrs_query).all() for cust_attr in all_avalon_attr: - if 'avalon_' in cust_attr['key']: + if split_hierarchical and cust_attr["is_hierarchical"]: + hier_custom_attributes.append(cust_attr) continue - if split_hierarchical: - if cust_attr["is_hierarchical"]: - hier_custom_attributes.append(cust_attr) - continue - custom_attributes.append(cust_attr) if split_hierarchical: @@ -373,256 +86,2192 @@ def get_avalon_attr(session, split_hierarchical=False): return custom_attributes -def changeability_check_childs(entity): - if (entity.entity_type.lower() != 'task' and 'children' not in entity): - return True - childs = entity['children'] - for child in childs: - if child.entity_type.lower() == 'task': - available_statuses = config.get_presets().get( - "ftrack", {}).get( - "ftrack_config", {}).get( - "sync_to_avalon", {}).get( - "statuses_name_change", [] - ) - ent_status = child['status']['name'].lower() - if ent_status not in available_statuses: - return False - # If not task go deeper - elif changeability_check_childs(child) is False: - return False - # If everything is allright - return True +def from_dict_to_set(data): + result = {"$set": {}} + dict_queue = queue.Queue() + dict_queue.put((None, data)) + + while not dict_queue.empty(): + _key, _data = dict_queue.get() + for key, value in _data.items(): + new_key = key + if _key is not None: + new_key = "{}.{}".format(_key, key) + + if not isinstance(value, dict): + result["$set"][new_key] = value + continue + 
dict_queue.put((new_key, value)) + return result -def get_data(entity, session, custom_attributes): - database = get_avalon_database() - - entity_type = entity.entity_type - - if entity_type.lower() == 'project': - ft_project = entity - elif entity_type.lower() != 'project': - ft_project = entity['project'] - av_project = get_avalon_project(ft_project) - - project_name = ft_project['full_name'] - - data = {} - data['ftrackId'] = entity['id'] - data['entityType'] = entity_type - - ent_types_query = "select id, name from ObjectType" - ent_types = session.query(ent_types_query).all() - ent_types_by_name = { - ent_type["name"]: ent_type["id"] for ent_type in ent_types - } - - for cust_attr in custom_attributes: - # skip hierarchical attributes - if cust_attr.get('is_hierarchical', False): - continue - - key = cust_attr['key'] - if cust_attr['entity_type'].lower() in ['asset']: - data[key] = entity['custom_attributes'][key] - - elif ( - cust_attr['entity_type'].lower() in ['show'] and - entity_type.lower() == 'project' - ): - data[key] = entity['custom_attributes'][key] - - elif ( - cust_attr['entity_type'].lower() in ['task'] and - entity_type.lower() != 'project' - ): - # Put space between capitals (e.g. 'AssetBuild' -> 'Asset Build') - entity_type_full = re.sub(r"(\w)([A-Z])", r"\1 \2", entity_type) - # Get object id of entity type - ent_obj_type_id = ent_types_by_name.get(entity_type_full) - - # Backup soluction when id is not found by prequeried objects - if not ent_obj_type_id: - query = 'ObjectType where name is "{}"'.format( - entity_type_full - ) - ent_obj_type_id = session.query(query).one()['id'] - - if cust_attr['object_type_id'] == ent_obj_type_id: - if key in entity['custom_attributes']: - data[key] = entity['custom_attributes'][key] - - if entity_type in ['Project']: - data['code'] = entity['name'] - return data - - # Get info for 'Data' in Avalon DB - tasks = [] - for child in entity['children']: - if child.entity_type in ['Task']: - tasks.append(child['name']) - - # Get list of parents without project - parents = [] - folderStruct = [] - for i in range(1, len(entity['link'])-1): - parEnt = session.get( - entity['link'][i]['type'], - entity['link'][i]['id'] - ) - parName = parEnt['name'] - folderStruct.append(parName) - parents.append(parEnt) - - parentId = None - - for parent in parents: - parentId = database[project_name].find_one( - {'type': 'asset', 'name': parName} - )['_id'] - if parent['parent'].entity_type != 'project' and parentId is None: - import_to_avalon( - session, parent, ft_project, av_project, custom_attributes - ) - parentId = database[project_name].find_one( - {'type': 'asset', 'name': parName} - )['_id'] - - hierarchy = "" - if len(folderStruct) > 0: - hierarchy = os.path.sep.join(folderStruct) - - data['visualParent'] = parentId - data['parents'] = folderStruct - data['tasks'] = tasks - data['hierarchy'] = hierarchy - - return data - - -def get_avalon_project(ft_project): - database = get_avalon_database() - project_name = ft_project['full_name'] - ca_mongoid = get_ca_mongoid() - if ca_mongoid not in ft_project['custom_attributes']: - return None - - # try to find by Id - project_id = ft_project['custom_attributes'][ca_mongoid] - try: - avalon_project = database[project_name].find_one({ - '_id': ObjectId(project_id) - }) - except Exception: - avalon_project = None - - if avalon_project is None: - avalon_project = database[project_name].find_one({ - 'type': 'project' - }) - - return avalon_project - - -def get_avalon_project_template(): +def 
get_avalon_project_template(project_name): """Get avalon template - Returns: dictionary with templates """ - templates = Anatomy().templates + templates = Anatomy(project_name).templates return { - 'workfile': templates["avalon"]["workfile"], - 'work': templates["avalon"]["work"], - 'publish': templates["avalon"]["publish"] + "workfile": templates["avalon"]["workfile"], + "work": templates["avalon"]["work"], + "publish": templates["avalon"]["publish"] } -def get_project_config(entity): - proj_config = {} - proj_config['schema'] = 'avalon-core:config-1.0' - proj_config['tasks'] = get_tasks(entity) - proj_config['apps'] = get_project_apps(entity) - proj_config['template'] = get_avalon_project_template() - - return proj_config - - -def get_tasks(project): - task_types = project['project_schema']['_task_type_schema']['types'] - return [{'name': task_type['name']} for task_type in task_types] - - -def get_project_apps(entity): - """ Get apps from project - Requirements: - 'Entity' MUST be object of ftrack entity with entity_type 'Project' - Checking if app from ftrack is available in Templates/bin/{app_name}.toml - - Returns: - Array with dictionaries with app Name and Label - """ +def get_project_apps(in_app_list): apps = [] - for app in entity['custom_attributes']['applications']: + # TODO report + missing_toml_msg = "Missing config file for application" + error_msg = ( + "Unexpected error happened during preparation of application" + ) + warnings = collections.defaultdict(list) + for app in in_app_list: try: toml_path = avalon.lib.which_app(app) if not toml_path: - log.warning(( - 'Missing config file for application "{}"' - ).format(app)) + log.warning(missing_toml_msg + ' "{}"'.format(app)) + warnings[missing_toml_msg].append(app) continue apps.append({ - 'name': app, - 'label': toml.load(toml_path)['label'] + "name": app, + "label": toml.load(toml_path)["label"] + }) + except Exception: + warnings[error_msg].append(app) + log.warning(( + "Error happened while preparing application \"{}\"" + ).format(app), exc_info=True) + return apps, warnings + + +def get_hierarchical_attributes(session, entity, attr_names, attr_defaults={}): + entity_ids = [] + if entity.entity_type.lower() == "project": + entity_ids.append(entity["id"]) + else: + typed_context = session.query(( + "select ancestors.id, project from TypedContext where id is \"{}\"" + ).format(entity["id"])).one() + entity_ids.append(typed_context["id"]) + entity_ids.extend( + [ent["id"] for ent in reversed(typed_context["ancestors"])] + ) + entity_ids.append(typed_context["project"]["id"]) + + missing_defaults = [] + for attr_name in attr_names: + if attr_name not in attr_defaults: + missing_defaults.append(attr_name) + + join_ent_ids = ", ".join( + ["\"{}\"".format(entity_id) for entity_id in entity_ids] + ) + join_attribute_names = ", ".join( + ["\"{}\"".format(key) for key in attr_names] + ) + queries = [] + queries.append({ + "action": "query", + "expression": ( + "select value, entity_id from CustomAttributeValue " + "where entity_id in ({}) and configuration.key in ({})" + ).format(join_ent_ids, join_attribute_names) + }) + + if not missing_defaults: + if hasattr(session, "call"): + [values] = session.call(queries) + else: + [values] = session._call(queries) + else: + join_missing_names = ", ".join( + ["\"{}\"".format(key) for key in missing_defaults] + ) + queries.append({ + "action": "query", + "expression": ( + "select default from CustomAttributeConfiguration " + "where key in ({})" + ).format(join_missing_names) + }) + + 
[values, default_values] = session.call(queries) + for default_value in default_values: + key = default_value["data"][0]["key"] + attr_defaults[key] = default_value["data"][0]["default"] + + hier_values = {} + for key, val in attr_defaults.items(): + hier_values[key] = val + + if not values["data"]: + return hier_values + + _hier_values = collections.defaultdict(list) + for value in values["data"]: + key = value["configuration"]["key"] + _hier_values[key].append(value) + + for key, values in _hier_values.items(): + value = sorted( + values, key=lambda value: entity_ids.index(value["entity_id"]) + )[0] + hier_values[key] = value["value"] + + return hier_values + + +class SyncEntitiesFactory: + dbcon = DbConnector() + + project_query = ( + "select full_name, name, custom_attributes" + ", project_schema._task_type_schema.types.name" + " from Project where full_name is \"{}\"" + ) + entities_query = ( + "select id, name, parent_id, link" + " from TypedContext where project_id is \"{}\"" + ) + ignore_custom_attr_key = "avalon_ignore_sync" + + report_splitter = {"type": "label", "value": "---"} + + def __init__(self, log_obj, session): + self.log = log_obj + self._server_url = session.server_url + self._api_key = session.api_key + self._api_user = session.api_user + + def launch_setup(self, project_full_name): + try: + self.session.close() + except Exception: + pass + + self.session = ftrack_api.Session( + server_url=self._server_url, + api_key=self._api_key, + api_user=self._api_user, + auto_connect_event_hub=True + ) + + self.duplicates = {} + self.failed_regex = {} + self.tasks_failed_regex = collections.defaultdict(list) + self.report_items = { + "info": collections.defaultdict(list), + "warning": collections.defaultdict(list), + "error": collections.defaultdict(list) + } + + self.create_list = [] + self.updates = collections.defaultdict(dict) + + self.avalon_project = None + self.avalon_entities = None + + self._avalon_ents_by_id = None + self._avalon_ents_by_ftrack_id = None + self._avalon_ents_by_name = None + self._avalon_ents_by_parent_id = None + + self._avalon_archived_ents = None + self._avalon_archived_by_id = None + self._avalon_archived_by_parent_id = None + self._avalon_archived_by_name = None + + self._subsets_by_parent_id = None + self._changeability_by_mongo_id = None + + self.all_filtered_entities = {} + self.filtered_ids = [] + self.not_selected_ids = [] + + self._ent_paths_by_ftrack_id = {} + + self.ftrack_avalon_mapper = None + self.avalon_ftrack_mapper = None + self.create_ftrack_ids = None + self.update_ftrack_ids = None + self.deleted_entities = None + + # Get Ftrack project + ft_project = self.session.query( + self.project_query.format(project_full_name) + ).one() + ft_project_id = ft_project["id"] + + # Skip if project is ignored + if ft_project["custom_attributes"].get( + self.ignore_custom_attr_key + ) is True: + msg = ( + "Project \"{}\" has set `Ignore Sync` custom attribute to True" + ).format(project_full_name) + self.log.warning(msg) + return {"success": False, "message": msg} + + self.log.debug(( + "*** Synchronization initialization started <{}>." + ).format(project_full_name)) + # Check if `avalon_mongo_id` custom attribute exist or is accessible + if CustAttrIdKey not in ft_project["custom_attributes"]: + items = [] + items.append({ + "type": "label", + "value": "# Can't access Custom attribute <{}>".format( + CustAttrIdKey + ) + }) + items.append({ + "type": "label", + "value": ( + "
\n\n- Check if user \"{}\" has permissions" + " to access the Custom attribute\n\n" + ).format(self._api_user) + }) + items.append({ + "type": "label", + "value": "\n\n- Check if the Custom attribute exists\n\n
" + }) + return { + "items": items, + "title": "Synchronization failed", + "success": False, + "message": "Synchronization failed" + } + + # Find all entities in project + all_project_entities = self.session.query( + self.entities_query.format(ft_project_id) + ).all() + + # Store entities by `id` and `parent_id` + entities_dict = collections.defaultdict(lambda: { + "children": list(), + "parent_id": None, + "entity": None, + "entity_type": None, + "name": None, + "custom_attributes": {}, + "hier_attrs": {}, + "avalon_attrs": {}, + "tasks": [] + }) + + for entity in all_project_entities: + parent_id = entity["parent_id"] + entity_type = entity.entity_type + entity_type_low = entity_type.lower() + if entity_type_low == "task": + entities_dict[parent_id]["tasks"].append(entity["name"]) + continue + + entity_id = entity["id"] + entities_dict[entity_id].update({ + "entity": entity, + "parent_id": parent_id, + "entity_type": entity_type_low, + "entity_type_orig": entity_type, + "name": entity["name"] + }) + entities_dict[parent_id]["children"].append(entity_id) + + entities_dict[ft_project_id]["entity"] = ft_project + entities_dict[ft_project_id]["entity_type"] = ( + ft_project.entity_type.lower() + ) + entities_dict[ft_project_id]["entity_type_orig"] = ( + ft_project.entity_type + ) + entities_dict[ft_project_id]["name"] = ft_project["full_name"] + + self.ft_project_id = ft_project_id + self.entities_dict = entities_dict + + @property + def avalon_ents_by_id(self): + if self._avalon_ents_by_id is None: + self._avalon_ents_by_id = {} + for entity in self.avalon_entities: + self._avalon_ents_by_id[str(entity["_id"])] = entity + + return self._avalon_ents_by_id + + @property + def avalon_ents_by_ftrack_id(self): + if self._avalon_ents_by_ftrack_id is None: + self._avalon_ents_by_ftrack_id = {} + for entity in self.avalon_entities: + key = entity.get("data", {}).get("ftrackId") + if not key: + continue + self._avalon_ents_by_ftrack_id[key] = str(entity["_id"]) + + return self._avalon_ents_by_ftrack_id + + @property + def avalon_ents_by_name(self): + if self._avalon_ents_by_name is None: + self._avalon_ents_by_name = {} + for entity in self.avalon_entities: + self._avalon_ents_by_name[entity["name"]] = str(entity["_id"]) + + return self._avalon_ents_by_name + + @property + def avalon_ents_by_parent_id(self): + if self._avalon_ents_by_parent_id is None: + self._avalon_ents_by_parent_id = collections.defaultdict(list) + for entity in self.avalon_entities: + parent_id = entity["data"]["visualParent"] + if parent_id is not None: + parent_id = str(parent_id) + self._avalon_ents_by_parent_id[parent_id].append(entity) + + return self._avalon_ents_by_parent_id + + @property + def avalon_archived_ents(self): + if self._avalon_archived_ents is None: + self._avalon_archived_ents = [ + ent for ent in self.dbcon.find({"type": "archived_asset"}) + ] + return self._avalon_archived_ents + + @property + def avalon_archived_by_name(self): + if self._avalon_archived_by_name is None: + self._avalon_archived_by_name = collections.defaultdict(list) + for ent in self.avalon_archived_ents: + self._avalon_archived_by_name[ent["name"]].append(ent) + return self._avalon_archived_by_name + + @property + def avalon_archived_by_id(self): + if self._avalon_archived_by_id is None: + self._avalon_archived_by_id = { + str(ent["_id"]): ent for ent in self.avalon_archived_ents + } + return self._avalon_archived_by_id + + @property + def avalon_archived_by_parent_id(self): + if self._avalon_archived_by_parent_id is None: + 
self._avalon_archived_by_parent_id = collections.defaultdict(list) + for entity in self.avalon_archived_ents: + parent_id = entity["data"]["visualParent"] + if parent_id is not None: + parent_id = str(parent_id) + self._avalon_archived_by_parent_id[parent_id].append(entity) + + return self._avalon_archived_by_parent_id + + @property + def subsets_by_parent_id(self): + if self._subsets_by_parent_id is None: + self._subsets_by_parent_id = collections.defaultdict(list) + for subset in self.dbcon.find({"type": "subset"}): + self._subsets_by_parent_id[str(subset["parent"])].append( + subset + ) + + return self._subsets_by_parent_id + + @property + def changeability_by_mongo_id(self): + if self._changeability_by_mongo_id is None: + self._changeability_by_mongo_id = collections.defaultdict( + lambda: True + ) + self._changeability_by_mongo_id[self.avalon_project_id] = False + self._bubble_changeability(list(self.subsets_by_parent_id.keys())) + return self._changeability_by_mongo_id + + @property + def all_ftrack_names(self): + return [ + ent_dict["name"] for ent_dict in self.entities_dict.values() if ( + ent_dict.get("name") + ) + ] + + def duplicity_regex_check(self): + self.log.debug("* Checking duplicities and invalid symbols") + # Duplicity and regex check + entity_ids_by_name = {} + duplicates = [] + failed_regex = [] + task_names = {} + _schema_patterns = {} + for ftrack_id, entity_dict in self.entities_dict.items(): + regex_check = True + name = entity_dict["name"] + entity_type = entity_dict["entity_type"] + # Tasks must be checked too + for task_name in entity_dict["tasks"]: + passed = task_names.get(task_name) + if passed is None: + passed = check_regex( + task_name, "task", schema_patterns=_schema_patterns + ) + task_names[task_name] = passed + + if not passed: + self.tasks_failed_regex[task_name].append(ftrack_id) + + if name in entity_ids_by_name: + duplicates.append(name) + else: + entity_ids_by_name[name] = [] + regex_check = check_regex( + name, entity_type, schema_patterns=_schema_patterns + ) + + entity_ids_by_name[name].append(ftrack_id) + if not regex_check: + failed_regex.append(name) + + for name in failed_regex: + self.failed_regex[name] = entity_ids_by_name[name] + + for name in duplicates: + self.duplicates[name] = entity_ids_by_name[name] + + self.filter_by_duplicate_regex() + + def filter_by_duplicate_regex(self): + filter_queue = queue.Queue() + failed_regex_msg = "{} - Entity has invalid symbols in the name" + duplicate_msg = "There are multiple entities with the name: \"{}\":" + + for ids in self.failed_regex.values(): + for id in ids: + ent_path = self.get_ent_path(id) + self.log.warning(failed_regex_msg.format(ent_path)) + filter_queue.put(id) + + for name, ids in self.duplicates.items(): + self.log.warning(duplicate_msg.format(name)) + for id in ids: + ent_path = self.get_ent_path(id) + self.log.warning(ent_path) + filter_queue.put(id) + + filtered_ids = [] + while not filter_queue.empty(): + ftrack_id = filter_queue.get() + if ftrack_id in filtered_ids: + continue + + entity_dict = self.entities_dict.pop(ftrack_id, {}) + if not entity_dict: + continue + + self.all_filtered_entities[ftrack_id] = entity_dict + parent_id = entity_dict.get("parent_id") + if parent_id and parent_id in self.entities_dict: + if ftrack_id in self.entities_dict[parent_id]["children"]: + self.entities_dict[parent_id]["children"].remove(ftrack_id) + + filtered_ids.append(ftrack_id) + for child_id in entity_dict.get("children", []): + filter_queue.put(child_id) + + for name, ids in 
self.tasks_failed_regex.items(): + for id in ids: + if id not in self.entities_dict: + continue + self.entities_dict[id]["tasks"].remove(name) + ent_path = self.get_ent_path(id) + self.log.warning(failed_regex_msg.format( + "/".join([ent_path, name]) + )) + + def filter_by_ignore_sync(self): + # skip filtering if `ignore_sync` attribute does not exist + if self.entities_dict[self.ft_project_id]["avalon_attrs"].get( + self.ignore_custom_attr_key, "_notset_" + ) == "_notset_": + return + + self.filter_queue = queue.Queue() + self.filter_queue.put((self.ft_project_id, False)) + while not self.filter_queue.empty(): + parent_id, remove = self.filter_queue.get() + if remove: + parent_dict = self.entities_dict.pop(parent_id, {}) + self.all_filtered_entities[parent_id] = parent_dict + self.filtered_ids.append(parent_id) + else: + parent_dict = self.entities_dict.get(parent_id, {}) + + for child_id in parent_dict.get("children", []): + # keep original `remove` value for all children + _remove = (remove is True) + if not _remove: + if self.entities_dict[child_id]["avalon_attrs"].get( + self.ignore_custom_attr_key + ): + self.entities_dict[parent_id]["children"].remove( + child_id + ) + _remove = True + self.filter_queue.put((child_id, _remove)) + + def filter_by_selection(self, event): + # BUGGY!!!! causes these entities to end up in the deleted list + # TODO may work when filtering happens after preparations + # - But this part probably does not have any functional reason + # - Time of synchronization probably won't be changed much + selected_ids = [] + for entity in event["data"]["selection"]: + # Skip if project is in selection + if entity["entityType"] == "show": + return + selected_ids.append(entity["entityId"]) + + sync_ids = [self.ft_project_id] + parents_queue = queue.Queue() + children_queue = queue.Queue() + for id in selected_ids: + # skip if already filtered with ignore sync custom attribute + if id in self.filtered_ids: + continue + + parents_queue.put(id) + children_queue.put(id) + + while not parents_queue.empty(): + id = parents_queue.get() + while True: + # Stops when parent is in sync_ids + if id in self.filtered_ids or id in sync_ids or id is None: + break + sync_ids.append(id) + id = self.entities_dict[id]["parent_id"] + + while not children_queue.empty(): + parent_id = children_queue.get() + for child_id in self.entities_dict[parent_id]["children"]: + if child_id in sync_ids or child_id in self.filtered_ids: + continue + sync_ids.append(child_id) + children_queue.put(child_id) + + # separate not-selected entities from those to be processed + for key, value in self.entities_dict.items(): + if key not in sync_ids: + self.not_selected_ids.append(key) + + for id in self.not_selected_ids: + # pop from entities + value = self.entities_dict.pop(id) + # remove entity from parent's children + parent_id = value["parent_id"] + if parent_id not in sync_ids: + continue + + self.entities_dict[parent_id]["children"].remove(id)
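filter_by_ignore_sync and filter_by_selection above walk the project tree the same way filter_by_duplicate_regex does: a queue.Queue is seeded with one or more ids and drained while children are enqueued, which visits every descendant without recursion. A minimal standalone sketch of that traversal over the entities_dict shape used here (iter_subtree is an illustrative name, not part of the codebase):

    import queue

    # entities_dict maps an ftrack id to a dict that holds, among other
    # keys, "children": a list of child ftrack ids.
    def iter_subtree(entities_dict, root_id):
        id_queue = queue.Queue()
        id_queue.put(root_id)
        while not id_queue.empty():
            entity_id = id_queue.get()
            yield entity_id
            for child_id in entities_dict[entity_id]["children"]:
                id_queue.put(child_id)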
+ + def set_cutom_attributes(self): + self.log.debug("* Preparing custom attributes") + # Get custom attributes and values + custom_attrs, hier_attrs = get_avalon_attr(self.session) + ent_types = self.session.query("select id, name from ObjectType").all() + ent_types_by_name = { + ent_type["name"]: ent_type["id"] for ent_type in ent_types + } + + attrs = set() + # store default values per entity type + attrs_per_entity_type = collections.defaultdict(dict) + avalon_attrs = collections.defaultdict(dict) + # store also custom attribute configuration id for future use (create) + attrs_per_entity_type_ca_id = collections.defaultdict(dict) + avalon_attrs_ca_id = collections.defaultdict(dict) + + for cust_attr in custom_attrs: + key = cust_attr["key"] + attrs.add(cust_attr["id"]) + ca_ent_type = cust_attr["entity_type"] + if key.startswith("avalon_"): + if ca_ent_type == "show": + avalon_attrs[ca_ent_type][key] = cust_attr["default"] + avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"] + elif ca_ent_type == "task": + obj_id = cust_attr["object_type_id"] + avalon_attrs[obj_id][key] = cust_attr["default"] + avalon_attrs_ca_id[obj_id][key] = cust_attr["id"] + continue + + if ca_ent_type == "show": + attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"] + attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"] + elif ca_ent_type == "task": + obj_id = cust_attr["object_type_id"] + attrs_per_entity_type[obj_id][key] = cust_attr["default"] + attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"] + + obj_id_ent_type_map = {} + sync_ids = [] + for entity_id, entity_dict in self.entities_dict.items(): + sync_ids.append(entity_id) + entity_type = entity_dict["entity_type"] + entity_type_orig = entity_dict["entity_type_orig"] + + if entity_type == "project": + attr_key = "show" + else: + map_key = obj_id_ent_type_map.get(entity_type_orig) + if not map_key: + # Put space between capitals + # (e.g. 'AssetBuild' -> 'Asset Build') + map_key = re.sub( + r"(\w)([A-Z])", r"\1 \2", entity_type_orig + ) + obj_id_ent_type_map[entity_type_orig] = map_key + + # Get object id of entity type + attr_key = ent_types_by_name.get(map_key) + + # Backup solution when id is not found by pre-queried objects + if not attr_key: + query = "ObjectType where name is \"{}\"".format(map_key) + attr_key = self.session.query(query).one()["id"] + ent_types_by_name[map_key] = attr_key + + prepared_attrs = attrs_per_entity_type.get(attr_key) + prepared_avalon_attr = avalon_attrs.get(attr_key) + prepared_attrs_ca_id = attrs_per_entity_type_ca_id.get(attr_key) + prepared_avalon_attr_ca_id = avalon_attrs_ca_id.get(attr_key) + if prepared_attrs: + self.entities_dict[entity_id]["custom_attributes"] = ( + prepared_attrs.copy() + ) + if prepared_attrs_ca_id: + self.entities_dict[entity_id]["custom_attributes_id"] = ( + prepared_attrs_ca_id.copy() + ) + if prepared_avalon_attr: + self.entities_dict[entity_id]["avalon_attrs"] = ( + prepared_avalon_attr.copy() + ) + if prepared_avalon_attr_ca_id: + self.entities_dict[entity_id]["avalon_attrs_id"] = ( + prepared_avalon_attr_ca_id.copy() + ) + + # TODO query custom attributes by entity_id + entity_ids_joined = ", ".join([ + "\"{}\"".format(id) for id in sync_ids + ]) + attributes_joined = ", ".join([ + "\"{}\"".format(name) for name in attrs + ]) + + cust_attr_query = ( + "select value, entity_id from ContextCustomAttributeValue " + "where entity_id in ({}) and configuration_id in ({})" + ) + call_expr = [{ + "action": "query", + "expression": cust_attr_query.format( + entity_ids_joined, attributes_joined + ) + }] + if hasattr(self.session, "call"): + [values] = self.session.call(call_expr) + else: + [values] = self.session._call(call_expr) + + for value in values["data"]: + entity_id = value["entity_id"] + key = value["configuration"]["key"] + store_key = "custom_attributes" + if key.startswith("avalon_"): + store_key = "avalon_attrs" + self.entities_dict[entity_id][store_key][key] = value["value"] + + # process hierarchical attributes + self.set_hierarchical_attribute(hier_attrs, sync_ids)
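set_cutom_attributes fetches all custom attribute values in one round trip: every entity id and configuration id is joined into a single ContextCustomAttributeValue expression sent through session.call (falling back to the private session._call on older ftrack_api releases, as the hasattr check above does). A minimal sketch of that bulk query, assuming an ftrack_api.Session instance (query_values is an illustrative helper, not part of the codebase):

    def query_values(session, entity_ids, configuration_ids):
        # One batched "query" action instead of one query per entity.
        expression = (
            "select value, entity_id from ContextCustomAttributeValue "
            "where entity_id in ({}) and configuration_id in ({})"
        ).format(
            ", ".join("\"{}\"".format(_id) for _id in entity_ids),
            ", ".join("\"{}\"".format(_id) for _id in configuration_ids)
        )
        call_expr = [{"action": "query", "expression": expression}]
        if hasattr(session, "call"):
            [values] = session.call(call_expr)
        else:
            [values] = session._call(call_expr)
        return values["data"]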
+ + def set_hierarchical_attribute(self, hier_attrs, sync_ids): + # collect all hierarchical attribute keys + # and prepare default values for the project + attribute_names = [] + attribute_ids = [] + for attr in hier_attrs: + key = attr["key"] + attribute_ids.append(attr["id"]) + attribute_names.append(key) + + store_key = "hier_attrs" + if key.startswith("avalon_"): + store_key = "avalon_attrs" + + self.entities_dict[self.ft_project_id][store_key][key] = ( + attr["default"] + ) + + # Prepare dict with all hier keys and None values + prepare_dict = {} + prepare_dict_avalon = {} + for attr in attribute_names: + if attr.startswith("avalon_"): + prepare_dict_avalon[attr] = None + else: + prepare_dict[attr] = None + + for id, entity_dict in self.entities_dict.items(): + # Skip project because it has stored defaults at the moment + if entity_dict["entity_type"] == "project": + continue + entity_dict["hier_attrs"] = prepare_dict.copy() + for key, val in prepare_dict_avalon.items(): + entity_dict["avalon_attrs"][key] = val + + # Prepare values to query + entity_ids_joined = ", ".join([ + "\"{}\"".format(id) for id in sync_ids + ]) + attributes_joined = ", ".join([ + "\"{}\"".format(name) for name in attribute_ids + ]) + call_expr = [{ + "action": "query", + "expression": ( + "select value, entity_id from CustomAttributeValue " + "where entity_id in ({}) and configuration_id in ({})" + ).format(entity_ids_joined, attributes_joined) + }] + if hasattr(self.session, "call"): + [values] = self.session.call(call_expr) + else: + [values] = self.session._call(call_expr) + + avalon_hier = [] + for value in values["data"]: + if value["value"] is None: + continue + entity_id = value["entity_id"] + key = value["configuration"]["key"] + store_key = "hier_attrs" + if key.startswith("avalon_"): + store_key = "avalon_attrs" + avalon_hier.append(key) + self.entities_dict[entity_id][store_key][key] = value["value"] + + # Get dictionary with non-None hierarchical values to pull down to children + top_id = self.ft_project_id + project_values = {} + for key, value in self.entities_dict[top_id]["hier_attrs"].items(): + if value is not None: + project_values[key] = value + + for key in avalon_hier: + value = self.entities_dict[top_id]["avalon_attrs"][key] + if value is not None: + project_values[key] = value + + hier_down_queue = queue.Queue() + hier_down_queue.put((project_values, top_id)) + + while not hier_down_queue.empty(): + hier_values, parent_id = hier_down_queue.get() + for child_id in self.entities_dict[parent_id]["children"]: + _hier_values = hier_values.copy() + for name in attribute_names: + store_key = "hier_attrs" + if name.startswith("avalon_"): + store_key = "avalon_attrs" + value = self.entities_dict[child_id][store_key][name] + if value is not None: + _hier_values[name] = value + + self.entities_dict[child_id]["hier_attrs"].update(_hier_values) + hier_down_queue.put((_hier_values, child_id))
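set_hierarchical_attribute resolves hierarchical values top-down: the project's non-None values seed a queue, and each child overrides only the keys where it carries an explicit value of its own before its merged dict is pushed on for its children. A minimal sketch of that cascade over the same entities_dict shape (cascade is an illustrative name, not part of the codebase):

    import queue

    def cascade(entities_dict, root_id, root_values):
        # Children inherit the parent's values and override only keys
        # where they have an explicit (non-None) value themselves.
        hier_queue = queue.Queue()
        hier_queue.put((root_values, root_id))
        while not hier_queue.empty():
            parent_values, parent_id = hier_queue.get()
            for child_id in entities_dict[parent_id]["children"]:
                child_values = parent_values.copy()
                for key, value in entities_dict[child_id]["hier_attrs"].items():
                    if value is not None:
                        child_values[key] = value
                entities_dict[child_id]["hier_attrs"].update(child_values)
                hier_queue.put((child_values, child_id))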
+ + def remove_from_archived(self, mongo_id): + entity = self.avalon_archived_by_id.pop(mongo_id, None) + if not entity: + return + + if self._avalon_archived_ents is not None: + if entity in self._avalon_archived_ents: + self._avalon_archived_ents.remove(entity) + + if self._avalon_archived_by_name is not None: + name = entity["name"] + if name in self._avalon_archived_by_name: + name_ents = self._avalon_archived_by_name[name] + if entity in name_ents: + if len(name_ents) == 1: + self._avalon_archived_by_name.pop(name) + else: + self._avalon_archived_by_name[name].remove(entity) + + # TODO use custom None instead of __NOTSET__ + if self._avalon_archived_by_parent_id is not None: + parent_id = entity.get("data", {}).get( + "visualParent", "__NOTSET__" + ) + if parent_id is not None: + parent_id = str(parent_id) + + if parent_id in self._avalon_archived_by_parent_id: + parent_list = self._avalon_archived_by_parent_id[parent_id] + if entity not in parent_list: + self._avalon_archived_by_parent_id[parent_id].remove( + entity + ) + + def prepare_ftrack_ent_data(self): + not_set_ids = [] + for id, entity_dict in self.entities_dict.items(): + entity = entity_dict["entity"] + if entity is None: + not_set_ids.append(id) + continue + + self.entities_dict[id]["final_entity"] = {} + self.entities_dict[id]["final_entity"]["name"] = ( + entity_dict["name"] + ) + data = {} + data["ftrackId"] = entity["id"] + data["entityType"] = entity_dict["entity_type_orig"] + + for key, val in entity_dict.get("custom_attributes", []).items(): + data[key] = val + + for key, val in entity_dict.get("hier_attrs", []).items(): + data[key] = val + + if id == self.ft_project_id: + data["code"] = entity["name"] + self.entities_dict[id]["final_entity"]["data"] = data + self.entities_dict[id]["final_entity"]["type"] = "project" + + proj_schema = entity["project_schema"] + task_types = proj_schema["_task_type_schema"]["types"] + proj_apps, warnings = get_project_apps( + (data.get("applications") or []) + ) + for msg, items in warnings.items(): + if not msg or not items: + continue + self.report_items["warning"][msg] = items + + self.entities_dict[id]["final_entity"]["config"] = { + "tasks": [{"name": tt["name"]} for tt in task_types], + "apps": proj_apps + } + continue + + ent_path_items = [ent["name"] for ent in entity["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + hierarchy = "" + if len(parents) > 0: + hierarchy = os.path.sep.join(parents) + + data["parents"] = parents + data["hierarchy"] = hierarchy + data["tasks"] = self.entities_dict[id].pop("tasks", []) + self.entities_dict[id]["final_entity"]["data"] = data + self.entities_dict[id]["final_entity"]["type"] = "asset" + + if not_set_ids: + self.log.debug(( + "- Debug information: Filtering bug, there are empty dicts " + "in entities dict (functionality should not be affected) <{}>" + ).format("| ".join(not_set_ids))) + for id in not_set_ids: + self.entities_dict.pop(id) + + def get_ent_path(self, ftrack_id): + ent_path = self._ent_paths_by_ftrack_id.get(ftrack_id) + if not ent_path: + entity = self.entities_dict[ftrack_id]["entity"] + ent_path = "/".join( + [ent["name"] for ent in entity["link"]] + ) + self._ent_paths_by_ftrack_id[ftrack_id] = ent_path + + return ent_path + + def prepare_avalon_entities(self, ft_project_name): + self.log.debug(( + "* Preparing avalon entities " + "(separate into Create, Update and Deleted groups)" + )) + # Avalon entities + self.dbcon.install() + self.dbcon.Session["AVALON_PROJECT"] = ft_project_name + avalon_project = self.dbcon.find_one({"type": "project"}) + avalon_entities = self.dbcon.find({"type": "asset"}) + self.avalon_project = avalon_project + self.avalon_entities = avalon_entities + + ftrack_avalon_mapper = {} + avalon_ftrack_mapper = {} + create_ftrack_ids = [] + update_ftrack_ids = [] + + same_mongo_id = [] + all_mongo_ids = {} + for ftrack_id, entity_dict in self.entities_dict.items(): + mongo_id = entity_dict["avalon_attrs"].get(CustAttrIdKey) + if not mongo_id: + continue + if mongo_id in all_mongo_ids: + same_mongo_id.append(mongo_id) + else: + all_mongo_ids[mongo_id] = [] + all_mongo_ids[mongo_id].append(ftrack_id) + + if avalon_project: + mongo_id = str(avalon_project["_id"]) 
+ ftrack_avalon_mapper[self.ft_project_id] = mongo_id + avalon_ftrack_mapper[mongo_id] = self.ft_project_id + update_ftrack_ids.append(self.ft_project_id) + else: + create_ftrack_ids.append(self.ft_project_id) + + # make it go hierarchically + prepare_queue = queue.Queue() + + for child_id in self.entities_dict[self.ft_project_id]["children"]: + prepare_queue.put(child_id) + + while not prepare_queue.empty(): + ftrack_id = prepare_queue.get() + for child_id in self.entities_dict[ftrack_id]["children"]: + prepare_queue.put(child_id) + + entity_dict = self.entities_dict[ftrack_id] + ent_path = self.get_ent_path(ftrack_id) + + mongo_id = entity_dict["avalon_attrs"].get(CustAttrIdKey) + av_ent_by_mongo_id = self.avalon_ents_by_id.get(mongo_id) + if av_ent_by_mongo_id: + av_ent_ftrack_id = av_ent_by_mongo_id.get("data", {}).get( + "ftrackId" + ) + is_right = False + else_match_better = False + if av_ent_ftrack_id and av_ent_ftrack_id == ftrack_id: + is_right = True + + elif mongo_id not in same_mongo_id: + is_right = True + + else: + ftrack_ids_with_same_mongo = all_mongo_ids[mongo_id] + for _ftrack_id in ftrack_ids_with_same_mongo: + if _ftrack_id == av_ent_ftrack_id: + continue + + _entity_dict = self.entities_dict[_ftrack_id] + _mongo_id = _entity_dict["avalon_attrs"][CustAttrIdKey] + _av_ent_by_mongo_id = self.avalon_ents_by_id.get( + _mongo_id + ) + _av_ent_ftrack_id = _av_ent_by_mongo_id.get( + "data", {} + ).get("ftrackId") + if _av_ent_ftrack_id == ftrack_id: + else_match_better = True + break + + if not is_right and not else_match_better: + entity = entity_dict["entity"] + ent_path_items = [ent["name"] for ent in entity["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + av_parents = av_ent_by_mongo_id["data"]["parents"] + if av_parents == parents: + is_right = True + else: + name = entity_dict["name"] + av_name = av_ent_by_mongo_id["name"] + if name == av_name: + is_right = True + + if is_right: + self.log.debug( + "Existing (by MongoID) <{}>".format(ent_path) + ) + ftrack_avalon_mapper[ftrack_id] = mongo_id + avalon_ftrack_mapper[mongo_id] = ftrack_id + update_ftrack_ids.append(ftrack_id) + continue + + mongo_id = self.avalon_ents_by_ftrack_id.get(ftrack_id) + if not mongo_id: + mongo_id = self.avalon_ents_by_name.get(entity_dict["name"]) + if mongo_id: + self.log.debug( + "Existing (by matching name) <{}>".format(ent_path) + ) + else: + self.log.debug( + "Existing (by FtrackID in mongo) <{}>".format(ent_path) + ) + + if mongo_id: + ftrack_avalon_mapper[ftrack_id] = mongo_id + avalon_ftrack_mapper[mongo_id] = ftrack_id + update_ftrack_ids.append(ftrack_id) + continue + + self.log.debug("New <{}>".format(ent_path)) + create_ftrack_ids.append(ftrack_id) + + deleted_entities = [] + for mongo_id in self.avalon_ents_by_id: + if mongo_id in avalon_ftrack_mapper: + continue + deleted_entities.append(mongo_id) + + av_ent = self.avalon_ents_by_id[mongo_id] + av_ent_path_items = [p for p in av_ent["data"]["parents"]] + av_ent_path_items.append(av_ent["name"]) + self.log.debug("Deleted <{}>".format("/".join(av_ent_path_items))) + + self.ftrack_avalon_mapper = ftrack_avalon_mapper + self.avalon_ftrack_mapper = avalon_ftrack_mapper + self.create_ftrack_ids = create_ftrack_ids + self.update_ftrack_ids = update_ftrack_ids + self.deleted_entities = deleted_entities + + self.log.debug(( + "Ftrack -> Avalon comparison: New <{}> " + "| Existing <{}> | Deleted <{}>" + ).format( + len(create_ftrack_ids), + len(update_ftrack_ids), + len(deleted_entities) + )) + + def filter_with_children(self, 
ftrack_id): + if ftrack_id not in self.entities_dict: + return + ent_dict = self.entities_dict[ftrack_id] + parent_id = ent_dict["parent_id"] + self.entities_dict[parent_id]["children"].remove(ftrack_id) + + children_queue = queue.Queue() + children_queue.put(ftrack_id) + while not children_queue.empty(): + _ftrack_id = children_queue.get() + entity_dict = self.entities_dict.pop(_ftrack_id, {"children": []}) + for child_id in entity_dict["children"]: + children_queue.put(child_id) + + def prepare_changes(self): + self.log.debug("* Preparing changes for avalon/ftrack") + hierarchy_changing_ids = [] + ignore_keys = collections.defaultdict(list) + + update_queue = queue.Queue() + for ftrack_id in self.update_ftrack_ids: + update_queue.put(ftrack_id) + + while not update_queue.empty(): + ftrack_id = update_queue.get() + if ftrack_id == self.ft_project_id: + changes = self.prepare_project_changes() + if changes: + self.updates[self.avalon_project_id] = changes + continue + + ftrack_ent_dict = self.entities_dict[ftrack_id] + + # *** check parents + parent_check = False + + ftrack_parent_id = ftrack_ent_dict["parent_id"] + avalon_id = self.ftrack_avalon_mapper[ftrack_id] + avalon_entity = self.avalon_ents_by_id[avalon_id] + avalon_parent_id = avalon_entity["data"]["visualParent"] + if avalon_parent_id is not None: + avalon_parent_id = str(avalon_parent_id) + + ftrack_parent_mongo_id = self.ftrack_avalon_mapper[ + ftrack_parent_id + ] + + # if parent is project + if (ftrack_parent_mongo_id == avalon_parent_id) or ( + ftrack_parent_id == self.ft_project_id and + avalon_parent_id is None + ): + parent_check = True + + # check name + ftrack_name = ftrack_ent_dict["name"] + avalon_name = avalon_entity["name"] + name_check = ftrack_name == avalon_name + + # IDEAL STATE: both parent and name check passed + if parent_check and name_check: + continue + + # If entity is changeable then change values of parent or name + if self.changeability_by_mongo_id[avalon_id]: + # TODO logging + if not parent_check: + if ftrack_parent_mongo_id == str(self.avalon_project_id): + new_parent_name = self.entities_dict[ + self.ft_project_id]["name"] + new_parent_id = None + else: + new_parent_name = self.avalon_ents_by_id[ + ftrack_parent_mongo_id]["name"] + new_parent_id = ObjectId(ftrack_parent_mongo_id) + + if avalon_parent_id == str(self.avalon_project_id): + old_parent_name = self.entities_dict[ + self.ft_project_id]["name"] + else: + old_parent_name = self.avalon_ents_by_id[ + ftrack_parent_mongo_id]["name"] + + self.updates[avalon_id]["data"] = { + "visualParent": new_parent_id + } + ignore_keys[ftrack_id].append("data.visualParent") + self.log.debug(( + "Avalon entity \"{}\" changed parent \"{}\" -> \"{}\"" + ).format(avalon_name, old_parent_name, new_parent_name)) + + if not name_check: + self.updates[avalon_id]["name"] = ftrack_name + ignore_keys[ftrack_id].append("name") + self.log.debug( + "Avalon entity \"{}\" was renamed to \"{}\"".format( + avalon_name, ftrack_name + ) + ) + continue + + # parents and hierarchy must be recalculated + hierarchy_changing_ids.append(ftrack_id) + + # Parent is project if avalon_parent_id is set to None + if avalon_parent_id is None: + avalon_parent_id = str(self.avalon_project_id) + + if not name_check: + ent_path = self.get_ent_path(ftrack_id) + # TODO report + # TODO logging + self.entities_dict[ftrack_id]["name"] = avalon_name + self.entities_dict[ftrack_id]["entity"]["name"] = ( + avalon_name + ) + self.entities_dict[ftrack_id]["final_entity"]["name"] = ( + avalon_name + ) + 
self.log.warning("Name was changed back to {} <{}>".format( + avalon_name, ent_path + )) + self._ent_paths_by_ftrack_id.pop(ftrack_id, None) + msg = ( + " It is not possible to change" + " the name of an entity or it's parents, " + " if it already contained published data." + ) + self.report_items["warning"][msg].append(ent_path) + + # skip parent oricessing if hierarchy didn't change + if parent_check: + continue + + # Logic when parenting(hierarchy) has changed and should not + old_ftrack_parent_id = self.avalon_ftrack_mapper.get( + avalon_parent_id + ) + + # If last ftrack parent id from mongo entity exist then just + # remap paren_id on entity + if old_ftrack_parent_id: + # TODO report + # TODO logging + ent_path = self.get_ent_path(ftrack_id) + msg = ( + " It is not possible" + " to change the hierarchy of an entity or it's parents," + " if it already contained published data." + ) + self.report_items["warning"][msg].append(ent_path) + self.log.warning(( + " Entity contains published data so it was moved" + " back to it's original hierarchy <{}>" + ).format(ent_path)) + self.entities_dict[ftrack_id]["entity"]["parent_id"] = ( + old_ftrack_parent_id + ) + self.entities_dict[ftrack_id]["parent_id"] = ( + old_ftrack_parent_id + ) + self.entities_dict[old_ftrack_parent_id][ + "children" + ].append(ftrack_id) + + continue + + old_parent_ent = self.avalon_ents_by_id.get(avalon_parent_id) + if not old_parent_ent: + old_parent_ent = self.avalon_archived_by_id.get( + avalon_parent_id + ) + + # TODO report + # TODO logging + if not old_parent_ent: + self.log.warning(( + "Parent entity was not found by id" + " - Trying to find by parent name" + )) + ent_path = self.get_ent_path(ftrack_id) + + parents = avalon_entity["data"]["parents"] + parent_name = parents[-1] + matching_entity_id = None + for id, entity_dict in self.entities_dict.items(): + if entity_dict["name"] == parent_name: + matching_entity_id = id + break + + if matching_entity_id is None: + # TODO logging + # TODO report (turn off auto-sync?) + self.log.error(( + "The entity contains published data but it was moved" + " to a different place in the hierarchy and it's" + " previous parent cannot be found." + " It's impossible to solve this programmatically <{}>" + ).format(ent_path)) + msg = ( + " Hierarchy of an entity" + " can't be changed due to published data and missing" + " previous parent" + ) + self.report_items["error"][msg].append(ent_path) + self.filter_with_children(ftrack_id) + continue + + matching_ent_dict = self.entities_dict.get(matching_entity_id) + match_ent_parents = matching_ent_dict.get( + "final_entity", {}).get( + "data", {}).get( + "parents", ["__NOTSET__"] + ) + # TODO logging + # TODO report + if ( + len(match_ent_parents) >= len(parents) or + match_ent_parents[:-1] != parents + ): + ent_path = self.get_ent_path(ftrack_id) + self.log.error(( + "The entity contains published data but it was moved" + " to a different place in the hierarchy and it's" + " previous parents were moved too." + " It's impossible to solve this programmatically <{}>" + ).format(ent_path)) + msg = ( + " Hierarchy of an entity" + " can't be changed due to published data and scrambled" + "hierarchy" + ) + continue + + old_parent_ent = matching_ent_dict["final_entity"] + + parent_id = self.ft_project_id + entities_to_create = [] + # TODO logging + self.log.warning( + "Ftrack entities must be recreated because they were deleted," + " but they contain published data." 
+ ) + + _avalon_ent = old_parent_ent + + self.updates[avalon_parent_id] = {"type": "asset"} + success = True + while True: + _vis_par = _avalon_ent["data"]["visualParent"] + _name = _avalon_ent["name"] + if _name in self.all_ftrack_names: + av_ent_path_items = _avalon_ent["data"]["parents"] + av_ent_path_items.append(_name) + av_ent_path = "/".join(av_ent_path_items) + # TODO report + # TODO logging + self.log.error(( + "Can't recreate the entity in Ftrack because an entity" + " with the same name already exists in a different" + " place in the hierarchy <{}>" + ).format(av_ent_path)) + msg = ( + " Hierarchy of an entity" + " can't be changed. It contains published data and its" + " previous parent had a name that is duplicated at a" + " different hierarchy level" + ) + self.report_items["error"][msg].append(av_ent_path) + self.filter_with_children(ftrack_id) + success = False + break + + entities_to_create.append(_avalon_ent) + if _vis_par is None: + break + + _vis_par = str(_vis_par) + _mapped = self.avalon_ftrack_mapper.get(_vis_par) + if _mapped: + parent_id = _mapped + break + + _avalon_ent = self.avalon_ents_by_id.get(_vis_par) + if not _avalon_ent: + _avalon_ent = self.avalon_archived_by_id.get(_vis_par) + + if success is False: + continue + + new_entity_id = None + for av_entity in reversed(entities_to_create): + new_entity_id = self.create_ftrack_ent_from_avalon_ent( + av_entity, parent_id + ) + update_queue.put(new_entity_id) + + if new_entity_id: + ftrack_ent_dict["entity"]["parent_id"] = new_entity_id + + if hierarchy_changing_ids: + self.reload_parents(hierarchy_changing_ids) + + for ftrack_id in self.update_ftrack_ids: + if ftrack_id == self.ft_project_id: + continue + + avalon_id = self.ftrack_avalon_mapper[ftrack_id] + avalon_entity = self.avalon_ents_by_id[avalon_id] + + avalon_attrs = self.entities_dict[ftrack_id]["avalon_attrs"] + if ( + CustAttrIdKey not in avalon_attrs or + avalon_attrs[CustAttrIdKey] != avalon_id + ): + configuration_id = self.entities_dict[ftrack_id][ + "avalon_attrs_id"][CustAttrIdKey] + + _entity_key = collections.OrderedDict({ + "configuration_id": configuration_id, + "entity_id": ftrack_id + }) + + self.session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + _entity_key, + "value", + ftrack_api.symbol.NOT_SET, + avalon_id + ) + ) + # check the rest of the data + data_changes = self.compare_dict( + self.entities_dict[ftrack_id]["final_entity"], + avalon_entity, + ignore_keys[ftrack_id] + ) + if data_changes: + self.updates[avalon_id] = self.merge_dicts( + data_changes, + self.updates[avalon_id] + ) + + def synchronize(self): + self.log.debug("* Synchronization begins") + avalon_project_id = self.ftrack_avalon_mapper.get(self.ft_project_id) + if avalon_project_id: + self.avalon_project_id = ObjectId(avalon_project_id) + + # remove filtered ftrack ids from create/update lists + for ftrack_id in self.all_filtered_entities: + if ftrack_id in self.create_ftrack_ids: + self.create_ftrack_ids.remove(ftrack_id) + elif ftrack_id in self.update_ftrack_ids: + self.update_ftrack_ids.remove(ftrack_id) + + self.log.debug("* Processing entities for archiving") + self.delete_entities() + + self.log.debug("* Processing new entities") + # Create entities that were not created yet + for ftrack_id in self.create_ftrack_ids: + # CHECK: it is possible that the entity was already created + # because it is a parent of another entity processed first + if ftrack_id in self.ftrack_avalon_mapper: + continue +
self.create_avalon_entity(ftrack_id) + + if len(self.create_list) > 0: + self.dbcon.insert_many(self.create_list) + + self.session.commit() + + self.log.debug("* Processing entities for update") + self.prepare_changes() + self.update_entities() + self.session.commit() + + def create_avalon_entity(self, ftrack_id): + if ftrack_id == self.ft_project_id: + self.create_avalon_project() + return + + entity_dict = self.entities_dict[ftrack_id] + parent_ftrack_id = entity_dict["parent_id"] + avalon_parent = None + if parent_ftrack_id != self.ft_project_id: + avalon_parent = self.ftrack_avalon_mapper.get(parent_ftrack_id) + # if not avalon_parent: + # self.create_avalon_entity(parent_ftrack_id) + # avalon_parent = self.ftrack_avalon_mapper[parent_ftrack_id] + avalon_parent = ObjectId(avalon_parent) + + # avalon_archived_by_id avalon_archived_by_name + current_id = ( + entity_dict["avalon_attrs"].get(CustAttrIdKey) or "" + ).strip() + mongo_id = current_id + name = entity_dict["name"] + + # Check if exist archived asset in mongo - by ID + unarchive = False + unarchive_id = self.check_unarchivation(ftrack_id, mongo_id, name) + if unarchive_id is not None: + unarchive = True + mongo_id = unarchive_id + + item = entity_dict["final_entity"] + try: + new_id = ObjectId(mongo_id) + if mongo_id in self.avalon_ftrack_mapper: + new_id = ObjectId() + except InvalidId: + new_id = ObjectId() + + item["_id"] = new_id + item["parent"] = self.avalon_project_id + item["schema"] = EntitySchemas["asset"] + item["data"]["visualParent"] = avalon_parent + + new_id_str = str(new_id) + self.ftrack_avalon_mapper[ftrack_id] = new_id_str + self.avalon_ftrack_mapper[new_id_str] = ftrack_id + + self._avalon_ents_by_id[new_id_str] = item + self._avalon_ents_by_ftrack_id[ftrack_id] = new_id_str + self._avalon_ents_by_name[item["name"]] = new_id_str + + if current_id != new_id_str: + # store mongo id to ftrack entity + configuration_id = self.entities_dict[ftrack_id][ + "avalon_attrs_id" + ][CustAttrIdKey] + _entity_key = collections.OrderedDict({ + "configuration_id": configuration_id, + "entity_id": ftrack_id }) - except Exception as e: - log.warning('Error with application {0} - {1}'.format(app, e)) - return apps - - -def avalon_check_name(entity, in_schema=None): - default_pattern = "^[a-zA-Z0-9_.]*$" - - name = entity["name"] - schema_name = "asset-3.0" - - if in_schema: - schema_name = in_schema - elif entity.entity_type.lower() == "project": - name = entity["full_name"] - schema_name = "project-2.0" - - schema_obj = avalon.schema._cache.get(schema_name + ".json") - name_pattern = schema_obj.get("properties", {}).get("name", {}).get( - "pattern", default_pattern - ) - if not re.match(name_pattern, name): - msg = "\"{}\" includes unsupported symbols like \"dash\" or \"space\"" - raise ValueError(msg.format(name)) - - -def show_errors(obj, event, errors): - title = 'Hey You! You raised few Errors! (*look below*)' - items = [] - splitter = {'type': 'label', 'value': '---'} - for error in errors: - for key, message in error.items(): - error_title = { - 'type': 'label', - 'value': '# {}'.format(key) - } - error_message = { - 'type': 'label', - 'value': '
\n\n{}\n\n
'.format(message) - } - if len(items) > 0: - items.append(splitter) - items.append(error_title) - items.append(error_message) - obj.log.error( - '{}: {}'.format(key, message) + self.session.recorded_operations.push( + ftrack_api.operation.UpdateEntityOperation( + "ContextCustomAttributeValue", + _entity_key, + "value", + ftrack_api.symbol.NOT_SET, + new_id_str + ) ) - obj.show_interface(items, title, event=event) + + if unarchive is False: + self.create_list.append(item) + return + # If unarchive then replace entity data in database + self.dbcon.replace_one({"_id": new_id}, item) + self.remove_from_archived(mongo_id) + av_ent_path_items = item["data"]["parents"] + av_ent_path_items.append(item["name"]) + av_ent_path = "/".join(av_ent_path_items) + self.log.debug("Entity was unarchived <{}>".format(av_ent_path)) + + def check_unarchivation(self, ftrack_id, mongo_id, name): + archived_by_id = self.avalon_archived_by_id.get(mongo_id) + archived_by_name = self.avalon_archived_by_name.get(name) + + # if not found in archived then skip + if not archived_by_id and not archived_by_name: + return None + + entity_dict = self.entities_dict[ftrack_id] + + if archived_by_id: + # if is changeable then unarchive (nothing to check here) + if self.changeability_by_mongo_id[mongo_id]: + return mongo_id + + # TODO replace `__NOTSET__` with custom None constant + archived_parent_id = archived_by_id["data"].get( + "visualParent", "__NOTSET__" + ) + archived_parents = archived_by_id["data"].get("parents") + archived_name = archived_by_id["name"] + + if ( + archived_name != entity_dict["name"] or + archived_parents != entity_dict["final_entity"]["data"][ + "parents" + ] + ): + return None + + return mongo_id + + # First check if there is any that have same parents + for archived in archived_by_name: + mongo_id = str(archived["_id"]) + archived_parents = archived.get("data", {}).get("parents") + if ( + archived_parents == entity_dict["final_entity"]["data"][ + "parents" + ] + ): + return mongo_id + + # Secondly try to find more close to current ftrack entity + first_changeable = None + for archived in archived_by_name: + mongo_id = str(archived["_id"]) + if not self.changeability_by_mongo_id[mongo_id]: + continue + + if first_changeable is None: + first_changeable = mongo_id + + ftrack_parent_id = entity_dict["parent_id"] + map_ftrack_parent_id = self.ftrack_avalon_mapper.get( + ftrack_parent_id + ) + + # TODO replace `__NOTSET__` with custom None constant + archived_parent_id = archived.get("data", {}).get( + "visualParent", "__NOTSET__" + ) + if archived_parent_id is not None: + archived_parent_id = str(archived_parent_id) + + # skip if parent is archived - How this should be possible? 
+ parent_entity = self.avalon_ents_by_id.get(archived_parent_id) + if ( + parent_entity and ( + map_ftrack_parent_id is not None and + map_ftrack_parent_id == str(parent_entity["_id"]) + ) + ): + return mongo_id + # Last return first changeable with same name (or None) + return first_changeable + + def create_avalon_project(self): + project_item = self.entities_dict[self.ft_project_id]["final_entity"] + mongo_id = ( + self.entities_dict[self.ft_project_id]["avalon_attrs"].get( + CustAttrIdKey + ) or "" + ).strip() + + try: + new_id = ObjectId(mongo_id) + except InvalidId: + new_id = ObjectId() + + project_name = self.entities_dict[self.ft_project_id]["name"] + project_item["_id"] = new_id + project_item["parent"] = None + project_item["schema"] = EntitySchemas["project"] + project_item["config"]["schema"] = EntitySchemas["config"] + project_item["config"]["template"] = ( + get_avalon_project_template(project_name) + ) + + self.ftrack_avalon_mapper[self.ft_project_id] = new_id + self.avalon_ftrack_mapper[new_id] = self.ft_project_id + + self.avalon_project_id = new_id + + self._avalon_ents_by_id[str(new_id)] = project_item + if self._avalon_ents_by_ftrack_id is None: + self._avalon_ents_by_ftrack_id = {} + self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id) + if self._avalon_ents_by_name is None: + self._avalon_ents_by_name = {} + self._avalon_ents_by_name[project_item["name"]] = str(new_id) + + self.create_list.append(project_item) + + # store mongo id to ftrack entity + entity = self.entities_dict[self.ft_project_id]["entity"] + entity["custom_attributes"][CustAttrIdKey] = str(new_id) + + def _bubble_changeability(self, unchangeable_ids): + unchangeable_queue = queue.Queue() + for entity_id in unchangeable_ids: + unchangeable_queue.put((entity_id, False)) + + processed_parents_ids = [] + subsets_to_remove = [] + while not unchangeable_queue.empty(): + entity_id, child_is_archived = unchangeable_queue.get() + # skip if already processed + if entity_id in processed_parents_ids: + continue + + entity = self.avalon_ents_by_id.get(entity_id) + # if entity is not archived but unchageable child was then skip + # - archived entities should not affect not archived? + if entity and child_is_archived: + continue + + # set changeability of current entity to False + self._changeability_by_mongo_id[entity_id] = False + processed_parents_ids.append(entity_id) + # if not entity then is probably archived + if not entity: + entity = self.avalon_archived_by_id.get(entity_id) + child_is_archived = True + + if not entity: + # if entity is not found then it is subset without parent + if entity_id in unchangeable_ids: + subsets_to_remove.append(entity_id) + else: + # TODO logging - What is happening here? 
+ self.log.warning(( + "Avalon contains entities without valid parents that" + " lead to Project (should not cause errors)" + " - MongoId <{}>" + ).format(str(entity_id))) + continue + + # skip if parent is project + parent_id = entity["data"]["visualParent"] + if parent_id is None: + continue + unchangeable_queue.put((str(parent_id), child_is_archived)) + + self._delete_subsets_without_asset(subsets_to_remove) + + def _delete_subsets_without_asset(self, not_existing_parents): + subset_ids = [] + version_ids = [] + repre_ids = [] + to_delete = [] + + for parent_id in not_existing_parents: + subsets = self.subsets_by_parent_id.get(parent_id) + if not subsets: + continue + for subset in subsets: + if subset.get("type") != "subset": + continue + subset_ids.append(subset["_id"]) + + db_subsets = self.dbcon.find({ + "_id": {"$in": subset_ids}, + "type": "subset" + }) + if not db_subsets: + return + + db_versions = self.dbcon.find({ + "parent": {"$in": subset_ids}, + "type": "version" + }) + if db_versions: + version_ids = [ver["_id"] for ver in db_versions] + + db_repres = self.dbcon.find({ + "parent": {"$in": version_ids}, + "type": "representation" + }) + if db_repres: + repre_ids = [repre["_id"] for repre in db_repres] + + to_delete.extend(subset_ids) + to_delete.extend(version_ids) + to_delete.extend(repre_ids) + + self.dbcon.delete_many({"_id": {"$in": to_delete}}) + + # Probably deprecated + def _check_changeability(self, parent_id=None): + for entity in self.avalon_ents_by_parent_id[parent_id]: + mongo_id = str(entity["_id"]) + is_changeable = self._changeability_by_mongo_id.get(mongo_id) + if is_changeable is not None: + continue + + self._check_changeability(mongo_id) + is_changeable = True + for child in self.avalon_ents_by_parent_id[parent_id]: + if not self._changeability_by_mongo_id[str(child["_id"])]: + is_changeable = False + break + + if is_changeable is True: + is_changeable = (mongo_id in self.subsets_by_parent_id) + self._changeability_by_mongo_id[mongo_id] = is_changeable + + def update_entities(self): + mongo_changes_bulk = [] + for mongo_id, changes in self.updates.items(): + filter = {"_id": ObjectId(mongo_id)} + change_data = from_dict_to_set(changes) + mongo_changes_bulk.append(UpdateOne(filter, change_data)) + + if not mongo_changes_bulk: + # TODO LOG + return + self.dbcon.bulk_write(mongo_changes_bulk) + + def reload_parents(self, hierarchy_changing_ids): + parents_queue = queue.Queue() + parents_queue.put((self.ft_project_id, [], False)) + while not parents_queue.empty(): + ftrack_id, parent_parents, changed = parents_queue.get() + _parents = parent_parents.copy() + if ftrack_id not in hierarchy_changing_ids and not changed: + if ftrack_id != self.ft_project_id: + _parents.append(self.entities_dict[ftrack_id]["name"]) + for child_id in self.entities_dict[ftrack_id]["children"]: + parents_queue.put((child_id, _parents, changed)) + continue + + changed = True + parents = [par for par in _parents] + hierarchy = "/".join(parents) + self.entities_dict[ftrack_id][ + "final_entity"]["data"]["parents"] = parents + self.entities_dict[ftrack_id][ + "final_entity"]["data"]["hierarchy"] = hierarchy + + _parents.append(self.entities_dict[ftrack_id]["name"]) + for child_id in self.entities_dict[ftrack_id]["children"]: + parents_queue.put((child_id, _parents, changed)) + + if ftrack_id in self.create_ftrack_ids: + mongo_id = self.ftrack_avalon_mapper[ftrack_id] + if "data" not in self.updates[mongo_id]: + self.updates[mongo_id]["data"] = {} + 
self.updates[mongo_id]["data"]["parents"] = parents + self.updates[mongo_id]["data"]["hierarchy"] = hierarchy + + def prepare_project_changes(self): + ftrack_ent_dict = self.entities_dict[self.ft_project_id] + ftrack_entity = ftrack_ent_dict["entity"] + avalon_code = self.avalon_project["data"]["code"] + # TODO Is possible to sync if full name was changed? + # if ftrack_ent_dict["name"] != self.avalon_project["name"]: + # ftrack_entity["full_name"] = avalon_name + # self.entities_dict[self.ft_project_id]["name"] = avalon_name + # self.entities_dict[self.ft_project_id]["final_entity"][ + # "name" + # ] = avalon_name + + # TODO logging + # TODO report + # TODO May this happen? Is possible to change project code? + if ftrack_entity["name"] != avalon_code: + ftrack_entity["name"] = avalon_code + self.entities_dict[self.ft_project_id]["final_entity"]["data"][ + "code" + ] = avalon_code + self.session.commit() + sub_msg = ( + "Project code was changed back to \"{}\"".format(avalon_code) + ) + msg = ( + "It is not possible to change" + " project code after synchronization" + ) + self.report_items["warning"][msg] = sub_msg + self.log.warning(sub_msg) + + return self.compare_dict( + self.entities_dict[self.ft_project_id]["final_entity"], + self.avalon_project + ) + + def compare_dict(self, dict_new, dict_old, _ignore_keys=[]): + # _ignore_keys may be used for keys nested dict like"data.visualParent" + changes = {} + ignore_keys = [] + for key_val in _ignore_keys: + key_items = key_val.split(".") + if len(key_items) == 1: + ignore_keys.append(key_items[0]) + + for key, value in dict_new.items(): + if key in ignore_keys: + continue + + if key not in dict_old: + changes[key] = value + continue + + if isinstance(value, dict): + if not isinstance(dict_old[key], dict): + changes[key] = value + continue + + _new_ignore_keys = [] + for key_val in _ignore_keys: + key_items = key_val.split(".") + if len(key_items) <= 1: + continue + _new_ignore_keys.append(".".join(key_items[1:])) + + _changes = self.compare_dict( + value, dict_old[key], _new_ignore_keys + ) + if _changes: + changes[key] = _changes + continue + + if value != dict_old[key]: + changes[key] = value + + return changes + + def merge_dicts(self, dict_new, dict_old): + for key, value in dict_new.items(): + if key not in dict_old: + dict_old[key] = value + continue + + if isinstance(value, dict): + dict_old[key] = self.merge_dicts(value, dict_old[key]) + continue + + dict_old[key] = value + + return dict_old + + def delete_entities(self): + if not self.deleted_entities: + return + # Try to order so child is not processed before parent + deleted_entities = [] + _deleted_entities = [id for id in self.deleted_entities] + + while True: + if not _deleted_entities: + break + _ready = [] + for mongo_id in _deleted_entities: + ent = self.avalon_ents_by_id[mongo_id] + vis_par = ent["data"]["visualParent"] + if ( + vis_par is not None and + str(vis_par) in _deleted_entities + ): + continue + _ready.append(mongo_id) + + for id in _ready: + deleted_entities.append(id) + _deleted_entities.remove(id) + + delete_ids = [] + for mongo_id in deleted_entities: + # delete if they are deletable + if self.changeability_by_mongo_id[mongo_id]: + delete_ids.append(ObjectId(mongo_id)) + continue + + # check if any new created entity match same entity + # - name and parents must match + deleted_entity = self.avalon_ents_by_id[mongo_id] + name = deleted_entity["name"] + parents = deleted_entity["data"]["parents"] + similar_ent_id = None + for ftrack_id in 
self.create_ftrack_ids: + _ent_final = self.entities_dict[ftrack_id]["final_entity"] + if _ent_final["name"] != name: + continue + if _ent_final["data"]["parents"] != parents: + continue + + # If the "same" entity is in the create list then we can "archive" + # the current one since it will be unarchived in the create method + similar_ent_id = ftrack_id + break + + # If a similar entity (same name and parents) is in the create + # entities list then just change it from create to update + if similar_ent_id is not None: + self.create_ftrack_ids.remove(similar_ent_id) + self.update_ftrack_ids.append(similar_ent_id) + self.avalon_ftrack_mapper[mongo_id] = similar_ent_id + self.ftrack_avalon_mapper[similar_ent_id] = mongo_id + continue + + found_by_name_id = None + for ftrack_id, ent_dict in self.entities_dict.items(): + if not ent_dict.get("name"): + continue + + if name == ent_dict["name"]: + found_by_name_id = ftrack_id + break + + if found_by_name_id is not None: + # * These conditions are too complex to implement in the first stage + # - probably not possible to solve if this happens + # if found_by_name_id in self.create_ftrack_ids: + # # reparent entity of the new one create? + # pass + # + # elif found_by_name_id in self.update_ftrack_ids: + # found_mongo_id = self.ftrack_avalon_mapper[found_by_name_id] + # + # ent_dict = self.entities_dict[found_by_name_id] + + # TODO report - CRITICAL: an entity with the same name already + # exists in a different hierarchy - can't recreate the entity + continue + + _vis_parent = deleted_entity["data"]["visualParent"] + if _vis_parent is None: + _vis_parent = self.avalon_project_id + _vis_parent = str(_vis_parent) + ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent] + self.create_ftrack_ent_from_avalon_ent( + deleted_entity, ftrack_parent_id + ) + + filter = {"_id": {"$in": delete_ids}, "type": "asset"} + self.dbcon.update_many(filter, {"$set": {"type": "archived_asset"}}) + + def create_ftrack_ent_from_avalon_ent(self, av_entity, parent_id): + new_entity = None + parent_entity = self.entities_dict[parent_id]["entity"] + + _name = av_entity["name"] + _type = av_entity["data"].get("entityType", "folder") + + self.log.debug(( + "Re-creating deleted entity {} <{}>" + ).format(_name, _type)) + + new_entity = self.session.create(_type, { + "name": _name, + "parent": parent_entity + }) + + final_entity = {} + for k, v in av_entity.items(): + final_entity[k] = v + + if final_entity.get("type") != "asset": + final_entity["type"] = "asset" + + new_entity_id = new_entity["id"] + new_entity_data = { + "entity": new_entity, + "parent_id": parent_id, + "entity_type": _type.lower(), + "entity_type_orig": _type, + "name": _name, + "final_entity": final_entity + } + for k, v in new_entity_data.items(): + self.entities_dict[new_entity_id][k] = v + + p_children = self.entities_dict[parent_id]["children"] + if new_entity_id not in p_children: + self.entities_dict[parent_id]["children"].append(new_entity_id) + + cust_attr, hier_attrs = get_avalon_attr(self.session) + for _attr in cust_attr: + key = _attr["key"] + if key not in av_entity["data"]: + continue + + if key not in new_entity["custom_attributes"]: + continue + + value = av_entity["data"][key] + if not value: + continue + + new_entity["custom_attributes"][key] = value + + av_entity_id = str(av_entity["_id"]) + new_entity["custom_attributes"][CustAttrIdKey] = av_entity_id + + self.ftrack_avalon_mapper[new_entity_id] = av_entity_id + self.avalon_ftrack_mapper[av_entity_id] = new_entity_id + + self.session.commit() + + ent_path = self.get_ent_path(new_entity_id) + msg = ( + "Deleted entity was recreated
because it or its children" + " contain published data" + ) + + self.report_items["info"][msg].append(ent_path) + + return new_entity_id + + def regex_duplicate_interface(self): + items = [] + if self.failed_regex or self.tasks_failed_regex: + subtitle = "Entity names contain prohibited symbols:" + items.append({ + "type": "label", + "value": "# {}".format(subtitle) + }) + items.append({ + "type": "label", + "value": ( + "
\n\nNOTE: You can use Letters( a-Z )," + " Numbers( 0-9 ) and Underscore( _ )\n\n
" + ) + }) + log_msgs = [] + for name, ids in self.failed_regex.items(): + error_title = { + "type": "label", + "value": "## {}".format(name) + } + items.append(error_title) + paths = [] + for entity_id in ids: + ent_path = self.get_ent_path(entity_id) + paths.append(ent_path) + + error_message = { + "type": "label", + "value": '
\n\n{}\n\n'.format("\n
".join(paths)) + } + items.append(error_message) + log_msgs.append("<{}> ({})".format(name, ",".join(paths))) + + for name, ids in self.tasks_failed_regex.items(): + error_title = { + "type": "label", + "value": "## Task: {}".format(name) + } + items.append(error_title) + paths = [] + for entity_id in ids: + ent_path = self.get_ent_path(entity_id) + ent_path = "/".join([ent_path, name]) + paths.append(ent_path) + + error_message = { + "type": "label", + "value": '
\n\n{}\n\n'.format("\n
".join(paths)) + } + items.append(error_message) + log_msgs.append("<{}> ({})".format(name, ",".join(paths))) + + self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) + + if self.duplicates: + subtitle = "Duplicated entity names:" + items.append({ + "type": "label", + "value": "# {}".format(subtitle) + }) + items.append({ + "type": "label", + "value": ( + "
\n\nNOTE: It is not allowed to use the same name" + " for multiple entities in the same project\n\n
" + ) + }) + log_msgs = [] + for name, ids in self.duplicates.items(): + error_title = { + "type": "label", + "value": "## {}".format(name) + } + items.append(error_title) + paths = [] + for entity_id in ids: + ent_path = self.get_ent_path(entity_id) + paths.append(ent_path) + + error_message = { + "type": "label", + "value": '
\n\n{}\n\n'.format("\n
".join(paths)) + } + items.append(error_message) + log_msgs.append("<{}> ({})".format(name, ", ".join(paths))) + + self.log.warning("{}{}".format(subtitle, ", ".join(log_msgs))) + + return items + + def report(self): + items = [] + project_name = self.entities_dict[self.ft_project_id]["name"] + title = "Synchronization report ({}):".format(project_name) + + keys = ["error", "warning", "info"] + for key in keys: + subitems = [] + if key == "warning": + for _item in self.regex_duplicate_interface(): + subitems.append(_item) + + for msg, _items in self.report_items[key].items(): + if not _items: + continue + + subitems.append({ + "type": "label", + "value": "# {}".format(msg) + }) + if isinstance(_items, str): + _items = [_items] + subitems.append({ + "type": "label", + "value": '
\n\n{}\n\n'.format("\n
".join(_items)) + }) + + if items and subitems: + items.append(self.report_splitter) + + items.extend(subitems) + + return { + "items": items, + "title": title, + "success": False, + "message": "Synchronization Finished" + } diff --git a/pype/ftrack/lib/ftrack_base_handler.py b/pype/ftrack/lib/ftrack_base_handler.py index 40294da230..8329505ffb 100644 --- a/pype/ftrack/lib/ftrack_base_handler.py +++ b/pype/ftrack/lib/ftrack_base_handler.py @@ -2,8 +2,7 @@ import functools import time from pypeapp import Logger import ftrack_api -from ftrack_api import session as fa_session -from pype.ftrack.ftrack_server import session_processor +from pype.ftrack.ftrack_server.lib import SocketSession class MissingPermision(Exception): @@ -42,7 +41,7 @@ class BaseHandler(object): self.log = Logger().get_logger(self.__class__.__name__) if not( isinstance(session, ftrack_api.session.Session) or - isinstance(session, session_processor.ProcessSession) + isinstance(session, SocketSession) ): raise Exception(( "Session object entered with args is instance of \"{}\"" @@ -243,7 +242,7 @@ class BaseHandler(object): _entities is None or _entities[0].get( 'link', None - ) == fa_session.ftrack_api.symbol.NOT_SET + ) == ftrack_api.symbol.NOT_SET ): _entities = self._get_entities(event) @@ -447,7 +446,7 @@ class BaseHandler(object): 'applicationId=ftrack.client.web and user.id="{0}"' ).format(user_id) self.session.event_hub.publish( - fa_session.ftrack_api.event.base.Event( + ftrack_api.event.base.Event( topic='ftrack.action.trigger-user-interface', data=dict( type='message', @@ -495,8 +494,8 @@ class BaseHandler(object): if not user: raise TypeError(( - 'Ftrack user with {} "{}" was not found!'.format(key, value) - )) + 'Ftrack user with {} "{}" was not found!' + ).format(key, value)) user_id = user['id'] @@ -505,7 +504,7 @@ class BaseHandler(object): ).format(user_id) self.session.event_hub.publish( - fa_session.ftrack_api.event.base.Event( + ftrack_api.event.base.Event( topic='ftrack.action.trigger-user-interface', data=dict( type='widget', @@ -533,7 +532,7 @@ class BaseHandler(object): else: first = False - subtitle = {'type': 'label', 'value':'
\n\n{}\n\n
'.format(key)} + subtitle = {'type': 'label', 'value': '
\n\n{}\n\n
'.format(key)} items.append(subtitle) if isinstance(value, list): for item in value: @@ -593,7 +592,7 @@ class BaseHandler(object): # Create and trigger event session.event_hub.publish( - fa_session.ftrack_api.event.base.Event( + ftrack_api.event.base.Event( topic=topic, data=_event_data, source=dict(user=_user_data) @@ -614,7 +613,7 @@ class BaseHandler(object): if not source and event: source = event.get("source") # Create and trigger event - event = fa_session.ftrack_api.event.base.Event( + event = ftrack_api.event.base.Event( topic=topic, data=event_data, source=source diff --git a/pype/ftrack/tray/ftrack_module.py b/pype/ftrack/tray/ftrack_module.py index 8da97da56b..250872f239 100644 --- a/pype/ftrack/tray/ftrack_module.py +++ b/pype/ftrack/tray/ftrack_module.py @@ -1,26 +1,27 @@ import os -import json -import threading import time -from Qt import QtCore, QtGui, QtWidgets +import datetime +import threading +from Qt import QtCore, QtWidgets import ftrack_api -from pypeapp import style -from pype.ftrack import FtrackServer, check_ftrack_url, credentials +from ..ftrack_server.lib import check_ftrack_url +from ..ftrack_server import socket_thread +from ..lib import credentials from . import login_dialog -from pype import api as pype +from pypeapp import Logger -log = pype.Logger().get_logger("FtrackModule", "ftrack") +log = Logger().get_logger("FtrackModule", "ftrack") class FtrackModule: def __init__(self, main_parent=None, parent=None): self.parent = parent self.widget_login = login_dialog.Login_Dialog_ui(self) - self.action_server = FtrackServer('action') self.thread_action_server = None + self.thread_socket_server = None self.thread_timer = None self.bool_logged = False @@ -75,14 +76,6 @@ class FtrackModule: # Actions part def start_action_server(self): - self.bool_action_thread_running = True - self.set_menu_visibility() - if ( - self.thread_action_server is not None and - self.bool_action_thread_running is False - ): - self.stop_action_server() - if self.thread_action_server is None: self.thread_action_server = threading.Thread( target=self.set_action_server @@ -90,35 +83,114 @@ class FtrackModule: self.thread_action_server.start() def set_action_server(self): - first_check = True - while self.bool_action_thread_running is True: - if not check_ftrack_url(os.environ['FTRACK_SERVER']): - if first_check: - log.warning( - "Could not connect to Ftrack server" - ) - first_check = False + if self.bool_action_server_running: + return + + self.bool_action_server_running = True + self.bool_action_thread_running = False + + ftrack_url = os.environ['FTRACK_SERVER'] + + parent_file_path = os.path.dirname( + os.path.dirname(os.path.realpath(__file__)) + ) + + min_fail_seconds = 5 + max_fail_count = 3 + wait_time_after_max_fail = 10 + + # Threads data + thread_name = "ActionServerThread" + thread_port = 10021 + subprocess_path = ( + "{}/ftrack_server/sub_user_server.py".format(parent_file_path) + ) + if self.thread_socket_server is not None: + self.thread_socket_server.stop() + self.thread_socket_server.join() + self.thread_socket_server = None + + last_failed = datetime.datetime.now() + failed_count = 0 + + ftrack_accessible = False + printed_ftrack_error = False + + # Main loop + while True: + if not self.bool_action_server_running: + log.debug("Action server was pushed to stop.") + break + + # Check if accessible Ftrack and Mongo url + if not ftrack_accessible: + ftrack_accessible = check_ftrack_url(ftrack_url) + + # Run threads only if Ftrack is accessible + if not ftrack_accessible: + if not 
printed_ftrack_error: + log.warning("Can't access Ftrack {}".format(ftrack_url)) + + if self.thread_socket_server is not None: + self.thread_socket_server.stop() + self.thread_socket_server.join() + self.thread_socket_server = None + self.bool_action_thread_running = False + self.set_menu_visibility() + + printed_ftrack_error = True + time.sleep(1) continue - log.info( - "Connected to Ftrack server. Running actions session" - ) - try: - self.bool_action_server_running = True + + printed_ftrack_error = False + + # Run backup thread which does not requeire mongo to work + if self.thread_socket_server is None: + if failed_count < max_fail_count: + self.thread_socket_server = socket_thread.SocketThread( + thread_name, thread_port, subprocess_path + ) + self.thread_socket_server.start() + self.bool_action_thread_running = True + self.set_menu_visibility() + + elif failed_count == max_fail_count: + log.warning(( + "Action server failed {} times." + " I'll try to run again {}s later" + ).format( + str(max_fail_count), str(wait_time_after_max_fail)) + ) + failed_count += 1 + + elif (( + datetime.datetime.now() - last_failed + ).seconds > wait_time_after_max_fail): + failed_count = 0 + + # If thread failed test Ftrack and Mongo connection + elif not self.thread_socket_server.isAlive(): + self.thread_socket_server.join() + self.thread_socket_server = None + ftrack_accessible = False + + self.bool_action_thread_running = False self.set_menu_visibility() - self.action_server.run_server() - if self.bool_action_thread_running: - log.debug("Ftrack action server has stopped") - except Exception: - log.warning( - "Ftrack Action server crashed. Trying to connect again", - exc_info=True - ) - self.bool_action_server_running = False - self.set_menu_visibility() - first_check = True + + _last_failed = datetime.datetime.now() + delta_time = (_last_failed - last_failed).seconds + if delta_time < min_fail_seconds: + failed_count += 1 + else: + failed_count = 0 + last_failed = _last_failed + + time.sleep(1) self.bool_action_thread_running = False + self.bool_action_server_running = False + self.set_menu_visibility() def reset_action_server(self): self.stop_action_server() @@ -126,16 +198,18 @@ class FtrackModule: def stop_action_server(self): try: - self.bool_action_thread_running = False - self.action_server.stop_session() + self.bool_action_server_running = False + if self.thread_socket_server is not None: + self.thread_socket_server.stop() + self.thread_socket_server.join() + self.thread_socket_server = None + if self.thread_action_server is not None: self.thread_action_server.join() self.thread_action_server = None log.info("Ftrack action server was forced to stop") - self.bool_action_server_running = False - self.set_menu_visibility() except Exception: log.warning( "Error has happened during Killing action server", @@ -201,9 +275,9 @@ class FtrackModule: self.stop_timer_thread() return - self.aRunActionS.setVisible(not self.bool_action_thread_running) + self.aRunActionS.setVisible(not self.bool_action_server_running) self.aResetActionS.setVisible(self.bool_action_thread_running) - self.aStopActionS.setVisible(self.bool_action_thread_running) + self.aStopActionS.setVisible(self.bool_action_server_running) if self.bool_timer_event is False: self.start_timer_thread() diff --git a/pype/lib.py b/pype/lib.py index e41f9eb8bc..f26395d930 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -1,14 +1,12 @@ import os import re import logging -import importlib import itertools import contextlib import subprocess import inspect 
- -import avalon.io as io +from avalon import io import avalon.api import avalon @@ -16,21 +14,38 @@ log = logging.getLogger(__name__) # Special naming case for subprocess since its a built-in method. -def _subprocess(args): +def _subprocess(*args, **kwargs): """Convenience method for getting output errors for subprocess.""" - proc = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, - env=os.environ - ) + # make sure environment contains only strings + if not kwargs.get("env"): + filtered_env = {k: str(v) for k, v in os.environ.items()} + else: + filtered_env = {k: str(v) for k, v in kwargs.get("env").items()} - output = proc.communicate()[0] + # set overrides + kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE) + kwargs['stderr'] = kwargs.get('stderr', subprocess.STDOUT) + kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE) + kwargs['env'] = filtered_env + + proc = subprocess.Popen(*args, **kwargs) + + output, error = proc.communicate() + + if output: + output = output.decode("utf-8") + output += "\n" + for line in output.strip().split("\n"): + log.info(line) + + if error: + error = error.decode("utf-8") + error += "\n" + for line in error.strip().split("\n"): + log.error(line) if proc.returncode != 0: - log.error(output) raise ValueError("\"{}\" was not successful: {}".format(args, output)) return output @@ -181,9 +196,13 @@ def any_outdated(): if representation in checked: continue - representation_doc = io.find_one({"_id": io.ObjectId(representation), - "type": "representation"}, - projection={"parent": True}) + representation_doc = io.find_one( + { + "_id": io.ObjectId(representation), + "type": "representation" + }, + projection={"parent": True} + ) if representation_doc and not is_latest(representation_doc): return True elif not representation_doc: @@ -293,27 +312,38 @@ def switch_item(container, representation_name = representation["name"] # Find the new one - asset = io.find_one({"name": asset_name, "type": "asset"}) + asset = io.find_one({ + "name": asset_name, + "type": "asset" + }) assert asset, ("Could not find asset in the database with the name " "'%s'" % asset_name) - subset = io.find_one({"name": subset_name, - "type": "subset", - "parent": asset["_id"]}) + subset = io.find_one({ + "name": subset_name, + "type": "subset", + "parent": asset["_id"] + }) assert subset, ("Could not find subset in the database with the name " "'%s'" % subset_name) - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[('name', -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[('name', -1)] + ) assert version, "Could not find a version for {}.{}".format( asset_name, subset_name ) - representation = io.find_one({"name": representation_name, - "type": "representation", - "parent": version["_id"]}) + representation = io.find_one({ + "name": representation_name, + "type": "representation", + "parent": version["_id"]} + ) assert representation, ("Could not find representation in the database with" " the name '%s'" % representation_name) @@ -351,7 +381,10 @@ def get_asset(asset_name=None): if not asset_name: asset_name = avalon.api.Session["AVALON_ASSET"] - asset_document = io.find_one({"name": asset_name, "type": "asset"}) + asset_document = io.find_one({ + "name": asset_name, + "type": "asset" + }) if not asset_document: raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) @@ -523,8 +556,7 @@ def get_subsets(asset_name, from avalon import io # query 
asset from db - asset_io = io.find_one({"type": "asset", - "name": asset_name}) + asset_io = io.find_one({"type": "asset", "name": asset_name}) # check if anything returned assert asset_io, "Asset not existing. \ @@ -548,14 +580,20 @@ def get_subsets(asset_name, # Process subsets for subset in subsets: if not version: - version_sel = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version_sel = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) else: assert isinstance(version, int), "version needs to be `int` type" - version_sel = io.find_one({"type": "version", - "parent": subset["_id"], - "name": int(version)}) + version_sel = io.find_one({ + "type": "version", + "parent": subset["_id"], + "name": int(version) + }) find_dict = {"type": "representation", "parent": version_sel["_id"]} diff --git a/pype/logging/gui/app.py b/pype/logging/gui/app.py index 7cee280158..9767077f80 100644 --- a/pype/logging/gui/app.py +++ b/pype/logging/gui/app.py @@ -33,5 +33,7 @@ class LogsWindow(QtWidgets.QWidget): def on_selection_changed(self): index = self.logs_widget.selected_log() + if not index or not index.isValid(): + return node = index.data(self.logs_widget.model.NodeRole) self.log_detail.set_detail(node) diff --git a/pype/logging/gui/widgets.py b/pype/logging/gui/widgets.py index 66692c2c65..10aad3c282 100644 --- a/pype/logging/gui/widgets.py +++ b/pype/logging/gui/widgets.py @@ -1,11 +1,7 @@ -import datetime -import inspect +import getpass from Qt import QtCore, QtWidgets, QtGui -from PyQt5.QtCore import QVariant from .models import LogModel -from .lib import preserve_states - class SearchComboBox(QtWidgets.QComboBox): """Searchable ComboBox with empty placeholder value as first value""" @@ -53,6 +49,7 @@ class SearchComboBox(QtWidgets.QComboBox): return text + class CheckableComboBox2(QtWidgets.QComboBox): def __init__(self, parent=None): super(CheckableComboBox, self).__init__(parent) @@ -96,9 +93,11 @@ class SelectableMenu(QtWidgets.QMenu): else: super(SelectableMenu, self).mouseReleaseEvent(event) + class CustomCombo(QtWidgets.QWidget): selection_changed = QtCore.Signal() + checked_changed = QtCore.Signal(bool) def __init__(self, title, parent=None): super(CustomCombo, self).__init__(parent) @@ -127,12 +126,27 @@ class CustomCombo(QtWidgets.QWidget): self.toolmenu.clear() self.addItems(items) + def select_items(self, items, ignore_input=False): + if not isinstance(items, list): + items = [items] + + for action in self.toolmenu.actions(): + check = True + if ( + action.text() in items and ignore_input or + action.text() not in items and not ignore_input + ): + check = False + + action.setChecked(check) + def addItems(self, items): for item in items: action = self.toolmenu.addAction(item) action.setCheckable(True) - action.setChecked(True) self.toolmenu.addAction(action) + action.setChecked(True) + action.triggered.connect(self.checked_changed) def items(self): for action in self.toolmenu.actions(): @@ -186,15 +200,42 @@ class CheckableComboBox(QtWidgets.QComboBox): for text, checked in items: text_item = QtGui.QStandardItem(text) checked_item = QtGui.QStandardItem() - checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole) + checked_item.setData( + QtCore.QVariant(checked), QtCore.Qt.CheckStateRole + ) self.model.appendRow([text_item, checked_item]) +class FilterLogModel(QtCore.QSortFilterProxyModel): + sub_dict = ["$gt", "$lt", "$not"] + def __init__(self, key_values, parent=None): + super(FilterLogModel, 
self).__init__(parent) + self.allowed_key_values = key_values + + def filterAcceptsRow(self, row, parent): + """ + Reimplemented from base class. + """ + model = self.sourceModel() + for key, values in self.allowed_key_values.items(): + col_indx = model.COLUMNS.index(key) + value = model.index(row, col_indx, parent).data( + QtCore.Qt.DisplayRole + ) + if value not in values: + return False + return True + + class LogsWidget(QtWidgets.QWidget): """A widget that lists the published subsets for an asset""" active_changed = QtCore.Signal() + _level_order = [ + "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL" + ] + def __init__(self, parent=None): super(LogsWidget, self).__init__(parent=parent) @@ -202,47 +243,45 @@ class LogsWidget(QtWidgets.QWidget): filter_layout = QtWidgets.QHBoxLayout() - # user_filter = SearchComboBox(self, "Users") user_filter = CustomCombo("Users", self) users = model.dbcon.distinct("user") user_filter.populate(users) - user_filter.selection_changed.connect(self.user_changed) + user_filter.checked_changed.connect(self.user_changed) + user_filter.select_items(getpass.getuser()) level_filter = CustomCombo("Levels", self) - # levels = [(level, True) for level in model.dbcon.distinct("level")] levels = model.dbcon.distinct("level") - level_filter.addItems(levels) + _levels = [] + for level in self._level_order: + if level in levels: + _levels.append(level) + level_filter.populate(_levels) + level_filter.checked_changed.connect(self.level_changed) - date_from_label = QtWidgets.QLabel("From:") - date_filter_from = QtWidgets.QDateTimeEdit() - - date_from_layout = QtWidgets.QVBoxLayout() - date_from_layout.addWidget(date_from_label) - date_from_layout.addWidget(date_filter_from) - - # now = datetime.datetime.now() - # QtCore.QDateTime(now.year, now.month, now.day, now.hour, now.minute, second = 0, msec = 0, timeSpec = 0) - date_to_label = QtWidgets.QLabel("To:") - date_filter_to = QtWidgets.QDateTimeEdit() - - date_to_layout = QtWidgets.QVBoxLayout() - date_to_layout.addWidget(date_to_label) - date_to_layout.addWidget(date_filter_to) + # date_from_label = QtWidgets.QLabel("From:") + # date_filter_from = QtWidgets.QDateTimeEdit() + # + # date_from_layout = QtWidgets.QVBoxLayout() + # date_from_layout.addWidget(date_from_label) + # date_from_layout.addWidget(date_filter_from) + # + # date_to_label = QtWidgets.QLabel("To:") + # date_filter_to = QtWidgets.QDateTimeEdit() + # + # date_to_layout = QtWidgets.QVBoxLayout() + # date_to_layout.addWidget(date_to_label) + # date_to_layout.addWidget(date_filter_to) filter_layout.addWidget(user_filter) filter_layout.addWidget(level_filter) + filter_layout.setAlignment(QtCore.Qt.AlignLeft) - filter_layout.addLayout(date_from_layout) - filter_layout.addLayout(date_to_layout) + # filter_layout.addLayout(date_from_layout) + # filter_layout.addLayout(date_to_layout) view = QtWidgets.QTreeView(self) view.setAllColumnsShowFocus(True) - # # Set view delegates - # time_delegate = PrettyTimeDelegate() - # column = model.COLUMNS.index("time") - # view.setItemDelegateForColumn(column, time_delegate) - layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) layout.addLayout(filter_layout) @@ -255,34 +294,54 @@ class LogsWidget(QtWidgets.QWidget): QtCore.Qt.AscendingOrder ) - view.setModel(model) + key_val = { + "user": users, + "level": levels + } + proxy_model = FilterLogModel(key_val, view) + proxy_model.setSourceModel(model) + view.setModel(proxy_model) view.customContextMenuRequested.connect(self.on_context_menu) 
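# (A minimal usage sketch, not from the diff itself; the model/view names
# are illustrative. FilterLogModel keeps a row only when, for every
# filtered column, the cell's display value is in the allowed list:
#
#     proxy = FilterLogModel({"user": ["jdoe"], "level": ["ERROR"]})
#     proxy.setSourceModel(model)
#     view.setModel(proxy)
#
# Widening or narrowing a filter is done by mutating
# proxy.allowed_key_values and calling proxy.invalidate(), as the
# user_changed()/level_changed() slots below do.)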
view.selectionModel().selectionChanged.connect(self.active_changed) - # user_filter.connect() - # TODO remove if nothing will affect... - # header = self.view.header() + # WARNING this is cool but slows down widget a lot + # header = view.header() # # Enforce the columns to fit the data (purely cosmetic) # if Qt.__binding__ in ("PySide2", "PyQt5"): # header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents) # else: # header.setResizeMode(QtWidgets.QHeaderView.ResizeToContents) - # Set signals - # prepare model.refresh() # Store to memory self.model = model + self.proxy_model = proxy_model self.view = view self.user_filter = user_filter + self.level_filter = level_filter def user_changed(self): + valid_actions = [] for action in self.user_filter.items(): - print(action) + if action.isChecked(): + valid_actions.append(action.text()) + + self.proxy_model.allowed_key_values["user"] = valid_actions + self.proxy_model.invalidate() + + def level_changed(self): + valid_actions = [] + for action in self.level_filter.items(): + if action.isChecked(): + valid_actions.append(action.text()) + + self.proxy_model.allowed_key_values["level"] = valid_actions + self.proxy_model.invalidate() + def on_context_menu(self, point): # TODO will be any actions? it's ready diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index b4dbc52bc8..f027893a0e 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -162,6 +162,7 @@ def on_open(_): # Validate FPS after update_task_from_path to # ensure it is using correct FPS for the asset lib.validate_fps() + lib.fix_incompatible_containers() if any_outdated(): log.warning("Scene has outdated content.") diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 0890d3863e..ec39b3556e 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2318,6 +2318,25 @@ def get_attr_in_layer(attr, layer): return cmds.getAttr(attr) +def fix_incompatible_containers(): + """Return whether the current scene has any outdated content""" + + host = avalon.api.registered_host() + for container in host.ls(): + loader = container['loader'] + + print(container['loader']) + + if loader in ["MayaAsciiLoader", + "AbcLoader", + "ModelLoader", + "CameraLoader", + "RigLoader", + "FBXLoader"]: + cmds.setAttr(container["objectName"] + ".loader", + "ReferenceLoader", type="string") + + def _null(*args): pass diff --git a/pype/maya/menu.py b/pype/maya/menu.py index 5254337f03..806944c117 100644 --- a/pype/maya/menu.py +++ b/pype/maya/menu.py @@ -15,12 +15,13 @@ log = logging.getLogger(__name__) def _get_menu(): """Return the menu instance if it currently exists in Maya""" - app = QtWidgets.QApplication.instance() - widgets = dict((w.objectName(), w) for w in app.allWidgets()) + widgets = dict(( + w.objectName(), w) for w in QtWidgets.QApplication.allWidgets()) menu = widgets.get(self._menu) return menu + def deferred(): log.info("Attempting to install scripts menu..") diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index b7dbf69510..dfd61f4b39 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -33,40 +33,41 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -class NukeHandler(logging.Handler): - ''' - Nuke Handler - emits logs into nuke's script editor. - warning will emit nuke.warning() - critical and fatal would popup msg dialog to alert of the error. - ''' +# class NukeHandler(logging.Handler): +# ''' +# Nuke Handler - emits logs into nuke's script editor. 
+#     warning will emit nuke.warning()
+#     critical and fatal would popup msg dialog to alert of the error.
+#     '''
+#
+#     def __init__(self):
+#         logging.Handler.__init__(self)
+#         self.set_name("Pype_Nuke_Handler")
+#
+#     def emit(self, record):
+#         # Formatted message:
+#         msg = self.format(record)
+#
+#         if record.levelname.lower() in [
+#             # "warning",
+#             "critical",
+#             "fatal",
+#             "error"
+#         ]:
+#             msg = self.format(record)
+#             nuke.message(msg)
+#
+#
+# '''Adding Nuke Logging Handler'''
+# log.info([handler.get_name() for handler in logging.root.handlers[:]])
+# nuke_handler = NukeHandler()
+# if nuke_handler.get_name() \
+#         not in [handler.get_name()
+#                 for handler in logging.root.handlers[:]]:
+#     logging.getLogger().addHandler(nuke_handler)
+#     logging.getLogger().setLevel(logging.INFO)
+# log.info([handler.get_name() for handler in logging.root.handlers[:]])

-    def __init__(self):
-        logging.Handler.__init__(self)
-        self.set_name("Pype_Nuke_Handler")
-
-    def emit(self, record):
-        # Formated message:
-        msg = self.format(record)
-
-        if record.levelname.lower() in [
-            # "warning",
-            "critical",
-            "fatal",
-            "error"
-        ]:
-            msg = self.format(record)
-            nuke.message(msg)
-
-
-'''Adding Nuke Logging Handler'''
-log.info([handler.get_name() for handler in logging.root.handlers[:]])
-nuke_handler = NukeHandler()
-if nuke_handler.get_name() \
-        not in [handler.get_name()
-                for handler in logging.root.handlers[:]]:
-    logging.getLogger().addHandler(nuke_handler)
-    logging.getLogger().setLevel(logging.INFO)
-log.info([handler.get_name() for handler in logging.root.handlers[:]])


 def reload_config():
     """Attempt to reload pipeline at run-time.
@@ -112,7 +113,9 @@ def install():
     # Disable all families except for the ones we explicitly want to see
     family_states = [
         "write",
-        "review"
+        "review",
+        "nukenodes",
+        "gizmo"
     ]

     avalon.data["familiesStateDefault"] = False
diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index 157af9019d..db1a5919c3 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -6,6 +6,7 @@ from collections import OrderedDict

 from avalon import api, io, lib
 import avalon.nuke
+from avalon.nuke import lib as anlib
 import pype.api as pype

 import nuke
@@ -20,7 +21,6 @@ from .presets import (
 from .presets import (
     get_anatomy
 )
-# TODO: remove get_anatomy and import directly Anatomy() here

 from pypeapp import Logger
 log = Logger().get_logger(__name__, "nuke")
@@ -49,8 +49,6 @@ def checkInventoryVersions():
     and check if the node is having actual version. If not then it will
     color it to red.
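+
+    Example of the tinting applied to an outdated node (illustrative;
+    `tile_color` is the knob Nuke uses for node tinting, the exact color
+    value here is an assumption):
+
+        nuke.toNode("Read1")["tile_color"].setValue(0xff0000ff)  # RGBA red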
""" - # TODO: make it for all nodes not just Read (Loader - # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): if each.Class() == 'Read': @@ -92,7 +90,6 @@ def checkInventoryVersions(): def writes_version_sync(): ''' Callback synchronizing version of publishable write nodes ''' - # TODO: make it work with new write node group try: rootVersion = pype.get_version_from_path(nuke.root().name()) padding = len(rootVersion) @@ -105,6 +102,10 @@ def writes_version_sync(): for each in nuke.allNodes(): if each.Class() == 'Write': + # check if the node is avalon tracked + if "AvalonTab" not in each.knobs(): + continue + avalon_knob_data = avalon.nuke.get_avalon_knob_data( each, ['avalon:', 'ak:']) @@ -125,7 +126,8 @@ def writes_version_sync(): os.makedirs(os.path.dirname(node_new_file), 0o766) except Exception as e: log.warning( - "Write node: `{}` has no version in path: {}".format(each.name(), e)) + "Write node: `{}` has no version in path: {}".format( + each.name(), e)) def version_up_script(): @@ -178,9 +180,12 @@ def format_anatomy(data): try: padding = int(anatomy.templates['render']['padding']) except KeyError as e: - log.error("`padding` key is not in `render` " - "Anatomy template. Please, add it there and restart " - "the pipeline (padding: \"4\"): `{}`".format(e)) + msg = ("`padding` key is not in `render` " + "Anatomy template. Please, add it there and restart " + "the pipeline (padding: \"4\"): `{}`").format(e) + + log.error(msg) + nuke.message(msg) version = data.get("version", None) if not version: @@ -260,7 +265,9 @@ def create_write_node(name, data, input=None, prenodes=None): anatomy_filled = format_anatomy(data) except Exception as e: - log.error("problem with resolving anatomy tepmlate: {}".format(e)) + msg = "problem with resolving anatomy tepmlate: {}".format(e) + log.error(msg) + nuke.message(msg) # build file path to workfiles fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/") @@ -538,8 +545,11 @@ class WorkfileSettings(object): viewer_dict (dict): adjustments from presets ''' - assert isinstance(viewer_dict, dict), log.error( - "set_viewers_colorspace(): argument should be dictionary") + if not isinstance(viewer_dict, dict): + msg = "set_viewers_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) + return filter_knobs = [ "viewerProcess", @@ -587,8 +597,10 @@ class WorkfileSettings(object): root_dict (dict): adjustmensts from presets ''' - assert isinstance(root_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(root_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) log.debug(">> root_dict: {}".format(root_dict)) @@ -635,8 +647,11 @@ class WorkfileSettings(object): ''' # TODO: complete this function so any write node in # scene will have fixed colorspace following presets for the project - assert isinstance(write_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(write_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + nuke.message(msg) + log.error(msg) + return log.debug("__ set_writes_colorspace(): {}".format(write_dict)) @@ -648,25 +663,28 @@ class WorkfileSettings(object): try: self.set_root_colorspace(nuke_colorspace["root"]) except AttributeError: - log.error( - "set_colorspace(): missing `root` settings in template") + msg = "set_colorspace(): missing `root` settings in template" + try: 
             self.set_viewers_colorspace(nuke_colorspace["viewer"])
         except AttributeError:
-            log.error(
-                "set_colorspace(): missing `viewer` settings in template")
+            msg = "set_colorspace(): missing `viewer` settings in template"
+            nuke.message(msg)
+            log.error(msg)

         try:
             self.set_writes_colorspace(nuke_colorspace["write"])
         except AttributeError:
-            log.error(
-                "set_colorspace(): missing `write` settings in template")
+            msg = "set_colorspace(): missing `write` settings in template"
+            nuke.message(msg)
+            log.error(msg)

         try:
             for key in nuke_colorspace:
                 log.debug("Preset's colorspace key: {}".format(key))
         except TypeError:
-            log.error("Nuke is not in templates! \n\n\n"
-                      "contact your supervisor!")
+            msg = "Nuke is not in templates! Contact your supervisor!"
+            nuke.message(msg)
+            log.error(msg)

     def reset_frame_range_handles(self):
         """Set frame range to current asset"""
@@ -702,9 +720,11 @@ class WorkfileSettings(object):
         frame_start = int(data["frameStart"]) - handle_start
         frame_end = int(data["frameEnd"]) + handle_end

+        self._root_node["lock_range"].setValue(False)
         self._root_node["fps"].setValue(fps)
         self._root_node["first_frame"].setValue(frame_start)
         self._root_node["last_frame"].setValue(frame_end)
+        self._root_node["lock_range"].setValue(True)

         # setting active viewers
         try:
@@ -751,13 +771,13 @@ class WorkfileSettings(object):
         }

         if any(x for x in data.values() if x is None):
-            log.error(
-                "Missing set shot attributes in DB."
-                "\nContact your supervisor!."
-                "\n\nWidth: `{width}`"
-                "\nHeight: `{height}`"
-                "\nPixel Asspect: `{pixel_aspect}`".format(**data)
-            )
+            msg = ("Missing set shot attributes in DB."
+                   "\nContact your supervisor!"
+                   "\n\nWidth: `{width}`"
+                   "\nHeight: `{height}`"
+                   "\nPixel Aspect: `{pixel_aspect}`").format(**data)
+            log.error(msg)
+            nuke.message(msg)

         bbox = self._asset_entity.get('data', {}).get('crop')

@@ -774,10 +794,10 @@ class WorkfileSettings(object):
                 )
             except Exception as e:
                 bbox = None
-                log.error(
-                    "{}: {} \nFormat:Crop need to be set with dots, example: "
-                    "0.0.1920.1080, /nSetting to default".format(__name__, e)
-                )
+                msg = ("{}: {} \nFormat:Crop need to be set with dots, "
+                       "example: 0.0.1920.1080.\nSetting to default").format(
+                           __name__, e)
+                log.error(msg)
+                nuke.message(msg)

         existing_format = None
         for format in nuke.formats():
@@ -1190,3 +1210,454 @@ class BuildWorkfile(WorkfileSettings):

     def position_up(self, multiply=1):
         self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
+
+
+class ExporterReview:
+    """
+    Base class object for generating review data from Nuke
+
+    Args:
+        klass (pyblish.plugin): pyblish plugin parent
+        instance (pyblish.instance): instance of pyblish context
+
+    """
+
+    def __init__(self,
+                 klass,
+                 instance
+                 ):
+
+        self.log = klass.log
+        self.instance = instance
+        # keep these per instance; class-level defaults would be shared
+        # between every exporter created in the session
+        self._temp_nodes = []
+        self.data = {"representations": []}
+        self.path_in = self.instance.data.get("path", None)
+        self.staging_dir = self.instance.data["stagingDir"]
+        self.collection = self.instance.data.get("collection", None)
+
+    def get_file_info(self):
+        if self.collection:
+            self.log.debug("Collection: `{}`".format(self.collection))
+            # get path
+            self.fname = os.path.basename(self.collection.format(
+                "{head}{padding}{tail}"))
+            self.fhead = self.collection.format("{head}")
+
+            # get first and last frame
+            self.first_frame = min(self.collection.indexes)
+            self.last_frame = max(self.collection.indexes)
+            if "slate" in self.instance.data["families"]:
+                self.first_frame += 1
+        else:
+            self.fname = os.path.basename(self.path_in)
+            self.fhead = 
os.path.splitext(self.fname)[0] + "." + self.first_frame = self.instance.data.get("frameStart", None) + self.last_frame = self.instance.data.get("frameEnd", None) + + if "#" in self.fhead: + self.fhead = self.fhead.replace("#", "")[:-1] + + def get_representation_data(self, tags=None, range=False): + add_tags = [] + if tags: + add_tags = tags + + repre = { + 'name': self.name, + 'ext': self.ext, + 'files': self.file, + "stagingDir": self.staging_dir, + "anatomy_template": "publish", + "tags": [self.name.replace("_", "-")] + add_tags + } + + if range: + repre.update({ + "frameStart": self.first_frame, + "frameEnd": self.last_frame, + }) + + self.data["representations"].append(repre) + + def get_view_process_node(self): + """ + Will get any active view process. + + Arguments: + self (class): in object definition + + Returns: + nuke.Node: copy node of Input Process node + """ + anlib.reset_selection() + ipn_orig = None + for v in [n for n in nuke.allNodes() + if "Viewer" in n.Class()]: + ip = v['input_process'].getValue() + ipn = v['input_process_node'].getValue() + if "VIEWER_INPUT" not in ipn and ip: + ipn_orig = nuke.toNode(ipn) + ipn_orig.setSelected(True) + + if ipn_orig: + # copy selected to clipboard + nuke.nodeCopy('%clipboard%') + # reset selection + anlib.reset_selection() + # paste node and selection is on it only + nuke.nodePaste('%clipboard%') + # assign to variable + ipn = nuke.selectedNode() + + return ipn + + def clean_nodes(self): + for node in self._temp_nodes: + nuke.delete(node) + self.log.info("Deleted nodes...") + + +class ExporterReviewLut(ExporterReview): + """ + Generator object for review lut from Nuke + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + + """ + def __init__(self, + klass, + instance, + name=None, + ext=None, + cube_size=None, + lut_size=None, + lut_style=None): + # initialize parent class + ExporterReview.__init__(self, klass, instance) + + # deal with now lut defined in viewer lut + if hasattr(klass, "viewer_lut_raw"): + self.viewer_lut_raw = klass.viewer_lut_raw + else: + self.viewer_lut_raw = False + + self.name = name or "baked_lut" + self.ext = ext or "cube" + self.cube_size = cube_size or 32 + self.lut_size = lut_size or 1024 + self.lut_style = lut_style or "linear" + + # set frame start / end and file name to self + self.get_file_info() + + self.log.info("File info was set...") + + self.file = self.fhead + self.name + ".{}".format(self.ext) + self.path = os.path.join( + self.staging_dir, self.file).replace("\\", "/") + + def generate_lut(self): + # ---------- start nodes creation + + # CMSTestPattern + cms_node = nuke.createNode("CMSTestPattern") + cms_node["cube_size"].setValue(self.cube_size) + # connect + self._temp_nodes.append(cms_node) + self.previous_node = cms_node + self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes)) + + # Node View Process + ipn = self.get_view_process_node() + if ipn is not None: + # connect + ipn.setInput(0, self.previous_node) + self._temp_nodes.append(ipn) + self.previous_node = ipn + self.log.debug("ViewProcess... `{}`".format(self._temp_nodes)) + + if not self.viewer_lut_raw: + # OCIODisplay + dag_node = nuke.createNode("OCIODisplay") + # connect + dag_node.setInput(0, self.previous_node) + self._temp_nodes.append(dag_node) + self.previous_node = dag_node + self.log.debug("OCIODisplay... 
`{}`".format(self._temp_nodes)) + + # GenerateLUT + gen_lut_node = nuke.createNode("GenerateLUT") + gen_lut_node["file"].setValue(self.path) + gen_lut_node["file_type"].setValue(".{}".format(self.ext)) + gen_lut_node["lut1d"].setValue(self.lut_size) + gen_lut_node["style1d"].setValue(self.lut_style) + # connect + gen_lut_node.setInput(0, self.previous_node) + self._temp_nodes.append(gen_lut_node) + self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes)) + + # ---------- end nodes creation + + # Export lut file + nuke.execute( + gen_lut_node.name(), + int(self.first_frame), + int(self.first_frame)) + + self.log.info("Exported...") + + # ---------- generate representation data + self.get_representation_data() + + self.log.debug("Representation... `{}`".format(self.data)) + + # ---------- Clean up + self.clean_nodes() + + return self.data + + +class ExporterReviewMov(ExporterReview): + """ + Metaclass for generating review mov files + + Args: + klass (pyblish.plugin): pyblish plugin parent + instance (pyblish.instance): instance of pyblish context + + """ + def __init__(self, + klass, + instance, + name=None, + ext=None, + ): + # initialize parent class + ExporterReview.__init__(self, klass, instance) + + # passing presets for nodes to self + if hasattr(klass, "nodes"): + self.nodes = klass.nodes + else: + self.nodes = {} + + # deal with now lut defined in viewer lut + if hasattr(klass, "viewer_lut_raw"): + self.viewer_lut_raw = klass.viewer_lut_raw + else: + self.viewer_lut_raw = False + + self.name = name or "baked" + self.ext = ext or "mov" + + # set frame start / end and file name to self + self.get_file_info() + + self.log.info("File info was set...") + + self.file = self.fhead + self.name + ".{}".format(self.ext) + self.path = os.path.join( + self.staging_dir, self.file).replace("\\", "/") + + def render(self, render_node_name): + self.log.info("Rendering... ") + # Render Write node + nuke.execute( + render_node_name, + int(self.first_frame), + int(self.last_frame)) + + self.log.info("Rendered...") + + def save_file(self): + import shutil + with anlib.maintained_selection(): + self.log.info("Saving nodes as file... ") + # create nk path + path = os.path.splitext(self.path)[0] + ".nk" + # save file to the path + shutil.copyfile(self.instance.context.data["currentFile"], path) + + self.log.info("Nodes exported...") + return path + + def generate_mov(self, farm=False): + # ---------- start nodes creation + + # Read node + r_node = nuke.createNode("Read") + r_node["file"].setValue(self.path_in) + r_node["first"].setValue(self.first_frame) + r_node["origfirst"].setValue(self.first_frame) + r_node["last"].setValue(self.last_frame) + r_node["origlast"].setValue(self.last_frame) + # connect + self._temp_nodes.append(r_node) + self.previous_node = r_node + self.log.debug("Read... `{}`".format(self._temp_nodes)) + + # View Process node + ipn = self.get_view_process_node() + if ipn is not None: + # connect + ipn.setInput(0, self.previous_node) + self._temp_nodes.append(ipn) + self.previous_node = ipn + self.log.debug("ViewProcess... `{}`".format(self._temp_nodes)) + + if not self.viewer_lut_raw: + # OCIODisplay node + dag_node = nuke.createNode("OCIODisplay") + # connect + dag_node.setInput(0, self.previous_node) + self._temp_nodes.append(dag_node) + self.previous_node = dag_node + self.log.debug("OCIODisplay... 
`{}`".format(self._temp_nodes)) + + # Write node + write_node = nuke.createNode("Write") + self.log.debug("Path: {}".format(self.path)) + write_node["file"].setValue(self.path) + write_node["file_type"].setValue(self.ext) + write_node["meta_codec"].setValue("ap4h") + write_node["mov64_codec"].setValue("ap4h") + write_node["mov64_write_timecode"].setValue(1) + write_node["raw"].setValue(1) + # connect + write_node.setInput(0, self.previous_node) + self._temp_nodes.append(write_node) + self.log.debug("Write... `{}`".format(self._temp_nodes)) + # ---------- end nodes creation + + # ---------- render or save to nk + if farm: + nuke.scriptSave() + path_nk = self.save_file() + self.data.update({ + "bakeScriptPath": path_nk, + "bakeWriteNodeName": write_node.name(), + "bakeRenderPath": self.path + }) + else: + self.render(write_node.name()) + # ---------- generate representation data + self.get_representation_data( + tags=["review", "delete"], + range=True + ) + + self.log.debug("Representation... `{}`".format(self.data)) + + # ---------- Clean up + self.clean_nodes() + nuke.scriptSave() + return self.data + + +def get_dependent_nodes(nodes): + """Get all dependent nodes connected to the list of nodes. + + Looking for connections outside of the nodes in incoming argument. + + Arguments: + nodes (list): list of nuke.Node objects + + Returns: + connections_in: dictionary of nodes and its dependencies + connections_out: dictionary of nodes and its dependency + """ + + connections_in = dict() + connections_out = dict() + node_names = [n.name() for n in nodes] + for node in nodes: + inputs = node.dependencies() + outputs = node.dependent() + # collect all inputs outside + test_in = [(i, n) for i, n in enumerate(inputs) + if n.name() not in node_names] + if test_in: + connections_in.update({ + node: test_in + }) + # collect all outputs outside + test_out = [i for i in outputs if i.name() not in node_names] + if test_out: + # only one dependent node is allowed + connections_out.update({ + node: test_out[-1] + }) + + return connections_in, connections_out + + +def find_free_space_to_paste_nodes( + nodes, + group=nuke.root(), + direction="right", + offset=300): + """ + For getting coordinates in DAG (node graph) for placing new nodes + + Arguments: + nodes (list): list of nuke.Node objects + group (nuke.Node) [optional]: object in which context it is + direction (str) [optional]: where we want it to be placed + [left, right, top, bottom] + offset (int) [optional]: what offset it is from rest of nodes + + Returns: + xpos (int): x coordinace in DAG + ypos (int): y coordinace in DAG + """ + if len(nodes) == 0: + return 0, 0 + + group_xpos = list() + group_ypos = list() + + # get local coordinates of all nodes + nodes_xpos = [n.xpos() for n in nodes] + \ + [n.xpos() + n.screenWidth() for n in nodes] + + nodes_ypos = [n.ypos() for n in nodes] + \ + [n.ypos() + n.screenHeight() for n in nodes] + + # get complete screen size of all nodes to be placed in + nodes_screen_width = max(nodes_xpos) - min(nodes_xpos) + nodes_screen_heigth = max(nodes_ypos) - min(nodes_ypos) + + # get screen size (r,l,t,b) of all nodes in `group` + with group: + group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \ + [n.xpos() + n.screenWidth() for n in nuke.allNodes() + if n not in nodes] + group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \ + [n.ypos() + n.screenHeight() for n in nuke.allNodes() + if n not in nodes] + + # calc output left + if direction in "left": + xpos = min(group_xpos) - 
abs(nodes_screen_width) - abs(offset)
+        ypos = min(group_ypos)
+        return xpos, ypos
+    # calc output right
+    if direction in "right":
+        xpos = max(group_xpos) + abs(offset)
+        ypos = min(group_ypos)
+        return xpos, ypos
+    # calc output top
+    if direction in "top":
+        xpos = min(group_xpos)
+        ypos = min(group_ypos) - abs(nodes_screen_heigth) - abs(offset)
+        return xpos, ypos
+    # calc output bottom
+    if direction in "bottom":
+        xpos = min(group_xpos)
+        ypos = max(group_ypos) + abs(offset)
+        return xpos, ypos
diff --git a/pype/nuke/presets.py b/pype/nuke/presets.py
index e0c12e2671..a413ccc878 100644
--- a/pype/nuke/presets.py
+++ b/pype/nuke/presets.py
@@ -1,6 +1,6 @@
 from pype import api as pype
 from pypeapp import Anatomy, config
-
+import nuke

 log = pype.Logger().get_logger(__name__, "nuke")

@@ -28,7 +28,7 @@ def get_node_dataflow_preset(**kwarg):
     families = kwarg.get("families", [])
     preset = kwarg.get("preset", None)
     # omit < 2.0.0v
-    assert any([host, cls]), log.error(
-        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
+    if not any([host, cls]):
+        msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)
+        log.error(msg)
+        nuke.message(msg)
+        raise ValueError(msg)

     nuke_dataflow = get_dataflow_preset().get(str(host), None)
@@ -56,8 +56,10 @@ def get_node_colorspace_preset(**kwarg):
     families = kwarg.get("families", [])
     preset = kwarg.get("preset", None)
     # omit < 2.0.0v
-    assert any([host, cls]), log.error(
-        "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__))
+    if not any([host, cls]):
+        msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)
+        log.error(msg)
+        nuke.message(msg)
+        raise ValueError(msg)

     nuke_colorspace = get_colorspace_preset().get(str(host), None)
     nuke_colorspace_node = nuke_colorspace.get(str(cls), None)
diff --git a/pype/nuke/utils.py b/pype/nuke/utils.py
new file mode 100644
index 0000000000..7583221696
--- /dev/null
+++ b/pype/nuke/utils.py
@@ -0,0 +1,64 @@
+import os
+import nuke
+from avalon.nuke import lib as anlib
+
+
+def get_node_outputs(node):
+    '''
+    Return a dictionary of the nodes and pipes that are connected to node
+    '''
+    dep_dict = {}
+    dependencies = node.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)
+    for d in dependencies:
+        dep_dict[d] = []
+        for i in range(d.inputs()):
+            if d.input(i) == node:
+                dep_dict[d].append(i)
+    return dep_dict
+
+
+def is_node_gizmo(node):
+    '''
+    Return True if node is a gizmo
+    '''
+    return 'gizmo_file' in node.knobs()
+
+
+def gizmo_is_nuke_default(gizmo):
+    '''Check if gizmo is in default install path'''
+    plug_dir = os.path.join(os.path.dirname(
+        nuke.env['ExecutablePath']), 'plugins')
+    return gizmo.filename().startswith(plug_dir)
+
+
+def bake_gizmos_recursively(in_group=nuke.Root()):
+    """Convert gizmos to groups
+
+    Arguments:
+        in_group (nuke.Node)[optional]: group node or all nodes
+    """
+    # preserve selection after all is done
+    with anlib.maintained_selection():
+        # jump to the group
+        with in_group:
+            for node in nuke.allNodes():
+                if is_node_gizmo(node) and not gizmo_is_nuke_default(node):
+                    with node:
+                        outputs = get_node_outputs(node)
+                        group = node.makeGroup()
+                        # Reconnect inputs and outputs if any
+                        if outputs:
+                            for n, pipes in outputs.items():
+                                for i in pipes:
+                                    n.setInput(i, group)
+                        for i in range(node.inputs()):
+                            group.setInput(i, node.input(i))
+                        # set node position and name
+                        group.setXYpos(node.xpos(), node.ypos())
+                        name = node.name()
+                        nuke.delete(node)
+                        group.setName(name)
+                        node = group
+
+                if node.Class() == "Group":
+                    bake_gizmos_recursively(node)
diff --git a/pype/nukestudio/workio.py b/pype/nukestudio/workio.py
index 1681d8a2ab..c7484b826b 100644
--- a/pype/nukestudio/workio.py
+++ 
b/pype/nukestudio/workio.py @@ -22,19 +22,16 @@ def has_unsaved_changes(): def save_file(filepath): + file = os.path.basename(filepath) project = hiero.core.projects()[-1] - # close `Untitled` project - if "Untitled" not in project.name(): - log.info("Saving project: `{}`".format(project.name())) + if project: + log.info("Saving project: `{}` as '{}'".format(project.name(), file)) project.saveAs(filepath) - elif not project: + else: log.info("Creating new project...") project = hiero.core.newProject() project.saveAs(filepath) - else: - log.info("Dropping `Untitled` project...") - return def open_file(filepath): diff --git a/pype/plugins/aport/publish/collect_context.py b/pype/plugins/aport/publish/collect_context.py index 2aaa89fd05..35811d6378 100644 --- a/pype/plugins/aport/publish/collect_context.py +++ b/pype/plugins/aport/publish/collect_context.py @@ -1,9 +1,6 @@ import os import pyblish.api -from avalon import ( - io, - api as avalon -) +from avalon import api as avalon from pype import api as pype import json from pathlib import Path diff --git a/pype/plugins/blender/create/create_model.py b/pype/plugins/blender/create/create_model.py new file mode 100644 index 0000000000..7301073f05 --- /dev/null +++ b/pype/plugins/blender/create/create_model.py @@ -0,0 +1,32 @@ +"""Create a model asset.""" + +import bpy + +from avalon import api +from avalon.blender import Creator, lib + + +class CreateModel(Creator): + """Polygonal static geometry""" + + name = "modelMain" + label = "Model" + family = "model" + icon = "cube" + + def process(self): + import pype.blender + + asset = self.data["asset"] + subset = self.data["subset"] + name = pype.blender.plugin.model_name(asset, subset) + collection = bpy.data.collections.new(name=name) + bpy.context.scene.collection.children.link(collection) + self.data['task'] = api.Session.get('AVALON_TASK') + lib.imprint(collection, self.data) + + if (self.options or {}).get("useSelection"): + for obj in lib.get_selection(): + collection.objects.link(obj) + + return collection diff --git a/pype/plugins/blender/load/load_model.py b/pype/plugins/blender/load/load_model.py new file mode 100644 index 0000000000..bd6db17650 --- /dev/null +++ b/pype/plugins/blender/load/load_model.py @@ -0,0 +1,315 @@ +"""Load a model asset in Blender.""" + +import logging +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +import avalon.blender.pipeline +import bpy +import pype.blender +from avalon import api + +logger = logging.getLogger("pype").getChild("blender").getChild("load_model") + + +class BlendModelLoader(pype.blender.AssetLoader): + """Load models from a .blend file. + + Because they come from a .blend file we can simply link the collection that + contains the model. There is no further need to 'containerise' it. + + Warning: + Loading the same asset more then once is not properly supported at the + moment. + """ + + families = ["model"] + representations = ["blend"] + + label = "Link Model" + icon = "code-fork" + color = "orange" + + @staticmethod + def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]: + """Find the collection(s) with name, loaded from libpath. + + Note: + It is assumed that only 1 matching collection is found. 
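+
+        Example (illustrative names; the key point is that Blender may store
+        `library.filepath` relative to the current file, e.g.
+        "//../asset.blend", so both sides must be resolved with
+        `bpy.path.abspath` before comparing):
+
+            >>> coll = bpy.data.collections["chair_modelMain"]
+            >>> str(Path(bpy.path.abspath(coll.library.filepath)).resolve())
+            '/projects/demo/publish/chair_modelMain.blend'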
+ """ + for collection in bpy.data.collections: + if collection.name != name: + continue + if collection.library is None: + continue + if not collection.library.filepath: + continue + collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve()) + normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve()) + if collection_lib_path == normalized_libpath: + return collection + return None + + @staticmethod + def _collection_contains_object( + collection: bpy.types.Collection, object: bpy.types.Object + ) -> bool: + """Check if the collection contains the object.""" + for obj in collection.objects: + if obj == object: + return True + return False + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + lib_container = pype.blender.plugin.model_name(asset, subset) + container_name = pype.blender.plugin.model_name( + asset, subset, namespace + ) + relative = bpy.context.preferences.filepaths.use_relative_paths + + with bpy.data.libraries.load( + libpath, link=True, relative=relative + ) as (_, data_to): + data_to.collections = [lib_container] + + scene = bpy.context.scene + instance_empty = bpy.data.objects.new( + container_name, None + ) + if not instance_empty.get("avalon"): + instance_empty["avalon"] = dict() + avalon_info = instance_empty["avalon"] + avalon_info.update({"container_name": container_name}) + scene.collection.objects.link(instance_empty) + instance_empty.instance_type = 'COLLECTION' + container = bpy.data.collections[lib_container] + container.name = container_name + instance_empty.instance_collection = container + container.make_local() + avalon.blender.pipeline.containerise_existing( + container, + name, + namespace, + context, + self.__class__.__name__, + ) + + nodes = list(container.objects) + nodes.append(container) + nodes.append(instance_empty) + self[:] = nodes + return nodes + + def update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! + """ + collection = bpy.data.collections.get( + container["objectName"] + ) + libpath = Path(api.get_representation_path(representation)) + extension = libpath.suffix.lower() + + logger.debug( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert collection, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert not (collection.children), ( + "Nested collections are not supported." 
+        )
+        assert libpath, (
+            f"No existing library file found for {container['objectName']}"
+        )
+        assert libpath.is_file(), (
+            f"The file doesn't exist: {libpath}"
+        )
+        assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
+            f"Unsupported file: {libpath}"
+        )
+        collection_libpath = (
+            self._get_library_from_container(collection).filepath
+        )
+        normalized_collection_libpath = (
+            str(Path(bpy.path.abspath(collection_libpath)).resolve())
+        )
+        normalized_libpath = (
+            str(Path(bpy.path.abspath(str(libpath))).resolve())
+        )
+        logger.debug(
+            "normalized_collection_libpath:\n  %s\nnormalized_libpath:\n  %s",
+            normalized_collection_libpath,
+            normalized_libpath,
+        )
+        if normalized_collection_libpath == normalized_libpath:
+            logger.info("Library already loaded, not updating...")
+            return
+        # Let Blender's garbage collection take care of removing the library
+        # itself after removing the objects.
+        objects_to_remove = set()
+        collection_objects = list()
+        collection_objects[:] = collection.objects
+        for obj in collection_objects:
+            # Unlink every object
+            collection.objects.unlink(obj)
+            remove_obj = True
+            for coll in [
+                    coll for coll in bpy.data.collections
+                    if coll != collection
+            ]:
+                if (
+                        coll.objects and
+                        self._collection_contains_object(coll, obj)
+                ):
+                    remove_obj = False
+            if remove_obj:
+                objects_to_remove.add(obj)
+
+        for obj in objects_to_remove:
+            # Only delete objects that are not used elsewhere
+            bpy.data.objects.remove(obj)
+
+        instance_empties = [
+            obj for obj in collection.users_dupli_group
+            if obj.name in collection.name
+        ]
+        if instance_empties:
+            instance_empty = instance_empties[0]
+            container_name = instance_empty["avalon"]["container_name"]
+
+        relative = bpy.context.preferences.filepaths.use_relative_paths
+        with bpy.data.libraries.load(
+                str(libpath), link=True, relative=relative
+        ) as (_, data_to):
+            data_to.collections = [container_name]
+
+        new_collection = self._get_lib_collection(container_name, libpath)
+        if new_collection is None:
+            raise ValueError(
+                f"A matching collection '{container_name}' "
+                f"should have been found in: {libpath}"
+            )
+
+        for obj in new_collection.objects:
+            collection.objects.link(obj)
+        bpy.data.collections.remove(new_collection)
+        # Update the representation on the collection
+        avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
+        avalon_prop["representation"] = str(representation["_id"])
+
+    def remove(self, container: Dict) -> bool:
+        """Remove an existing container from a Blender scene.
+
+        Arguments:
+            container (avalon-core:container-1.0): Container to remove,
+                from `host.ls()`.
+
+        Returns:
+            bool: Whether the container was deleted.
+
+        Warning:
+            No nested collections are supported at the moment!
+        """
+        collection = bpy.data.collections.get(
+            container["objectName"]
+        )
+        if not collection:
+            return False
+        assert not (collection.children), (
+            "Nested collections are not supported."
+        )
+        instance_parents = list(collection.users_dupli_group)
+        instance_objects = list(collection.objects)
+        for obj in instance_objects + instance_parents:
+            bpy.data.objects.remove(obj)
+        bpy.data.collections.remove(collection)
+
+        return True
+
+
+class CacheModelLoader(pype.blender.AssetLoader):
+    """Load cache models.
+
+    Stores the imported asset in a collection named after the asset.
+
+    Note:
+        At least for now it only supports Alembic files.
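+
+        A future implementation could probably build on Blender's built-in
+        Alembic importer (untested sketch, not part of this loader yet):
+
+            bpy.ops.wm.alembic_import(filepath=str(libpath),
+                                      as_background_job=False)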
+ """ + + families = ["model"] + representations = ["abc"] + + label = "Link Model" + icon = "code-fork" + color = "orange" + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + raise NotImplementedError("Loading of Alembic files is not yet implemented.") + # TODO (jasper): implement Alembic import. + + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + # TODO (jasper): evaluate use of namespace which is 'alien' to Blender. + lib_container = container_name = ( + pype.blender.plugin.model_name(asset, subset, namespace) + ) + relative = bpy.context.preferences.filepaths.use_relative_paths + + with bpy.data.libraries.load( + libpath, link=True, relative=relative + ) as (data_from, data_to): + data_to.collections = [lib_container] + + scene = bpy.context.scene + instance_empty = bpy.data.objects.new( + container_name, None + ) + scene.collection.objects.link(instance_empty) + instance_empty.instance_type = 'COLLECTION' + collection = bpy.data.collections[lib_container] + collection.name = container_name + instance_empty.instance_collection = collection + + nodes = list(collection.objects) + nodes.append(collection) + nodes.append(instance_empty) + self[:] = nodes + return nodes diff --git a/pype/plugins/blender/publish/collect_current_file.py b/pype/plugins/blender/publish/collect_current_file.py new file mode 100644 index 0000000000..a097c72047 --- /dev/null +++ b/pype/plugins/blender/publish/collect_current_file.py @@ -0,0 +1,16 @@ +import bpy + +import pyblish.api + + +class CollectBlenderCurrentFile(pyblish.api.ContextPlugin): + """Inject the current working file into context""" + + order = pyblish.api.CollectorOrder - 0.5 + label = "Blender Current File" + hosts = ['blender'] + + def process(self, context): + """Inject the current working file""" + current_file = bpy.data.filepath + context.data['currentFile'] = current_file diff --git a/pype/plugins/blender/publish/collect_model.py b/pype/plugins/blender/publish/collect_model.py new file mode 100644 index 0000000000..ee10eaf7f2 --- /dev/null +++ b/pype/plugins/blender/publish/collect_model.py @@ -0,0 +1,53 @@ +import typing +from typing import Generator + +import bpy + +import avalon.api +import pyblish.api +from avalon.blender.pipeline import AVALON_PROPERTY + + +class CollectModel(pyblish.api.ContextPlugin): + """Collect the data of a model.""" + + hosts = ["blender"] + label = "Collect Model" + order = pyblish.api.CollectorOrder + + @staticmethod + def get_model_collections() -> Generator: + """Return all 'model' collections. + + Check if the family is 'model' and if it doesn't have the + representation set. If the representation is set, it is a loaded model + and we don't want to publish it. 
+ """ + for collection in bpy.data.collections: + avalon_prop = collection.get(AVALON_PROPERTY) or dict() + if (avalon_prop.get('family') == 'model' + and not avalon_prop.get('representation')): + yield collection + + def process(self, context): + """Collect the models from the current Blender scene.""" + collections = self.get_model_collections() + for collection in collections: + avalon_prop = collection[AVALON_PROPERTY] + asset = avalon_prop['asset'] + family = avalon_prop['family'] + subset = avalon_prop['subset'] + task = avalon_prop['task'] + name = f"{asset}_{subset}" + instance = context.create_instance( + name=name, + family=family, + families=[family], + subset=subset, + asset=asset, + task=task, + ) + members = list(collection.objects) + members.append(collection) + instance[:] = members + self.log.debug(instance.data) diff --git a/pype/plugins/blender/publish/extract_model.py b/pype/plugins/blender/publish/extract_model.py new file mode 100644 index 0000000000..501c4d9d5c --- /dev/null +++ b/pype/plugins/blender/publish/extract_model.py @@ -0,0 +1,47 @@ +import os +import avalon.blender.workio + +import pype.api + + +class ExtractModel(pype.api.Extractor): + """Extract as model.""" + + label = "Model" + hosts = ["blender"] + families = ["model"] + optional = True + + def process(self, instance): + # Define extract output file path + + stagingdir = self.staging_dir(instance) + filename = f"{instance.name}.blend" + filepath = os.path.join(stagingdir, filename) + + # Perform extraction + self.log.info("Performing extraction..") + + # Just save the file to a temporary location. At least for now it's no + # problem to have (possibly) extra stuff in the file. + avalon.blender.workio.save_file(filepath, copy=True) + # + # # Store reference for integration + # if "files" not in instance.data: + # instance.data["files"] = list() + # + # # instance.data["files"].append(filename) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'blend', + 'ext': 'blend', + 'files': filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + + + self.log.info("Extracted instance '%s' to: %s", instance.name, representation) diff --git a/pype/plugins/blender/publish/validate_mesh_has_uv.py b/pype/plugins/blender/publish/validate_mesh_has_uv.py new file mode 100644 index 0000000000..b71a40ad8f --- /dev/null +++ b/pype/plugins/blender/publish/validate_mesh_has_uv.py @@ -0,0 +1,49 @@ +from typing import List + +import bpy + +import pyblish.api +import pype.blender.action + + +class ValidateMeshHasUvs(pyblish.api.InstancePlugin): + """Validate that the current mesh has UV's.""" + + order = pyblish.api.ValidatorOrder + hosts = ["blender"] + families = ["model"] + category = "geometry" + label = "Mesh Has UV's" + actions = [pype.blender.action.SelectInvalidAction] + optional = True + + @staticmethod + def has_uvs(obj: bpy.types.Object) -> bool: + """Check if an object has uv's.""" + if not obj.data.uv_layers: + return False + for uv_layer in obj.data.uv_layers: + for polygon in obj.data.polygons: + for loop_index in polygon.loop_indices: + if not uv_layer.data[loop_index].uv: + return False + + return True + + @classmethod + def get_invalid(cls, instance) -> List: + invalid = [] + # TODO (jasper): only check objects in the collection that will be published? + for obj in [ + obj for obj in bpy.data.objects if obj.type == 'MESH' + ]: + # Make sure we are in object mode. 
+ bpy.ops.object.mode_set(mode='OBJECT') + if not cls.has_uvs(obj): + invalid.append(obj) + return invalid + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}") diff --git a/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py b/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py new file mode 100644 index 0000000000..7e3b38dd19 --- /dev/null +++ b/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py @@ -0,0 +1,35 @@ +from typing import List + +import bpy + +import pyblish.api +import pype.blender.action + + +class ValidateMeshNoNegativeScale(pyblish.api.Validator): + """Ensure that meshes don't have a negative scale.""" + + order = pyblish.api.ValidatorOrder + hosts = ["blender"] + families = ["model"] + label = "Mesh No Negative Scale" + actions = [pype.blender.action.SelectInvalidAction] + + @staticmethod + def get_invalid(instance) -> List: + invalid = [] + # TODO (jasper): only check objects in the collection that will be published? + for obj in [ + obj for obj in bpy.data.objects if obj.type == 'MESH' + ]: + if any(v < 0 for v in obj.scale): + invalid.append(obj) + + return invalid + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + f"Meshes found in instance with negative scale: {invalid}" + ) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py similarity index 92% rename from pype/plugins/ftrack/publish/integrate_ftrack_comments.py rename to pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py index 9d0b7b3ab9..4be9f7fc3a 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py +++ b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py @@ -7,8 +7,9 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin): """Create comments in Ftrack.""" order = pyblish.api.IntegratorOrder - label = "Integrate Comments to Ftrack." 
+ label = "Integrate Comments to Ftrack" families = ["shot"] + enabled = False def process(self, instance): session = instance.context.data["ftrackSession"] diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index d09baec676..f79d74453b 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -23,25 +23,43 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): # Collect session session = ftrack_api.Session() + self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) context.data["ftrackSession"] = session # Collect task - project = os.environ.get('AVALON_PROJECT', '') - asset = os.environ.get('AVALON_ASSET', '') - task = os.environ.get('AVALON_TASK', None) - self.log.debug(task) + project_name = os.environ.get('AVALON_PROJECT', '') + asset_name = os.environ.get('AVALON_ASSET', '') + task_name = os.environ.get('AVALON_TASK', None) + + # Find project entity + project_query = 'Project where full_name is "{0}"'.format(project_name) + self.log.debug("Project query: < {0} >".format(project_query)) + project_entity = session.query(project_query).one() + self.log.debug("Project found: {0}".format(project_entity)) + + # Find asset entity + entity_query = ( + 'TypedContext where project_id is "{0}"' + ' and name is "{1}"' + ).format(project_entity["id"], asset_name) + self.log.debug("Asset entity query: < {0} >".format(entity_query)) + asset_entity = session.query(entity_query).one() + self.log.debug("Asset found: {0}".format(asset_entity)) + + # Find task entity if task is set + if task_name: + task_query = ( + 'Task where name is "{0}" and parent_id is "{1}"' + ).format(task_name, asset_entity["id"]) + self.log.debug("Task entity query: < {0} >".format(task_query)) + task_entity = session.query(task_query).one() + self.log.debug("Task entity found: {0}".format(task_entity)) - if task: - result = session.query('Task where\ - project.full_name is "{0}" and\ - name is "{1}" and\ - parent.name is "{2}"'.format(project, task, asset)).one() - context.data["ftrackTask"] = result else: - result = session.query('TypedContext where\ - project.full_name is "{0}" and\ - name is "{1}"'.format(project, asset)).one() - context.data["ftrackEntity"] = result + task_entity = None + self.log.warning("Task name is not set.") - self.log.info(result) + context.data["ftrackProject"] = asset_entity + context.data["ftrackEntity"] = asset_entity + context.data["ftrackTask"] = task_entity diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index 9fe4fddebf..cd94b2a150 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -77,6 +77,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): info_msg = "Created new {entity_type} with data: {data}" info_msg += ", metadata: {metadata}." 
+        used_asset_versions = []

         # Iterate over components and publish
         for data in instance.data.get("ftrackComponentsList", []):
@@ -144,8 +145,14 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
                 "version": 0,
                 "asset": asset_entity,
             }
-
-            assetversion_data.update(data.get("assetversion_data", {}))
+            _assetversion_data = data.get("assetversion_data", {})
+            assetversion_cust_attrs = _assetversion_data.pop(
+                "custom_attributes", {}
+            )
+            asset_version_comment = _assetversion_data.pop(
+                "comment", None
+            )
+            assetversion_data.update(_assetversion_data)

             assetversion_entity = session.query(
                 self.query("AssetVersion", assetversion_data)
@@ -182,6 +189,36 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
                 existing_assetversion_metadata.update(assetversion_metadata)
                 assetversion_entity["metadata"] = existing_assetversion_metadata

+            # Add comment
+            if asset_version_comment:
+                assetversion_entity["comment"] = asset_version_comment
+                try:
+                    session.commit()
+                except Exception:
+                    session.rollback()
+                    self.log.warning((
+                        "Could not set comment for AssetVersion "
+                        "\"{0}\". Value was: \"{1}\""
+                    ).format(
+                        assetversion_entity["id"], str(asset_version_comment)
+                    ))
+
+            # Add custom attributes
+            for attr, val in assetversion_cust_attrs.items():
+                if attr in assetversion_entity["custom_attributes"]:
+                    try:
+                        assetversion_entity["custom_attributes"][attr] = val
+                        session.commit()
+                        continue
+                    except Exception:
+                        session.rollback()
+
+                self.log.warning((
+                    "Custom Attribute \"{0}\""
+                    " is not available for AssetVersion <{1}>."
+                    " Can't set its value to: \"{2}\""
+                ).format(attr, assetversion_entity["id"], str(val)))
+
             # Have to commit the version and asset, because location can't
             # determine the final location without.
             try:
@@ -350,3 +387,14 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
                 tp, value, tb = sys.exc_info()
                 session.rollback()
                 six.reraise(tp, value, tb)
+
+            if assetversion_entity not in used_asset_versions:
+                used_asset_versions.append(assetversion_entity)
+
+        asset_versions_key = "ftrackIntegratedAssetVersions"
+        if asset_versions_key not in instance.data:
+            instance.data[asset_versions_key] = []
+
+        for asset_version in used_asset_versions:
+            if asset_version not in instance.data[asset_versions_key]:
+                instance.data[asset_versions_key].append(asset_version)
diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
index f504a52f9e..78583b0a2f 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
@@ -28,7 +28,8 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
         'plate': 'img',
         'audio': 'audio',
         'workfile': 'scene',
-        'animation': 'cache'
+        'animation': 'cache',
+        'image': 'img'
     }

     def process(self, instance):
@@ -115,6 +116,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
                 },
                 "assetversion_data": {
                     "version": version_number,
+                    "comment": instance.context.data.get("comment", "")
                 },
                 "component_data": component_data,
                 "component_path": comp['published_path'],
@@ -123,6 +125,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
                 "thumbnail": comp['thumbnail']
             }

+            # Add custom attributes for AssetVersion
+            assetversion_cust_attrs = {}
+            intent_val = instance.context.data.get("intent")
+            if intent_val:
+                assetversion_cust_attrs["intent"] = intent_val
+
+            component_item["assetversion_data"]["custom_attributes"] = (
+                assetversion_cust_attrs
+            )
+
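The integrator above deliberately commits each comment and custom attribute separately and rolls the session back on failure, so one bad attribute cannot poison the rest of the publish. A condensed sketch of that pattern (function and parameter names are hypothetical, not the plugin's API):

```python
def set_custom_attributes(session, entity, attributes, log):
    """Write ftrack custom attributes one commit at a time.

    A failed commit leaves the session dirty, so roll back before moving
    on; otherwise every later commit would re-raise the same error.
    """
    for key, value in attributes.items():
        if key not in entity["custom_attributes"]:
            log.warning("Custom attribute %r not available", key)
            continue
        try:
            entity["custom_attributes"][key] = value
            session.commit()
        except Exception:
            session.rollback()
            log.warning("Could not set %r to %r", key, value)
```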
componentList.append(component_item) # Create copy with ftrack.unmanaged location if thumb or prev if comp.get('thumbnail') or comp.get('preview') \ diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py new file mode 100644 index 0000000000..f7fb5addbb --- /dev/null +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -0,0 +1,51 @@ +import sys +import pyblish.api +import six + + +class IntegrateFtrackNote(pyblish.api.InstancePlugin): + """Create comments in Ftrack.""" + + # Must be after integrate asset new + order = pyblish.api.IntegratorOrder + 0.4999 + label = "Integrate Ftrack note" + families = ["ftrack"] + optional = True + + def process(self, instance): + comment = (instance.context.data.get("comment") or "").strip() + if not comment: + self.log.info("Comment is not set.") + return + + self.log.debug("Comment is set to {}".format(comment)) + + asset_versions_key = "ftrackIntegratedAssetVersions" + asset_versions = instance.data.get(asset_versions_key) + if not asset_versions: + self.log.info("There are any integrated AssetVersions") + return + + session = instance.context.data["ftrackSession"] + user = session.query( + "User where username is \"{}\"".format(session.api_user) + ).first() + if not user: + self.log.warning( + "Was not able to query current User {}".format( + session.api_user + ) + ) + + for asset_version in asset_versions: + asset_version.create_note(comment, author=user) + + try: + session.commit() + self.log.debug("Note added to AssetVersion \"{}\"".format( + str(asset_version) + )) + except Exception: + tp, value, tb = sys.exc_info() + session.rollback() + six.reraise(tp, value, tb) diff --git a/pype/plugins/ftrack/publish/integrate_remove_components.py b/pype/plugins/ftrack/publish/integrate_remove_components.py index bad50f7200..26cac0f1ae 100644 --- a/pype/plugins/ftrack/publish/integrate_remove_components.py +++ b/pype/plugins/ftrack/publish/integrate_remove_components.py @@ -11,13 +11,13 @@ class IntegrateCleanComponentData(pyblish.api.InstancePlugin): label = 'Clean component data' families = ["ftrack"] optional = True - active = True + active = False def process(self, instance): for comp in instance.data['representations']: self.log.debug('component {}'.format(comp)) - + if "%" in comp['published_path'] or "#" in comp['published_path']: continue diff --git a/pype/plugins/global/publish/collect_comment.py b/pype/plugins/global/publish/collect_comment.py index 22970665a1..062142ace9 100644 --- a/pype/plugins/global/publish/collect_comment.py +++ b/pype/plugins/global/publish/collect_comment.py @@ -15,4 +15,5 @@ class CollectComment(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder def process(self, context): - context.data["comment"] = "" + comment = (context.data.get("comment") or "").strip() + context.data["comment"] = comment diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 39481e216b..6c06229304 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -12,7 +12,6 @@ import os import re import copy import json -from pprint import pformat import pyblish.api from avalon import api @@ -54,10 +53,6 @@ def collect(root, patterns=[pattern], minimum_items=1) - # Ignore any remainders - if remainder: - print("Skipping remainder {}".format(remainder)) - # Exclude any frames outside start and end frame. 
for collection in collections: for index in list(collection.indexes): @@ -71,7 +66,7 @@ def collect(root, # Keep only collections that have at least a single frame collections = [c for c in collections if c.indexes] - return collections + return collections, remainder class CollectRenderedFrames(pyblish.api.ContextPlugin): @@ -95,11 +90,22 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): """ - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.0001 targets = ["filesequence"] label = "RenderedFrames" def process(self, context): + pixel_aspect = 1 + resolution_width = 1920 + resolution_height = 1080 + lut_path = None + slate_frame = None + families_data = None + baked_mov_path = None + subset = None + version = None + frame_start = 0 + frame_end = 0 if os.environ.get("PYPE_PUBLISH_PATHS"): paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) self.log.info("Collecting paths: {}".format(paths)) @@ -117,12 +123,18 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): try: data = json.load(f) except Exception as exc: - self.log.error("Error loading json: " - "{} - Exception: {}".format(path, exc)) + self.log.error( + "Error loading json: " + "{} - Exception: {}".format(path, exc) + ) raise cwd = os.path.dirname(path) root_override = data.get("root") + frame_start = int(data.get("frameStart")) + frame_end = int(data.get("frameEnd")) + subset = data.get("subset") + if root_override: if os.path.isabs(root_override): root = root_override @@ -144,6 +156,18 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): self.log.info("setting session using metadata") api.Session.update(session) os.environ.update(session) + instance = metadata.get("instance") + if instance: + instance_family = instance.get("family") + pixel_aspect = instance.get("pixelAspect", 1) + resolution_width = instance.get("resolutionWidth", 1920) + resolution_height = instance.get("resolutionHeight", 1080) + lut_path = instance.get("lutPath", None) + baked_mov_path = instance.get("bakeRenderPath") + families_data = instance.get("families") + slate_frame = instance.get("slateFrame") + version = instance.get("version") + else: # Search in directory @@ -151,88 +175,279 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): root = path self.log.info("Collecting: {}".format(root)) + regex = data.get("regex") + if baked_mov_path: + regex = "^{}.*$".format(subset) + if regex: self.log.info("Using regex: {}".format(regex)) - collections = collect(root=root, - regex=regex, - exclude_regex=data.get("exclude_regex"), - frame_start=data.get("frameStart"), - frame_end=data.get("frameEnd")) + if "slate" in families_data: + frame_start -= 1 + + collections, remainder = collect( + root=root, + regex=regex, + exclude_regex=data.get("exclude_regex"), + frame_start=frame_start, + frame_end=frame_end, + ) self.log.info("Found collections: {}".format(collections)) - - if data.get("subset"): - # If subset is provided for this json then it must be a single - # collection. 
- if len(collections) > 1: - self.log.error("Forced subset can only work with a single " - "found sequence") - raise RuntimeError("Invalid sequence") + self.log.info("Found remainder: {}".format(remainder)) fps = data.get("fps", 25) + # adding publish comment and intent to context + context.data["comment"] = data.get("comment", "") + context.data["intent"] = data.get("intent", "") + + if data.get("user"): + context.data["user"] = data["user"] + + if data.get("version"): + version = data.get("version") + # Get family from the data families = data.get("families", ["render"]) if "render" not in families: families.append("render") if "ftrack" not in families: families.append("ftrack") - if "review" not in families: - families.append("review") + if "write" in instance_family: + families.append("write") + if families_data and "slate" in families_data: + families.append("slate") - for collection in collections: - instance = context.create_instance(str(collection)) - self.log.info("Collection: %s" % list(collection)) + if data.get("attachTo"): + # we need to attach found collections to existing + # subset version as review represenation. - # Ensure each instance gets a unique reference to the data + for attach in data.get("attachTo"): + self.log.info( + "Attaching render {}:v{}".format( + attach["subset"], attach["version"])) + instance = context.create_instance( + attach["subset"]) + instance.data.update( + { + "name": attach["subset"], + "version": attach["version"], + "family": 'review', + "families": ['review', 'ftrack'], + "asset": data.get( + "asset", api.Session["AVALON_ASSET"]), + "stagingDir": root, + "frameStart": frame_start, + "frameEnd": frame_end, + "fps": fps, + "source": data.get("source", ""), + "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height + }) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + for collection in collections: + self.log.info( + " - adding representation: {}".format( + str(collection)) + ) + ext = collection.tail.lstrip(".") + + representation = { + "name": ext, + "ext": "{}".format(ext), + "files": list(collection), + "stagingDir": root, + "anatomy_template": "render", + "fps": fps, + "tags": ["review"], + } + instance.data["representations"].append( + representation) + + elif subset: + # if we have subset - add all collections and known + # reminder as representations + + # take out review family if mov path + # this will make imagesequence none review + + if baked_mov_path: + self.log.info( + "Baked mov is available {}".format( + baked_mov_path)) + families.append("review") + + if session['AVALON_APP'] == "maya": + families.append("review") + + self.log.info( + "Adding representations to subset {}".format( + subset)) + + instance = context.create_instance(subset) data = copy.deepcopy(data) - # If no subset provided, get it from collection's head - subset = data.get("subset", collection.head.rstrip("_. 
")) - - # If no start or end frame provided, get it from collection - indices = list(collection.indexes) - start = data.get("frameStart", indices[0]) - end = data.get("frameEnd", indices[-1]) - - # root = os.path.normpath(root) - # self.log.info("Source: {}}".format(data.get("source", ""))) - - ext = list(collection)[0].split('.')[-1] - - instance.data.update({ - "name": str(collection), - "family": families[0], # backwards compatibility / pyblish - "families": list(families), - "subset": subset, - "asset": data.get("asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": start, - "frameEnd": end, - "fps": fps, - "source": data.get('source', '') - }) - instance.append(collection) - instance.context.data['fps'] = fps + instance.data.update( + { + "name": subset, + "family": families[0], + "families": list(families), + "subset": subset, + "asset": data.get( + "asset", api.Session["AVALON_ASSET"]), + "stagingDir": root, + "frameStart": frame_start, + "frameEnd": frame_end, + "fps": fps, + "source": data.get("source", ""), + "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "slateFrame": slate_frame, + "version": version + } + ) if "representations" not in instance.data: instance.data["representations"] = [] - representation = { - 'name': ext, - 'ext': '{}'.format(ext), - 'files': list(collection), - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ['review'] - } - instance.data["representations"].append(representation) + for collection in collections: + self.log.info(" - {}".format(str(collection))) - if data.get('user'): - context.data["user"] = data['user'] + ext = collection.tail.lstrip(".") - self.log.debug("Collected instance:\n" - "{}".format(pformat(instance.data))) + if "slate" in instance.data["families"]: + frame_start += 1 + + representation = { + "name": ext, + "ext": "{}".format(ext), + "files": list(collection), + "frameStart": frame_start, + "frameEnd": frame_end, + "stagingDir": root, + "anatomy_template": "render", + "fps": fps, + "tags": ["review"] if not baked_mov_path else [], + } + instance.data["representations"].append( + representation) + + # filter out only relevant mov in case baked available + self.log.debug("__ remainder {}".format(remainder)) + if baked_mov_path: + remainder = [r for r in remainder + if r in baked_mov_path] + self.log.debug("__ remainder {}".format(remainder)) + + # process reminders + for rem in remainder: + # add only known types to representation + if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']: + self.log.info(" . {}".format(rem)) + + if "slate" in instance.data["families"]: + frame_start += 1 + + tags = ["review"] + + if baked_mov_path: + tags.append("delete") + + representation = { + "name": rem.split(".")[-1], + "ext": "{}".format(rem.split(".")[-1]), + "files": rem, + "stagingDir": root, + "frameStart": frame_start, + "anatomy_template": "render", + "fps": fps, + "tags": tags + } + instance.data["representations"].append( + representation) + + else: + # we have no subset so we take every collection and create one + # from it + for collection in collections: + instance = context.create_instance(str(collection)) + self.log.info("Creating subset from: %s" % str(collection)) + + # Ensure each instance gets a unique reference to the data + data = copy.deepcopy(data) + + # If no subset provided, get it from collection's head + subset = data.get("subset", collection.head.rstrip("_. 
")) + + # If no start or end frame provided, get it from collection + indices = list(collection.indexes) + start = data.get("frameStart", indices[0]) + end = data.get("frameEnd", indices[-1]) + + ext = list(collection)[0].split(".")[-1] + + if "review" not in families: + families.append("review") + + instance.data.update( + { + "name": str(collection), + "family": families[0], # backwards compatibility + "families": list(families), + "subset": subset, + "asset": data.get( + "asset", api.Session["AVALON_ASSET"]), + "stagingDir": root, + "frameStart": start, + "frameEnd": end, + "fps": fps, + "source": data.get("source", ""), + "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "version": version + } + ) + if lut_path: + instance.data.update({"lutPath": lut_path}) + + instance.append(collection) + instance.context.data["fps"] = fps + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + "name": ext, + "ext": "{}".format(ext), + "files": list(collection), + "frameStart": start, + "frameEnd": end, + "stagingDir": root, + "anatomy_template": "render", + "fps": fps, + "tags": ["review"], + } + instance.data["representations"].append(representation) + + # temporary ... allow only beauty on ftrack + if session['AVALON_APP'] == "maya": + AOV_filter = ['beauty'] + for aov in AOV_filter: + if aov not in instance.data['subset']: + instance.data['families'].remove('review') + instance.data['families'].remove('ftrack') + representation["tags"].remove('review') + + self.log.debug( + "__ representations {}".format( + instance.data["representations"])) + self.log.debug( + "__ instance.data {}".format(instance.data)) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index b80ca4ae1b..383944e293 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -31,32 +31,44 @@ class CollectTemplates(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] project_name = api.Session["AVALON_PROJECT"] - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} + ) template = project["config"]["template"]["publish"] anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, project_name)) silo = asset.get('silo') - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: @@ -75,8 +87,19 @@ class CollectTemplates(pyblish.api.InstancePlugin): "asset": asset_name, "subset": subset_name, "version": version_number, - "hierarchy": hierarchy, - 
"representation": "TEMP"} + "hierarchy": hierarchy.replace("\\", "/"), + "representation": "TEMP")} + + resolution_width = instance.data.get("resolutionWidth") + resolution_height = instance.data.get("resolutionHeight") + fps = instance.data.get("fps") + + if resolution_width: + template_data["resolution_width"] = resolution_width + if resolution_width: + template_data["resolution_height"] = resolution_height + if resolution_width: + template_data["fps"] = fps instance.data["template"] = template instance.data["assumedTemplateData"] = template_data @@ -85,3 +108,6 @@ class CollectTemplates(pyblish.api.InstancePlugin): instance.data["assumedDestination"] = os.path.dirname( (anatomy.format(template_data))["publish"]["path"] ) + self.log.info("Assumed Destination has been created...") + self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"])) + self.log.debug("__ template: `{}`".format(instance.data["template"])) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index b1569aaa45..8a96e66d27 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -1,8 +1,10 @@ import os import json +import copy import pype.api import pyblish +from pypeapp import config class ExtractBurnin(pype.api.Extractor): @@ -24,18 +26,50 @@ class ExtractBurnin(pype.api.Extractor): if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") - # TODO: expand burnin data list to include all usefull keys - version = '' - if instance.context.data.get('version'): - version = "v" + str(instance.context.data['version']) + version = instance.context.data.get( + 'version', instance.data.get('version')) + frame_start = int(instance.data.get("frameStart") or 0) + frame_end = int(instance.data.get("frameEnd") or 1) + duration = frame_end - frame_start + 1 prep_data = { "username": instance.context.data['user'], "asset": os.environ['AVALON_ASSET'], "task": os.environ['AVALON_TASK'], - "start_frame": int(instance.data["frameStart"]), - "version": version + "frame_start": frame_start, + "frame_end": frame_end, + "duration": duration, + "version": int(version), + "comment": instance.context.data.get("comment", ""), + "intent": instance.context.data.get("intent", "") } + + # Add datetime data to preparation data + prep_data.update(config.get_datetime_data()) + + slate_frame_start = frame_start + slate_frame_end = frame_end + slate_duration = duration + + # exception for slate workflow + if "slate" in instance.data["families"]: + slate_frame_start = frame_start - 1 + slate_frame_end = frame_end + slate_duration = slate_frame_end - slate_frame_start + 1 + + prep_data.update({ + "slate_frame_start": slate_frame_start, + "slate_frame_end": slate_frame_end, + "slate_duration": slate_duration + }) + + # Update data with template data + template_data = instance.data.get("assumedTemplateData") or {} + prep_data.update(template_data) + + # get anatomy project + anatomy = instance.context.data['anatomy'] + self.log.debug("__ prep_data: {}".format(prep_data)) for i, repre in enumerate(instance.data["representations"]): self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre)) @@ -47,16 +81,28 @@ class ExtractBurnin(pype.api.Extractor): filename = "{0}".format(repre["files"]) name = "_burnin" - movieFileBurnin = filename.replace(".mov", "") + name + ".mov" + ext = os.path.splitext(filename)[1] + movieFileBurnin = filename.replace(ext, "") + name + ext - 
-            full_burnin_path = os.path.join(os.path.normpath(stagingdir), movieFileBurnin)
+            full_movie_path = os.path.join(
+                os.path.normpath(stagingdir), repre["files"]
+            )
+            full_burnin_path = os.path.join(
+                os.path.normpath(stagingdir), movieFileBurnin
+            )
 
             self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))
 
+            # create copy of prep_data for anatomy formatting
+            _prep_data = copy.deepcopy(prep_data)
+            _prep_data["representation"] = repre["name"]
+            filled_anatomy = anatomy.format_all(_prep_data)
+            _prep_data["anatomy"] = filled_anatomy.get_solved()
+
             burnin_data = {
                 "input": full_movie_path.replace("\\", "/"),
+                "codec": repre.get("codec", []),
                 "output": full_burnin_path.replace("\\", "/"),
-                "burnin_data": prep_data
+                "burnin_data": _prep_data
             }
 
             self.log.debug("__ burnin_data2: {}".format(burnin_data))
diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py
index 8a1a0b5e68..4978649ba2 100644
--- a/pype/plugins/global/publish/extract_jpeg.py
+++ b/pype/plugins/global/publish/extract_jpeg.py
@@ -6,7 +6,7 @@ import pype.api
 
 
 class ExtractJpegEXR(pyblish.api.InstancePlugin):
-    """Resolve any dependency issies
+    """Resolve any dependency issues
 
     This plug-in resolves any paths which, if not updated might break
    the published file.
@@ -20,6 +20,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
     hosts = ["shell"]
     order = pyblish.api.ExtractorOrder
     families = ["imagesequence", "render", "write", "source"]
+    enabled = False
 
     def process(self, instance):
         start = instance.data.get("frameStart")
@@ -28,51 +29,74 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
         collected_frames = os.listdir(stagingdir)
         collections, remainder = clique.assemble(collected_frames)
 
-        input_file = (
-            collections[0].format('{head}{padding}{tail}') % start
-        )
-        full_input_path = os.path.join(stagingdir, input_file)
-        self.log.info("input {}".format(full_input_path))
+        self.log.info("subset {}".format(instance.data['subset']))
+        if 'crypto' in instance.data['subset']:
+            return
 
-        filename = collections[0].format('{head}')
-        if not filename.endswith('.'):
-            filename += "."
-        jpegFile = filename + "jpg"
-        full_output_path = os.path.join(stagingdir, jpegFile)
+        # get representation and loop them
+        representations = instance.data["representations"]
 
-        self.log.info("output {}".format(full_output_path))
+        # filter out mov and img sequences
+        representations_new = representations[:]
 
-        config_data = instance.context.data['output_repre_config']
+        for repre in representations:
+            self.log.debug(repre)
+            if 'review' not in repre.get('tags', []):
+                continue
 
-        proj_name = os.environ.get('AVALON_PROJECT', '__default__')
-        profile = config_data.get(proj_name, config_data['__default__'])
+            input_file = repre['files'][0]
 
-        jpeg_items = []
-        jpeg_items.append(
-            os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
-        # override file if already exists
-        jpeg_items.append("-y")
-        # use same input args like with mov
-        jpeg_items.extend(profile.get('input', []))
-        # input file
-        jpeg_items.append("-i {}".format(full_input_path))
-        # output file
-        jpeg_items.append(full_output_path)
+            # input_file = (
+            #     collections[0].format('{head}{padding}{tail}') % start
+            # )
+            full_input_path = os.path.join(stagingdir, input_file)
+            self.log.info("input {}".format(full_input_path))
 
-        subprocess_jpeg = " ".join(jpeg_items)
+            filename = os.path.splitext(input_file)[0]
+            if not filename.endswith('.'):
+                filename += "."
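# --- A minimal sketch of the single-frame conversion this extractor
# assembles: one input image in, one jpeg out. The paths are illustrative
# assumptions; the real plugin also reuses the profile's input args and
# runs the command through pype.api.subprocess.
import os
import subprocess

ffmpeg = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")

# "-y" overwrites any existing output; with a single image as input,
# ffmpeg writes exactly one jpeg frame.
cmd = " ".join([ffmpeg, "-y", "-i", "render.1001.exr", "render.1001.jpg"])
subprocess.check_call(cmd, shell=True)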
+ jpeg_file = filename + "jpg" + full_output_path = os.path.join(stagingdir, jpeg_file) - # run subprocess - self.log.debug("{}".format(subprocess_jpeg)) - pype.api.subprocess(subprocess_jpeg) + self.log.info("output {}".format(full_output_path)) - if "representations" not in instance.data: - instance.data["representations"] = [] + config_data = instance.context.data['output_repre_config'] - representation = { - 'name': 'jpg', - 'ext': 'jpg', - 'files': jpegFile, - "stagingDir": stagingdir, - "thumbnail": True - } - instance.data["representations"].append(representation) + proj_name = os.environ.get('AVALON_PROJECT', '__default__') + profile = config_data.get(proj_name, config_data['__default__']) + + jpeg_items = [] + jpeg_items.append( + os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + # override file if already exists + jpeg_items.append("-y") + # use same input args like with mov + jpeg_items.extend(profile.get('input', [])) + # input file + jpeg_items.append("-i {}".format(full_input_path)) + # output file + jpeg_items.append(full_output_path) + + subprocess_jpeg = " ".join(jpeg_items) + + # run subprocess + self.log.debug("{}".format(subprocess_jpeg)) + pype.api.subprocess(subprocess_jpeg) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'thumbnail', + 'ext': 'jpg', + 'files': jpeg_file, + "stagingDir": stagingdir, + "thumbnail": True, + "tags": ['thumbnail'] + } + + # adding representation + self.log.debug("Adding: {}".format(representation)) + representations_new.append(representation) + + instance.data["representations"] = representations_new diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index de167710a5..a11f681e61 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -1,9 +1,7 @@ import os - import pyblish.api import clique import pype.api -from pypeapp import config class ExtractReview(pyblish.api.InstancePlugin): @@ -22,27 +20,35 @@ class ExtractReview(pyblish.api.InstancePlugin): families = ["review"] hosts = ["nuke", "maya", "shell"] + outputs = {} + ext_filter = [] + def process(self, instance): - # adding plugin attributes from presets - publish_presets = config.get_presets()["plugins"]["global"]["publish"] - plugin_attrs = publish_presets[self.__class__.__name__] - output_profiles = plugin_attrs.get("outputs", {}) + to_width = 1920 + to_height = 1080 + + output_profiles = self.outputs or {} inst_data = instance.data fps = inst_data.get("fps") start_frame = inst_data.get("frameStart") - - self.log.debug("Families In: `{}`".format(instance.data["families"])) + resolution_width = inst_data.get("resolutionWidth", to_width) + resolution_height = inst_data.get("resolutionHeight", to_height) + pixel_aspect = inst_data.get("pixelAspect", 1) + self.log.debug("Families In: `{}`".format(inst_data["families"])) # get representation and loop them - representations = instance.data["representations"] + representations = inst_data["representations"] # filter out mov and img sequences representations_new = representations[:] for repre in representations: - if repre['ext'] in plugin_attrs["ext_filter"]: + if repre['ext'] in self.ext_filter: tags = repre.get("tags", []) + if "thumbnail" in tags: + continue + self.log.info("Try repre: {}".format(repre)) if "review" in tags: @@ -54,10 +60,14 @@ class ExtractReview(pyblish.api.InstancePlugin): if not ext: ext = "mov" self.log.warning( - "`ext` 
attribute not in output profile. Setting to default ext: `mov`")
+                        str("`ext` attribute not in output "
+                            "profile. Setting to default ext: `mov`"))
 
-                self.log.debug("instance.families: {}".format(instance.data['families']))
-                self.log.debug("profile.families: {}".format(profile['families']))
+                self.log.debug(
+                    "instance.families: {}".format(
+                        instance.data['families']))
+                self.log.debug(
+                    "profile.families: {}".format(profile['families']))
 
                 if any(item in instance.data['families']
                        for item in profile['families']):
                     if isinstance(repre["files"], list):
@@ -92,8 +102,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
                         self.log.info("p_tags: `{}`".format(p_tags))
 
                         # add families
                         [instance.data["families"].append(t)
-                            for t in p_tags
-                            if t not in instance.data["families"]]
+                         for t in p_tags
+                         if t not in instance.data["families"]]
+
                         # add to
                         [new_tags.append(t) for t in p_tags
                          if t not in new_tags]
@@ -111,8 +122,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
                         # necessary input data
                         # adds start arg only if image sequence
                         if isinstance(repre["files"], list):
-                            input_args.append("-start_number {0} -framerate {1}".format(
-                                start_frame, fps))
+                            input_args.append(
+                                "-start_number {0} -framerate {1}".format(
+                                    start_frame, fps))
 
                         input_args.append("-i {}".format(full_input_path))
@@ -147,21 +159,135 @@ class ExtractReview(pyblish.api.InstancePlugin):
                         )
 
                         output_args = []
+                        codec_args = profile.get('codec', [])
+                        output_args.extend(codec_args)
+
                         # preset's output data
                         output_args.extend(profile.get('output', []))
 
+                        # defining image ratios
+                        resolution_ratio = float(resolution_width) / (
+                            resolution_height * pixel_aspect)
+                        delivery_ratio = float(to_width) / float(to_height)
+                        self.log.debug(resolution_ratio)
+                        self.log.debug(delivery_ratio)
+
+                        # get scale factor
+                        scale_factor = float(to_height) / (
+                            resolution_height * pixel_aspect)
+                        self.log.debug(scale_factor)
+
                         # letter_box
-                        # TODO: add to documentation
-                        lb = profile.get('letter_box', None)
-                        if lb:
-                            output_args.append(
-                                "-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
+                        lb = profile.get('letter_box', 0)
+                        if lb != 0:
+                            ffmpeg_width = to_width
+                            ffmpeg_height = to_height
+                            if "reformat" not in p_tags:
+                                lb /= pixel_aspect
+                                if resolution_ratio != delivery_ratio:
+                                    ffmpeg_width = resolution_width
+                                    ffmpeg_height = int(
+                                        resolution_height * pixel_aspect)
+                            else:
+                                if resolution_ratio != delivery_ratio:
+                                    lb /= scale_factor
+                                else:
+                                    lb /= pixel_aspect
+
+                            output_args.append(str(
+                                "-filter:v scale={0}x{1}:flags=lanczos,"
+                                "setsar=1,drawbox=0:0:iw:"
+                                "round((ih-(iw*(1/{2})))/2):t=fill:"
+                                "c=black,drawbox=0:ih-round((ih-(iw*("
+                                "1/{2})))/2):iw:round((ih-(iw*(1/{2})))"
+                                "/2):t=fill:c=black").format(
+                                    ffmpeg_width, ffmpeg_height, lb))
 
                         # In case audio is longer than video.
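# --- The drawbox expression built above paints two horizontal black bars
# whose height is derived from the letter-box ratio ``lb``. The same
# arithmetic in plain Python; the frame size and ratio are illustrative
# assumptions.
width, height, lb = 1920, 1080, 2.35

# picture height of a 1:2.35 image at full frame width, then half of the
# leftover frame height per bar -- matching round((ih-(iw*(1/lb)))/2)
picture_height = width * (1.0 / lb)
bar_height = round((height - picture_height) / 2)

print(bar_height)  # ~131 px, painted once at the top and once at the bottom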
output_args.append("-shortest") # output filename output_args.append(full_output_path) + + self.log.debug( + "__ pixel_aspect: `{}`".format(pixel_aspect)) + self.log.debug( + "__ resolution_width: `{}`".format( + resolution_width)) + self.log.debug( + "__ resolution_height: `{}`".format( + resolution_height)) + + # scaling none square pixels and 1920 width + if "reformat" in p_tags: + if resolution_ratio < delivery_ratio: + self.log.debug("lower then delivery") + width_scale = int(to_width * scale_factor) + width_half_pad = int(( + to_width - width_scale)/2) + height_scale = to_height + height_half_pad = 0 + else: + self.log.debug("heigher then delivery") + width_scale = to_width + width_half_pad = 0 + scale_factor = float(to_width) / float( + resolution_width) + self.log.debug(scale_factor) + height_scale = int( + resolution_height * scale_factor) + height_half_pad = int( + (to_height - height_scale)/2) + + self.log.debug( + "__ width_scale: `{}`".format(width_scale)) + self.log.debug( + "__ width_half_pad: `{}`".format( + width_half_pad)) + self.log.debug( + "__ height_scale: `{}`".format( + height_scale)) + self.log.debug( + "__ height_half_pad: `{}`".format( + height_half_pad)) + + scaling_arg = str( + "scale={0}x{1}:flags=lanczos," + "pad={2}:{3}:{4}:{5}:black,setsar=1" + ).format(width_scale, height_scale, + to_width, to_height, + width_half_pad, + height_half_pad + ) + + vf_back = self.add_video_filter_args( + output_args, scaling_arg) + # add it to output_args + output_args.insert(0, vf_back) + + # baking lut file application + lut_path = instance.data.get("lutPath") + if lut_path and ("bake-lut" in p_tags): + # removing Gama info as it is all baked in lut + gamma = next((g for g in input_args + if "-gamma" in g), None) + if gamma: + input_args.remove(gamma) + + # create lut argument + lut_arg = "lut3d=file='{}'".format( + lut_path.replace( + "\\", "/").replace(":/", "\\:/") + ) + lut_arg += ",colormatrix=bt601:bt709" + + vf_back = self.add_video_filter_args( + output_args, lut_arg) + # add it to output_args + output_args.insert(0, vf_back) + self.log.info("Added Lut to ffmpeg command") + self.log.debug( + "_ output_args: `{}`".format(output_args)) + mov_args = [ os.path.join( os.environ.get( @@ -183,7 +309,11 @@ class ExtractReview(pyblish.api.InstancePlugin): 'ext': ext, 'files': repr_file, "tags": new_tags, - "outputName": name + "outputName": name, + "codec": codec_args, + "_profile": profile, + "resolutionHeight": resolution_height, + "resolutionWidth": resolution_width, }) if repre_new.get('preview'): repre_new.pop("preview") @@ -207,3 +337,40 @@ class ExtractReview(pyblish.api.InstancePlugin): instance.data["representations"] = representations_new self.log.debug("Families Out: `{}`".format(instance.data["families"])) + + def add_video_filter_args(self, args, inserting_arg): + """ + Fixing video filter argumets to be one long string + + Args: + args (list): list of string arguments + inserting_arg (str): string argument we want to add + (without flag `-vf`) + + Returns: + str: long joined argument to be added back to list of arguments + + """ + # find all video format settings + vf_settings = [p for p in args + for v in ["-filter:v", "-vf"] + if v in p] + self.log.debug("_ vf_settings: `{}`".format(vf_settings)) + + # remove them from output args list + for p in vf_settings: + self.log.debug("_ remove p: `{}`".format(p)) + args.remove(p) + self.log.debug("_ args: `{}`".format(args)) + + # strip them from all flags + vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "") 
+ for p in vf_settings] + + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + vf_fixed.insert(0, inserting_arg) + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + # create new video filter setting + vf_back = "-vf " + ",".join(vf_fixed) + + return vf_back diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py new file mode 100644 index 0000000000..9a720b77a9 --- /dev/null +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -0,0 +1,243 @@ +import os +import pype.api +import pyblish + + +class ExtractReviewSlate(pype.api.Extractor): + """ + Will add slate frame at the start of the video files + """ + + label = "Review with Slate frame" + order = pyblish.api.ExtractorOrder + 0.031 + families = ["slate"] + hosts = ["nuke", "maya", "shell"] + optional = True + + def process(self, instance): + inst_data = instance.data + if "representations" not in inst_data: + raise RuntimeError("Burnin needs already created mov to work on.") + + suffix = "_slate" + slate_path = inst_data.get("slateFrame") + ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg") + + to_width = 1920 + to_height = 1080 + resolution_width = inst_data.get("resolutionWidth", to_width) + resolution_height = inst_data.get("resolutionHeight", to_height) + pixel_aspect = inst_data.get("pixelAspect", 1) + fps = inst_data.get("fps") + + # defining image ratios + resolution_ratio = float(resolution_width / ( + resolution_height * pixel_aspect)) + delivery_ratio = float(to_width) / float(to_height) + self.log.debug(resolution_ratio) + self.log.debug(delivery_ratio) + + # get scale factor + scale_factor = to_height / ( + resolution_height * pixel_aspect) + self.log.debug(scale_factor) + + for i, repre in enumerate(inst_data["representations"]): + _remove_at_end = [] + self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre)) + + p_tags = repre.get("tags", []) + + if "slate-frame" not in p_tags: + continue + + stagingdir = repre["stagingDir"] + input_file = "{0}".format(repre["files"]) + + ext = os.path.splitext(input_file)[1] + output_file = input_file.replace(ext, "") + suffix + ext + + input_path = os.path.join( + os.path.normpath(stagingdir), repre["files"]) + self.log.debug("__ input_path: {}".format(input_path)) + _remove_at_end.append(input_path) + + output_path = os.path.join( + os.path.normpath(stagingdir), output_file) + self.log.debug("__ output_path: {}".format(output_path)) + + input_args = [] + output_args = [] + # overrides output file + input_args.append("-y") + # preset's input data + input_args.extend(repre["_profile"].get('input', [])) + input_args.append("-loop 1 -i {}".format(slate_path)) + input_args.extend([ + "-r {}".format(fps), + "-t 0.04"] + ) + + # output args + codec_args = repre["_profile"].get('codec', []) + output_args.extend(codec_args) + # preset's output data + output_args.extend(repre["_profile"].get('output', [])) + + # make sure colors are correct + output_args.extend([ + "-vf scale=out_color_matrix=bt709", + "-color_primaries bt709", + "-color_trc bt709", + "-colorspace bt709" + ]) + + # scaling none square pixels and 1920 width + if "reformat" in p_tags: + if resolution_ratio < delivery_ratio: + self.log.debug("lower then delivery") + width_scale = int(to_width * scale_factor) + width_half_pad = int(( + to_width - width_scale)/2) + height_scale = to_height + height_half_pad = 0 + else: + self.log.debug("heigher then delivery") + width_scale = to_width + width_half_pad = 0 + scale_factor = float(to_width) 
/ float(resolution_width) + self.log.debug(scale_factor) + height_scale = int( + resolution_height * scale_factor) + height_half_pad = int( + (to_height - height_scale)/2) + + self.log.debug( + "__ width_scale: `{}`".format(width_scale)) + self.log.debug( + "__ width_half_pad: `{}`".format(width_half_pad)) + self.log.debug( + "__ height_scale: `{}`".format(height_scale)) + self.log.debug( + "__ height_half_pad: `{}`".format(height_half_pad)) + + scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format( + width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad + ) + + vf_back = self.add_video_filter_args( + output_args, scaling_arg) + # add it to output_args + output_args.insert(0, vf_back) + + slate_v_path = slate_path.replace(".png", ext) + output_args.append(slate_v_path) + _remove_at_end.append(slate_v_path) + + slate_args = [ + ffmpeg_path, + " ".join(input_args), + " ".join(output_args) + ] + slate_subprcs_cmd = " ".join(slate_args) + + # run slate generation subprocess + self.log.debug("Slate Executing: {}".format(slate_subprcs_cmd)) + slate_output = pype.api.subprocess(slate_subprcs_cmd) + self.log.debug("Slate Output: {}".format(slate_output)) + + # create ffmpeg concat text file path + conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt" + conc_text_path = os.path.join( + os.path.normpath(stagingdir), conc_text_file) + _remove_at_end.append(conc_text_path) + self.log.debug("__ conc_text_path: {}".format(conc_text_path)) + + new_line = "\n" + with open(conc_text_path, "w") as conc_text_f: + conc_text_f.writelines([ + "file {}".format( + slate_v_path.replace("\\", "/")), + new_line, + "file {}".format(input_path.replace("\\", "/")) + ]) + + # concat slate and videos together + conc_input_args = ["-y", "-f concat", "-safe 0"] + conc_input_args.append("-i {}".format(conc_text_path)) + + conc_output_args = ["-c copy"] + conc_output_args.append(output_path) + + concat_args = [ + ffmpeg_path, + " ".join(conc_input_args), + " ".join(conc_output_args) + ] + concat_subprcs_cmd = " ".join(concat_args) + + # ffmpeg concat subprocess + self.log.debug("Executing concat: {}".format(concat_subprcs_cmd)) + concat_output = pype.api.subprocess(concat_subprcs_cmd) + self.log.debug("Output concat: {}".format(concat_output)) + + self.log.debug("__ repre[tags]: {}".format(repre["tags"])) + repre_update = { + "files": output_file, + "name": repre["name"], + "tags": [x for x in repre["tags"] if x != "delete"] + } + inst_data["representations"][i].update(repre_update) + self.log.debug( + "_ representation {}: `{}`".format( + i, inst_data["representations"][i])) + + # removing temp files + for f in _remove_at_end: + os.remove(f) + self.log.debug("Removed: `{}`".format(f)) + + # Remove any representations tagged for deletion. 
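# --- The slate join above relies on ffmpeg's concat demuxer: a small text
# file lists the inputs and "-c copy" concatenates them without re-encoding,
# which is why the slate still is first rendered with the same codec args as
# the review movie. A standalone sketch; the file names are illustrative
# assumptions.
import subprocess

with open("concat.txt", "w") as f:
    f.write("file slate.mov\nfile review.mov\n")

# "-f concat" enables the list-file input and "-safe 0" also allows absolute
# paths in it; "-c copy" stream-copies, so both inputs must share codec,
# resolution and timebase.
subprocess.check_call(
    "ffmpeg -y -f concat -safe 0 -i concat.txt -c copy out.mov", shell=True)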
+ for repre in inst_data.get("representations", []): + if "delete" in repre.get("tags", []): + self.log.debug("Removing representation: {}".format(repre)) + inst_data["representations"].remove(repre) + + self.log.debug(inst_data["representations"]) + + def add_video_filter_args(self, args, inserting_arg): + """ + Fixing video filter argumets to be one long string + + Args: + args (list): list of string arguments + inserting_arg (str): string argument we want to add + (without flag `-vf`) + + Returns: + str: long joined argument to be added back to list of arguments + + """ + # find all video format settings + vf_settings = [p for p in args + for v in ["-filter:v", "-vf"] + if v in p] + self.log.debug("_ vf_settings: `{}`".format(vf_settings)) + + # remove them from output args list + for p in vf_settings: + self.log.debug("_ remove p: `{}`".format(p)) + args.remove(p) + self.log.debug("_ args: `{}`".format(args)) + + # strip them from all flags + vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "") + for p in vf_settings] + + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + vf_fixed.insert(0, inserting_arg) + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + # create new video filter setting + vf_back = "-vf " + ",".join(vf_fixed) + + return vf_back diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py index b2f273ec5c..87b9e1a9bd 100644 --- a/pype/plugins/global/publish/integrate.py +++ b/pype/plugins/global/publish/integrate.py @@ -84,9 +84,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin): project = io.find_one({"type": "project"}) - asset = io.find_one({"type": "asset", - "name": ASSET, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": ASSET, + "parent": project["_id"] + }) assert all([project, asset]), ("Could not find current project or " "asset '%s'" % ASSET) @@ -94,10 +96,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin): subset = self.get_subset(asset, instance) # get next version - latest_version = io.find_one({"type": "version", - "parent": subset["_id"]}, - {"name": True}, - sort=[("name", -1)]) + latest_version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + {"name": True}, + sort=[("name", -1)] + ) next_version = 1 if latest_version is not None: @@ -318,9 +324,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin): def get_subset(self, asset, instance): - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"]}) + subset = io.find_one({ + "type": "subset", + "parent": asset["_id"], + "name": instance.data["subset"] + }) if subset is None: subset_name = instance.data["subset"] diff --git a/pype/plugins/global/publish/integrate_assumed_destination.py b/pype/plugins/global/publish/integrate_assumed_destination.py index a26529fc2c..d090e2711a 100644 --- a/pype/plugins/global/publish/integrate_assumed_destination.py +++ b/pype/plugins/global/publish/integrate_assumed_destination.py @@ -82,31 +82,40 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin): project_name = api.Session["AVALON_PROJECT"] a_template = anatomy.templates - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + {"type": "project", "name": project_name}, + projection={"config": True, "data": True} + ) template = a_template['publish']['path'] # anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - 
"parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, project_name)) - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index 0a1a1fd031..6bb5bd8f14 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -7,6 +7,7 @@ import errno import pyblish.api from avalon import api, io from avalon.vendor import filelink + # this is needed until speedcopy for linux is fixed if sys.platform == "win32": from speedcopy import copyfile @@ -70,6 +71,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "audio", "yetiRig", "yeticache", + "nukenodes", + "gizmo", + "source", + "matchmove", + "image" "source", "assembly" ] @@ -149,9 +155,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): io.install() project = io.find_one({"type": "project"}) - asset = io.find_one({"type": "asset", - "name": ASSET, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": ASSET, + "parent": project["_id"] + }) assert all([project, asset]), ("Could not find current project or " "asset '%s'" % ASSET) @@ -159,10 +167,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset = self.get_subset(asset, instance) # get next version - latest_version = io.find_one({"type": "version", - "parent": subset["_id"]}, - {"name": True}, - sort=[("name", -1)]) + latest_version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + {"name": True}, + sort=[("name", -1)] + ) next_version = 1 if latest_version is not None: @@ -171,16 +183,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if instance.data.get('version'): next_version = int(instance.data.get('version')) - # self.log.info("Verifying version from assumed destination") - - # assumed_data = instance.data["assumedTemplateData"] - # assumed_version = assumed_data["version"] - # if assumed_version != next_version: - # raise AttributeError("Assumed version 'v{0:03d}' does not match" - # "next version in database " - # "('v{1:03d}')".format(assumed_version, - # next_version)) - self.log.debug("Next version: v{0:03d}".format(next_version)) version_data = self.create_version_data(context, instance) @@ -266,6 +268,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "version": int(version["name"]), "hierarchy": hierarchy} + resolution_width = repre.get("resolutionWidth") + resolution_height = repre.get("resolutionHeight") + fps = instance.data.get("fps") + + if resolution_width: + template_data["resolution_width"] = resolution_width + if resolution_width: + template_data["resolution_height"] = resolution_height + if resolution_width: + template_data["fps"] = fps + files = repre['files'] if repre.get('stagingDir'): stagingdir = repre['stagingDir'] @@ -315,10 +328,13 @@ class 
IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = None if repre.get("frameStart"): - frame_start_padding = len(str( - repre.get("frameEnd"))) + frame_start_padding = anatomy.templates["render"]["padding"] index_frame_start = int(repre.get("frameStart")) + # exception for slate workflow + if "slate" in instance.data["families"]: + index_frame_start -= 1 + dst_padding_exp = src_padding_exp dst_start_frame = None for i in src_collection.indexes: @@ -353,7 +369,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): dst_head, dst_start_frame, dst_tail).replace("..", ".") - repre['published_path'] = dst + repre['published_path'] = self.unc_convert(dst) else: # Single file @@ -382,10 +398,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): instance.data["transfers"].append([src, dst]) - repre['published_path'] = dst + repre['published_path'] = self.unc_convert(dst) self.log.debug("__ dst: {}".format(dst)) representation = { + "_id": io.ObjectId(), "schema": "pype:representation-2.0", "type": "representation", "parent": version_id, @@ -410,8 +427,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): } } + if repre.get("outputName"): + representation["context"]["output"] = repre['outputName'] + if sequence_repre and repre.get("frameStart"): - representation['context']['frame'] = repre.get("frameStart") + representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart")) self.log.debug("__ representation: {}".format(representation)) destination_list.append(dst) @@ -425,6 +445,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("__ represNAME: {}".format(rep['name'])) self.log.debug("__ represPATH: {}".format(rep['published_path'])) io.insert_many(representations) + instance.data["published_representations"] = representations # self.log.debug("Representation: {}".format(representations)) self.log.info("Registered {} items".format(len(representations))) @@ -456,6 +477,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("Hardlinking file .. {} -> {}".format(src, dest)) self.hardlink_file(src, dest) + def unc_convert(self, path): + self.log.debug("> __ path: `{}`".format(path)) + drive, _path = os.path.splitdrive(path) + self.log.debug("> __ drive, _path: `{}`, `{}`".format(drive, _path)) + + if not os.path.exists(drive + "/"): + self.log.info("Converting to unc from environments ..") + + path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH") + path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT") + + if "/" in path_mount: + path = path.replace(path_mount[0:-1], path_replace) + else: + path = path.replace(path_mount, path_replace) + return path + def copy_file(self, src, dst): """ Copy given source to destination @@ -465,8 +503,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): Returns: None """ - src = os.path.normpath(src) - dst = os.path.normpath(dst) + src = self.unc_convert(src) + dst = self.unc_convert(dst) self.log.debug("Copying file .. 
{} -> {}".format(src, dst)) dirname = os.path.dirname(dst) @@ -487,6 +525,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def hardlink_file(self, src, dst): dirname = os.path.dirname(dst) + + src = self.unc_convert(src) + dst = self.unc_convert(dst) + try: os.makedirs(dirname) except OSError as e: @@ -499,9 +541,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): filelink.create(src, dst, filelink.HARDLINK) def get_subset(self, asset, instance): - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"]}) + subset = io.find_one({ + "type": "subset", + "parent": asset["_id"], + "name": instance.data["subset"] + }) if subset is None: subset_name = instance.data["subset"] @@ -524,13 +568,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # add group if available if instance.data.get("subsetGroup"): - subset["data"].update( - {"subsetGroup": instance.data.get("subsetGroup")} - ) io.update_many({ 'type': 'subset', '_id': io.ObjectId(subset["_id"]) - }, {'$set': subset["data"]} + }, {'$set': {'data.subsetGroup': + instance.data.get('subsetGroup')}} ) return subset @@ -594,7 +636,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "source": source, "comment": context.data.get("comment"), "machine": context.data.get("machine"), - "fps": context.data.get("fps")} + "fps": context.data.get( + "fps", instance.data.get("fps"))} # Include optional data if present in optionals = [ diff --git a/pype/plugins/global/publish/integrate_rendered_frames.py b/pype/plugins/global/publish/integrate_rendered_frames.py index 086b03802e..5819051146 100644 --- a/pype/plugins/global/publish/integrate_rendered_frames.py +++ b/pype/plugins/global/publish/integrate_rendered_frames.py @@ -88,9 +88,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin): project = io.find_one({"type": "project"}) - asset = io.find_one({"type": "asset", - "name": ASSET, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": ASSET, + "parent": project["_id"] + }) assert all([project, asset]), ("Could not find current project or " "asset '%s'" % ASSET) @@ -98,10 +100,14 @@ class IntegrateFrames(pyblish.api.InstancePlugin): subset = self.get_subset(asset, instance) # get next version - latest_version = io.find_one({"type": "version", - "parent": subset["_id"]}, - {"name": True}, - sort=[("name", -1)]) + latest_version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + {"name": True}, + sort=[("name", -1)] + ) next_version = 1 if latest_version is not None: @@ -251,9 +257,6 @@ class IntegrateFrames(pyblish.api.InstancePlugin): self.log.debug("path_to_save: {}".format(path_to_save)) - - - representation = { "schema": "pype:representation-2.0", "type": "representation", @@ -332,9 +335,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin): def get_subset(self, asset, instance): - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"]}) + subset = io.find_one({ + "type": "subset", + "parent": asset["_id"], + "name": instance.data["subset"] + }) if subset is None: subset_name = instance.data["subset"] diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py new file mode 100644 index 0000000000..1c4399b386 --- /dev/null +++ b/pype/plugins/global/publish/integrate_thumbnail.py @@ -0,0 +1,139 @@ +import os +import sys +import errno +import shutil +import copy + +import six +import pyblish.api +from bson.objectid import 
ObjectId + +from avalon import api, io + + +class IntegrateThumbnails(pyblish.api.InstancePlugin): + """Integrate Thumbnails.""" + + label = "Integrate Thumbnails" + order = pyblish.api.IntegratorOrder + 0.01 + families = ["review"] + + def process(self, instance): + + if not os.environ.get("AVALON_THUMBNAIL_ROOT"): + self.log.info("AVALON_THUMBNAIL_ROOT is not set." + " Skipping thumbnail integration.") + return + + published_repres = instance.data.get("published_representations") + if not published_repres: + self.log.debug( + "There are not published representation ids on the instance." + ) + return + + project_name = api.Session["AVALON_PROJECT"] + + anatomy = instance.context.data["anatomy"] + if "publish" not in anatomy.templates: + raise AssertionError("Anatomy does not have set publish key!") + + if "thumbnail" not in anatomy.templates["publish"]: + raise AssertionError(( + "There is not set \"thumbnail\" template for project \"{}\"" + ).format(project_name)) + + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + + io.install() + + thumb_repre = None + for repre in published_repres: + if repre["name"].lower() == "thumbnail": + thumb_repre = repre + break + + if not thumb_repre: + self.log.debug( + "There is not representation with name \"thumbnail\"" + ) + return + + version = io.find_one({"_id": thumb_repre["parent"]}) + if not version: + raise AssertionError( + "There does not exist version with id {}".format( + str(thumb_repre["parent"]) + ) + ) + + # Get full path to thumbnail file from representation + src_full_path = os.path.normpath(thumb_repre["data"]["path"]) + if not os.path.exists(src_full_path): + self.log.warning("Thumbnail file was not found. Path: {}".format( + src_full_path + )) + return + + filename, file_extension = os.path.splitext(src_full_path) + # Create id for mongo entity now to fill anatomy template + thumbnail_id = ObjectId() + + # Prepare anatomy template fill data + template_data = copy.deepcopy(thumb_repre["context"]) + template_data.update({ + "_id": str(thumbnail_id), + "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), + "ext": file_extension, + "thumbnail_type": "thumbnail" + }) + + anatomy_filled = anatomy.format(template_data) + final_path = anatomy_filled.get("publish", {}).get("thumbnail") + if not final_path: + raise AssertionError(( + "Anatomy template was not filled with entered data" + "\nTemplate: {} " + "\nData: {}" + ).format(thumbnail_template, str(template_data))) + + dst_full_path = os.path.normpath(final_path) + self.log.debug( + "Copying file .. 
{} -> {}".format(src_full_path, dst_full_path) + ) + dirname = os.path.dirname(dst_full_path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno != errno.EEXIST: + tp, value, tb = sys.exc_info() + six.reraise(tp, value, tb) + + shutil.copy(src_full_path, dst_full_path) + + # Clean template data from keys that are dynamic + template_data.pop("_id") + template_data.pop("thumbnail_root") + + thumbnail_entity = { + "_id": thumbnail_id, + "type": "thumbnail", + "schema": "pype:thumbnail-1.0", + "data": { + "template": thumbnail_template, + "template_data": template_data + } + } + # Create thumbnail entity + io.insert_one(thumbnail_entity) + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_entity)) + ) + # Set thumbnail id for version + io.update_many( + {"_id": version["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( + version["name"], str(version["_id"]) + )) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 2a254b015c..faf4aaef93 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -21,20 +21,34 @@ def _get_script(): if module_path.endswith(".pyc"): module_path = module_path[:-len(".pyc")] + ".py" + module_path = os.path.normpath(module_path) + mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT']) + network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH']) + + module_path = module_path.replace(mount_root, network_root) + return module_path # Logic to retrieve latest files concerning extendFrames def get_latest_version(asset_name, subset_name, family): # Get asset - asset_name = io.find_one({"type": "asset", - "name": asset_name}, - projection={"name": True}) + asset_name = io.find_one( + { + "type": "asset", + "name": asset_name + }, + projection={"name": True} + ) - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset_name["_id"]}, - projection={"_id": True, "name": True}) + subset = io.find_one( + { + "type": "subset", + "name": subset_name, + "parent": asset_name["_id"] + }, + projection={"_id": True, "name": True} + ) # Check if subsets actually exists (pre-run check) assert subset, "No subsets found, please publish with `extendFrames` off" @@ -45,11 +59,15 @@ def get_latest_version(asset_name, subset_name, family): "data.endFrame": True, "parent": True} - version = io.find_one({"type": "version", - "parent": subset["_id"], - "data.families": family}, - projection=version_projection, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"], + "data.families": family + }, + projection=version_projection, + sort=[("name", -1)] + ) assert version, "No version found, this is a bug" @@ -143,7 +161,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", - "PYPE_ROOT" + "PYPE_ROOT", + "PYPE_METADATA_FILE", + "PYPE_STUDIO_PROJECTS_PATH", + "PYPE_STUDIO_PROJECTS_MOUNT" ] def _submit_deadline_post_job(self, instance, job): @@ -154,7 +175,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ data = instance.data.copy() subset = data["subset"] - state = data.get("publishJobState", "Suspended") job_name = "{batch} - {subset} [publish image sequence]".format( batch=job["Props"]["Name"], subset=subset @@ -164,6 +184,13 @@ class 
ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): output_dir = instance.data["outputDir"] metadata_path = os.path.join(output_dir, metadata_filename) + metadata_path = os.path.normpath(metadata_path) + mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT']) + network_root = os.path.normpath( + os.environ['PYPE_STUDIO_PROJECTS_PATH']) + + metadata_path = metadata_path.replace(mount_root, network_root) + # Generate the payload for Deadline submission payload = { "JobInfo": { @@ -174,13 +201,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "JobDependency0": job["_id"], "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), - "InitialStatus": state, "Priority": job["Props"]["Pri"] }, "PluginInfo": { "Version": "3.6", "ScriptFile": _get_script(), - "Arguments": '--paths "{}"'.format(metadata_path), + "Arguments": "", "SingleFrameOnly": "True" }, @@ -192,6 +218,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # job so they use the same environment environment = job["Props"].get("Env", {}) + environment["PYPE_METADATA_FILE"] = metadata_path i = 0 for index, key in enumerate(environment): self.log.info("KEY: {}".format(key)) @@ -282,6 +309,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): relative_path = os.path.relpath(source, api.registered_root()) source = os.path.join("{root}", relative_path).replace("\\", "/") + # find subsets and version to attach render to + attach_to = instance.data.get("attachTo") + attach_subset_versions = [] + if attach_to: + for subset in attach_to: + for instance in context: + if instance.data["subset"] != subset["subset"]: + continue + attach_subset_versions.append( + {"version": instance.data["version"], + "subset": subset["subset"], + "family": subset["family"]}) + # Write metadata for publish job metadata = { "asset": asset, @@ -293,6 +333,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "source": source, "user": context.data["user"], "version": context.data["version"], + "intent": context.data.get("intent"), + "comment": context.data.get("comment"), # Optional metadata (for debugging) "metadata": { "instance": data, @@ -301,6 +343,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): } } + if api.Session["AVALON_APP"] == "nuke": + metadata['subset'] = subset + if submission_type == "muster": ftrack = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), diff --git a/pype/plugins/global/publish/validate_ffmpeg_installed.py b/pype/plugins/global/publish/validate_ffmpeg_installed.py index 6d5ffba1e1..df7c330e95 100644 --- a/pype/plugins/global/publish/validate_ffmpeg_installed.py +++ b/pype/plugins/global/publish/validate_ffmpeg_installed.py @@ -27,6 +27,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator): return True def process(self, instance): + self.log.info("ffmpeg path: `{}`".format( + os.environ.get("FFMPEG_PATH", ""))) if self.is_tool( os.path.join( os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False: diff --git a/pype/plugins/launcher/actions/Aport.py b/pype/plugins/launcher/actions/Aport.py index 94f14cd0d3..0ecd07c49a 100644 --- a/pype/plugins/launcher/actions/Aport.py +++ b/pype/plugins/launcher/actions/Aport.py @@ -1,7 +1,4 @@ import os -import sys -from avalon import io -from pprint import pprint import acre from avalon import api, lib diff --git a/pype/plugins/launcher/actions/unused/PremierePro.py b/pype/plugins/launcher/actions/unused/PremierePro.py index 97d693ffbb..57aa4eb2cb 100644 --- 
a/pype/plugins/launcher/actions/unused/PremierePro.py
+++ b/pype/plugins/launcher/actions/unused/PremierePro.py
@@ -1,10 +1,9 @@
 import os
-import sys
-from pprint import pprint
 
 import acre
 
 from avalon import api, lib, io
 import pype.api as pype
+from pypeapp import Anatomy
 
 
 class PremierePro(api.Action):
diff --git a/pype/plugins/maya/create/create_ass.py b/pype/plugins/maya/create/create_ass.py
index 84b42e9b20..6d8eda1a40 100644
--- a/pype/plugins/maya/create/create_ass.py
+++ b/pype/plugins/maya/create/create_ass.py
@@ -1,6 +1,7 @@
 from collections import OrderedDict
 
 import avalon.maya
+from pype.maya import lib
 
 from maya import cmds
 
@@ -14,10 +15,21 @@ class CreateAss(avalon.maya.Creator):
     icon = "cube"
     defaults = ['Main']
 
+    def __init__(self, *args, **kwargs):
+        super(CreateAss, self).__init__(*args, **kwargs)
+
+        # Add animation data
+        self.data.update(lib.collect_animation_data())
+
+        # Whether to export the stand-in as a frame sequence
+        self.data["exportSequence"] = False
+
     def process(self):
         instance = super(CreateAss, self).process()
-        data = OrderedDict(**self.data)
+        # data = OrderedDict(**self.data)
+
+        nodes = list()
@@ -30,4 +42,6 @@ class CreateAss(avalon.maya.Creator):
         assProxy = cmds.sets(name="proxy_SET", empty=True)
         cmds.sets([assContent, assProxy], forceElement=instance)
 
-        self.data = data
+        # self.log.info(data)
+        #
+        # self.data = data
diff --git a/pype/plugins/maya/load/actions.py b/pype/plugins/maya/load/actions.py
index 9f6a5c4d34..77d18b0ee3 100644
--- a/pype/plugins/maya/load/actions.py
+++ b/pype/plugins/maya/load/actions.py
@@ -140,9 +140,9 @@ class ImportMayaLoader(api.Loader):
 
         message = "Are you sure you want import this"
         state = QtWidgets.QMessageBox.warning(None,
-                                              "Are you sure?",
-                                              message,
-                                              buttons=buttons,
-                                              defaultButton=accept)
+                                              "Are you sure?",
+                                              message,
+                                              buttons=buttons,
+                                              defaultButton=accept)
 
         return state == accept
diff --git a/pype/plugins/maya/load/load_ass.py b/pype/plugins/maya/load/load_ass.py
index 2960e4403e..83dd80bd4e 100644
--- a/pype/plugins/maya/load/load_ass.py
+++ b/pype/plugins/maya/load/load_ass.py
@@ -2,6 +2,7 @@ from avalon import api
 import pype.maya.plugin
 import os
 from pypeapp import config
+import clique
 
 
 class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
@@ -21,6 +22,13 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
         from avalon import maya
         import pymel.core as pm
 
+        version = context['version']
+        version_data = version.get("data", {})
+
+        self.log.info("version_data: {}\n".format(version_data))
+
+        frameStart = version_data.get("frameStart", None)
+
         try:
             family = context["representation"]["context"]["family"]
         except ValueError:
@@ -30,7 +38,24 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
 
         groupName = "{}:{}".format(namespace, name)
         path = self.fname
-        proxyPath = os.path.splitext(path)[0] + ".ma"
+        proxyPath_base = os.path.splitext(path)[0]
+
+        if frameStart is not None:
+            proxyPath_base = os.path.splitext(proxyPath_base)[0]
+
+            publish_folder = os.path.split(path)[0]
+            files_in_folder = os.listdir(publish_folder)
+            collections, remainder = clique.assemble(files_in_folder)
+
+            if collections:
+                hashes = collections[0].padding * '#'
+                coll = collections[0].format('{head}[index]{tail}')
+                filename = coll.replace('[index]', hashes)
+
+                path = os.path.join(publish_folder, filename)
+
+        proxyPath = proxyPath_base + ".ma"
+        self.log.info("proxyPath: {}".format(proxyPath))
 
         nodes = cmds.file(proxyPath,
                           namespace=namespace,
@@ -147,6 +172,13 @@ class AssStandinLoader(api.Loader):
         import mtoa.ui.arnoldmenu
         import pymel.core as pm
 
+        version =
context['version'] + version_data = version.get("data", {}) + + self.log.info("version_data: {}\n".format(version_data)) + + frameStart = version_data.get("frameStart", None) + asset = context['asset']['name'] namespace = namespace or lib.unique_namespace( asset + "_", @@ -182,6 +214,8 @@ class AssStandinLoader(api.Loader): # Set the standin filepath standinShape.dso.set(self.fname) + if frameStart is not None: + standinShape.useFrameExtension.set(1) nodes = [root, standin] self[:] = nodes @@ -199,14 +233,23 @@ class AssStandinLoader(api.Loader): path = api.get_representation_path(representation) - # Update the standin - members = pm.sets(container['objectName'], query=True) - standins = pm.ls(members, type="AiStandIn", long=True) + files_in_path = os.listdir(os.path.split(path)[0]) + sequence = 0 + collections, remainder = clique.assemble(files_in_path) + if collections: + sequence = 1 - assert len(caches) == 1, "This is a bug" + # Update the standin + standins = list() + members = pm.sets(container['objectName'], query=True) + for member in members: + shape = member.getShape() + if (shape and shape.type() == "aiStandIn"): + standins.append(shape) for standin in standins: - standin.cacheFileName.set(path) + standin.dso.set(path) + standin.useFrameExtension.set(sequence) container = pm.PyNode(container["objectName"]) container.representation.set(str(representation["_id"])) diff --git a/pype/plugins/maya/load/load_camera.py b/pype/plugins/maya/load/load_camera.py deleted file mode 100644 index e9bf265b98..0000000000 --- a/pype/plugins/maya/load/load_camera.py +++ /dev/null @@ -1,62 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - -class CameraLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader of Alembic for the pype.camera family""" - - families = ["camera"] - label = "Reference camera" - representations = ["abc", "ma"] - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - # Get family type from the context - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "camera" - - cmds.loadPlugin("AbcImport.mll", quiet=True) - groupName = "{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - sharedReferenceFile=False, - groupReference=True, - groupName="{}:{}".format(namespace, name), - reference=True, - returnNewNodes=True) - - cameras = cmds.ls(nodes, type="camera") - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - # Check the Maya version, lockTransform has been introduced since - # Maya 2016.5 Ext 2 - version = int(cmds.about(version=True)) - if version >= 2016: - for camera in cameras: - cmds.camera(camera, edit=True, lockTransform=True) - else: - self.log.warning("This version of Maya does not support locking of" - " transforms of cameras.") - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_fbx.py b/pype/plugins/maya/load/load_fbx.py deleted file mode 100644 index 14df300c3c..0000000000 --- a/pype/plugins/maya/load/load_fbx.py +++ /dev/null @@ -1,54 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - 
-class FBXLoader(pype.maya.plugin.ReferenceLoader): - """Load the FBX""" - - families = ["fbx"] - representations = ["fbx"] - - label = "Reference FBX" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "fbx" - - # Ensure FBX plug-in is loaded - cmds.loadPlugin("fbxmaya", quiet=True) - - with maya.maintained_selection(): - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) - - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_look.py b/pype/plugins/maya/load/load_look.py index b1c88bcd18..04ac9b23e4 100644 --- a/pype/plugins/maya/load/load_look.py +++ b/pype/plugins/maya/load/load_look.py @@ -116,9 +116,11 @@ class LookLoader(pype.maya.plugin.ReferenceLoader): shapes=True)) nodes = set(nodes_list) - json_representation = io.find_one({"type": "representation", - "parent": representation['parent'], - "name": "json"}) + json_representation = io.find_one({ + "type": "representation", + "parent": representation['parent'], + "name": "json" + }) # Load relationships shader_relation = api.get_representation_path(json_representation) diff --git a/pype/plugins/maya/load/load_matchmove.py b/pype/plugins/maya/load/load_matchmove.py new file mode 100644 index 0000000000..abc702cde8 --- /dev/null +++ b/pype/plugins/maya/load/load_matchmove.py @@ -0,0 +1,30 @@ +from avalon import api +from maya import mel + + +class MatchmoveLoader(api.Loader): + """ + This will run matchmove script to create track in scene. 
+ + Supported script types are .py and .mel + """ + + families = ["matchmove"] + representations = ["py", "mel"] + defaults = ["Camera", "Object", "Mocap"] + + label = "Run matchmove script" + icon = "empire" + color = "orange" + + def load(self, context, name, namespace, data): + if self.fname.lower().endswith(".py"): + exec(open(self.fname).read()) + + elif self.fname.lower().endswith(".mel"): + mel.eval('source "{}"'.format(self.fname)) + + else: + self.log.error("Unsupported script type") + + return True diff --git a/pype/plugins/maya/load/load_mayaascii.py b/pype/plugins/maya/load/load_mayaascii.py deleted file mode 100644 index b9a5de2782..0000000000 --- a/pype/plugins/maya/load/load_mayaascii.py +++ /dev/null @@ -1,68 +0,0 @@ -import pype.maya.plugin -from pypeapp import config -import os - - -class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader): - """Load the model""" - - families = ["mayaAscii", - "setdress", - "layout"] - representations = ["ma"] - - label = "Reference Maya Ascii" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "model" - - with maya.maintained_selection(): - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) - - self[:] = nodes - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - cmds.setAttr(groupName + ".displayHandle", 1) - # get bounding box - bbox = cmds.exactWorldBoundingBox(groupName) - # get pivot position on world space - pivot = cmds.xform(groupName, q=True, sp=True, ws=True) - # center of bounding box - cx = (bbox[0] + bbox[3]) / 2 - cy = (bbox[1] + bbox[4]) / 2 - cz = (bbox[2] + bbox[5]) / 2 - # add pivot position to calculate offset - cx = cx + pivot[0] - cy = cy + pivot[1] - cz = cz + pivot[2] - # set selection handle offset to center of bounding box - cmds.setAttr(groupName + ".selectHandleX", cx) - cmds.setAttr(groupName + ".selectHandleY", cy) - cmds.setAttr(groupName + ".selectHandleZ", cz) - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index c17538c57d..cbd1da7cbd 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -1,15 +1,22 @@ import pype.maya.plugin +from avalon import api, maya +from maya import cmds import os from pypeapp import config -reload(config) -import pype.maya.plugin -reload(pype.maya.plugin) + class ReferenceLoader(pype.maya.plugin.ReferenceLoader): """Load the model""" - families = ["model", "pointcache", "animation"] - representations = ["ma", "abc"] + families = ["model", + "pointcache", + "animation", + "mayaAscii", + "setdress", + "layout", + "camera", + "rig"] + representations = ["ma", "abc", "fbx"] tool_names = ["loader"] label = "Reference" @@ -22,7 +29,6 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): from avalon import maya import pymel.core as pm - try: family = 
context["representation"]["context"]["family"] except ValueError: @@ -40,7 +46,7 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): reference=True, returnNewNodes=True) - namespace = cmds.referenceQuery(nodes[0], namespace=True) + # namespace = cmds.referenceQuery(nodes[0], namespace=True) shapes = cmds.ls(nodes, shapes=True, long=True) @@ -57,12 +63,12 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): for node in newNodes: try: roots.add(pm.PyNode(node).getAllParents()[-2]) - except: + except: # noqa: E722 pass for root in roots: root.setParent(world=True) - groupNode.root().zeroTransformPivots() + groupNode.zeroTransformPivots() for root in roots: root.setParent(groupNode) @@ -95,23 +101,39 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) + if data.get("post_process", True): + if family == "rig": + self._post_process_rig(name, namespace, context, data) + return newNodes def switch(self, container, representation): self.update(container, representation) + def _post_process_rig(self, name, namespace, context, data): -# for backwards compatibility -class AbcLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["pointcache", "animation"] - representations = ["abc"] - tool_names = [] + output = next((node for node in self if + node.endswith("out_SET")), None) + controls = next((node for node in self if + node.endswith("controls_SET")), None) + assert output, "No out_SET in rig, this is a bug." + assert controls, "No controls_SET in rig, this is a bug." -# for backwards compatibility -class ModelLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["model", "pointcache"] - representations = ["abc"] - tool_names = [] + # Find the roots amongst the loaded nodes + roots = cmds.ls(self[:], assemblies=True, long=True) + assert roots, "No root nodes in rig, this is a bug." + + asset = api.Session["AVALON_ASSET"] + dependency = str(context["representation"]["_id"]) + + self.log.info("Creating subset: {}".format(namespace)) + + # Create the animation instance + with maya.maintained_selection(): + cmds.select([output, controls] + roots, noExpand=True) + api.create(name=namespace, + asset=asset, + family="animation", + options={"useSelection": True}, + data={"dependencies": dependency}) diff --git a/pype/plugins/maya/load/load_rig.py b/pype/plugins/maya/load/load_rig.py deleted file mode 100644 index fc6e666ac6..0000000000 --- a/pype/plugins/maya/load/load_rig.py +++ /dev/null @@ -1,95 +0,0 @@ -from maya import cmds - -import pype.maya.plugin -from avalon import api, maya -import os -from pypeapp import config - - -class RigLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader for rigs - - This automatically creates an instance for animators upon load. 
- - """ - - families = ["rig"] - representations = ["ma"] - - label = "Reference rig" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "rig" - - groupName = "{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName=groupName) - - cmds.xform(groupName, pivots=(0, 0, 0)) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) - - newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) - - # Store for post-process - self[:] = newNodes - if data.get("post_process", True): - self._post_process(name, namespace, context, data) - - return newNodes - - def _post_process(self, name, namespace, context, data): - - # TODO(marcus): We are hardcoding the name "out_SET" here. - # Better register this keyword, so that it can be used - # elsewhere, such as in the Integrator plug-in, - # without duplication. - - output = next((node for node in self if - node.endswith("out_SET")), None) - controls = next((node for node in self if - node.endswith("controls_SET")), None) - - assert output, "No out_SET in rig, this is a bug." - assert controls, "No controls_SET in rig, this is a bug." - - # Find the roots amongst the loaded nodes - roots = cmds.ls(self[:], assemblies=True, long=True) - assert roots, "No root nodes in rig, this is a bug." 
- - asset = api.Session["AVALON_ASSET"] - dependency = str(context["representation"]["_id"]) - - # Create the animation instance - with maya.maintained_selection(): - cmds.select([output, controls] + roots, noExpand=True) - api.create(name=namespace, - asset=asset, - family="animation", - options={"useSelection": True}, - data={"dependencies": dependency}) - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_vrayproxy.py b/pype/plugins/maya/load/load_vrayproxy.py index 9b07dc7e30..35d93676a0 100644 --- a/pype/plugins/maya/load/load_vrayproxy.py +++ b/pype/plugins/maya/load/load_vrayproxy.py @@ -117,7 +117,7 @@ class VRayProxyLoader(api.Loader): vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name)) mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True, - name="{}_VRMM".format(name)) + name="{}_VRMM".format(name)) vray_mat_sg = cmds.sets(name="{}_VRSG".format(name), empty=True, renderable=True, diff --git a/pype/plugins/maya/publish/collect_ass.py b/pype/plugins/maya/publish/collect_ass.py index c0174e7026..8e6691120a 100644 --- a/pype/plugins/maya/publish/collect_ass.py +++ b/pype/plugins/maya/publish/collect_ass.py @@ -21,15 +21,17 @@ class CollectAssData(pyblish.api.InstancePlugin): objsets = instance.data['setMembers'] for objset in objsets: + objset = str(objset) members = cmds.sets(objset, query=True) if members is None: self.log.warning("Skipped empty instance: \"%s\" " % objset) continue - if objset == "content_SET": + if "content_SET" in objset: instance.data['setMembers'] = members - elif objset == "proxy_SET": + self.log.debug('content members: {}'.format(members)) + elif objset.startswith("proxy_SET"): assert len(members) == 1, "You have multiple proxy meshes, please only use one" instance.data['proxy'] = members - + self.log.debug('proxy members: {}'.format(members)) self.log.debug("data: {}".format(instance.data)) diff --git a/pype/plugins/maya/publish/collect_look.py b/pype/plugins/maya/publish/collect_look.py index 618f2749a4..7a5fea776c 100644 --- a/pype/plugins/maya/publish/collect_look.py +++ b/pype/plugins/maya/publish/collect_look.py @@ -219,10 +219,6 @@ class CollectLook(pyblish.api.InstancePlugin): with lib.renderlayer(instance.data["renderlayer"]): self.collect(instance) - # make ftrack publishable - self.maketx = instance.data.get('maketx', True) - instance.data['maketx'] = self.maketx - self.log.info('maketx: {}'.format(self.maketx)) def collect(self, instance): @@ -297,9 +293,11 @@ class CollectLook(pyblish.api.InstancePlugin): self.log.info("Collected file nodes:\n{}".format(files)) # Collect textures if any file nodes are found - instance.data["resources"] = [self.collect_resource(n) - for n in files] - self.log.info("Collected resources:\n{}".format(instance.data["resources"])) + instance.data["resources"] = [] + for n in files: + instance.data["resources"].append(self.collect_resource(n)) + + self.log.info("Collected resources: {}".format(instance.data["resources"])) # Log a warning when no relevant sets were retrieved for the look. 
if not instance.data["lookData"]["relationships"]: @@ -423,7 +421,7 @@ class CollectLook(pyblish.api.InstancePlugin): self.log.debug("processing: {}".format(node)) if cmds.nodeType(node) == 'file': - self.log.debug("file node") + self.log.debug(" - file node") attribute = "{}.fileTextureName".format(node) computed_attribute = "{}.computedFileTextureNamePattern".format(node) elif cmds.nodeType(node) == 'aiImage': @@ -431,7 +429,7 @@ class CollectLook(pyblish.api.InstancePlugin): attribute = "{}.filename".format(node) computed_attribute = attribute source = cmds.getAttr(attribute) - + self.log.info(" - file source: {}".format(source)) color_space_attr = "{}.colorSpace".format(node) color_space = cmds.getAttr(color_space_attr) # Compare with the computed file path, e.g. the one with the @@ -455,6 +453,13 @@ class CollectLook(pyblish.api.InstancePlugin): if len(files) == 0: self.log.error("No valid files found from node `%s`" % node) + self.log.info("collection of resource done:") + self.log.info(" - node: {}".format(node)) + self.log.info(" - attribute: {}".format(attribute)) + self.log.info(" - source: {}".format(source)) + self.log.info(" - file: {}".format(files)) + self.log.info(" - color space: {}".format(color_space)) + # Define the resource return {"node": node, "attribute": attribute, diff --git a/pype/plugins/maya/publish/collect_yeti_rig.py b/pype/plugins/maya/publish/collect_yeti_rig.py index 7ab5649c0b..c743b2c00b 100644 --- a/pype/plugins/maya/publish/collect_yeti_rig.py +++ b/pype/plugins/maya/publish/collect_yeti_rig.py @@ -119,11 +119,15 @@ class CollectYetiRig(pyblish.api.InstancePlugin): texture_filenames = [] if image_search_paths: + # TODO: Somehow this uses OS environment path separator, `:` vs `;` # Later on check whether this is pipeline OS cross-compatible. image_search_paths = [p for p in image_search_paths.split(os.path.pathsep) if p] + # find all ${TOKEN} tokens and replace them with $TOKEN env. variable + image_search_paths = self._replace_tokens(image_search_paths) + # List all related textures texture_filenames = cmds.pgYetiCommand(node, listTextures=True) self.log.info("Found %i texture(s)" % len(texture_filenames)) @@ -140,6 +144,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin): "atttribute'" % node) # Collect all texture files + # find all ${TOKEN} tokens and replace them with $TOKEN env. 
variable + texture_filenames = self._replace_tokens(texture_filenames) + for texture in texture_filenames: + files = [] @@ -283,3 +289,20 @@ class CollectYetiRig(pyblish.api.InstancePlugin): collection, remainder = clique.assemble(files, patterns=pattern) return collection + + def _replace_tokens(self, strings): + env_re = re.compile(r"\$\{(\w+)\}") + + replaced = [] + for s in strings: + matches = re.finditer(env_re, s) + for m in matches: + try: + s = s.replace(m.group(), os.environ[m.group(1)]) + except KeyError: + msg = "Cannot find requested {} in environment".format( + m.group(1)) + self.log.error(msg) + raise RuntimeError(msg) + replaced.append(s) + return replaced diff --git a/pype/plugins/maya/publish/extract_ass.py b/pype/plugins/maya/publish/extract_ass.py index 1fed6c8dd7..4cf394aefe 100644 --- a/pype/plugins/maya/publish/extract_ass.py +++ b/pype/plugins/maya/publish/extract_ass.py @@ -17,11 +17,15 @@ class ExtractAssStandin(pype.api.Extractor): label = "Ass Standin (.ass)" hosts = ["maya"] families = ["ass"] + asciiAss = False def process(self, instance): + sequence = instance.data.get("exportSequence", False) + staging_dir = self.staging_dir(instance) filename = "{}.ass".format(instance.name) + filenames = list() file_path = os.path.join(staging_dir, filename) # Write out .ass file @@ -29,13 +33,49 @@ class ExtractAssStandin(pype.api.Extractor): with avalon.maya.maintained_selection(): self.log.info("Writing: {}".format(instance.data["setMembers"])) cmds.select(instance.data["setMembers"], noExpand=True) - cmds.arnoldExportAss( filename=file_path, - selected=True, - asciiAss=True, - shadowLinks=True, - lightLinks=True, - boundingBox=True - ) + + if sequence: + self.log.info("Extracting ass sequence") + + # Collect the start and end including handles + start = instance.data.get("frameStart", 1) + end = instance.data.get("frameEnd", 1) + handles = instance.data.get("handles", 0) + step = instance.data.get("step", 1) + if handles: + start -= handles + end += handles + + exported_files = cmds.arnoldExportAss(filename=file_path, + selected=True, + asciiAss=self.asciiAss, + shadowLinks=True, + lightLinks=True, + boundingBox=True, + startFrame=start, + endFrame=end, + frameStep=step + ) + for file in exported_files: + filenames.append(os.path.split(file)[1]) + self.log.info("Exported: {}".format(filenames)) + else: + self.log.info("Extracting ass") + cmds.arnoldExportAss(filename=file_path, + selected=True, + asciiAss=self.asciiAss, + shadowLinks=True, + lightLinks=True, + boundingBox=True + ) + self.log.info("Extracted {}".format(filename)) + filenames = filename + optionals = [ + "frameStart", "frameEnd", "step", "handles", + "handleEnd", "handleStart" + ] + for key in optionals: + instance.data.pop(key, None) if "representations" not in instance.data: instance.data["representations"] = [] @@ -43,9 +83,13 @@ representation = { 'name': 'ass', 'ext': 'ass', - 'files': filename, + 'files': filenames, "stagingDir": staging_dir } + + if sequence: + representation['frameStart'] = start + instance.data["representations"].append(representation) self.log.info("Extracted instance '%s' to: %s"
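A condensed, standalone equivalent of the _replace_tokens helper above, using re.sub instead of an explicit loop; the sample path and the PROJECT_ROOT value are hypothetical:

import os
import re

os.environ.setdefault("PROJECT_ROOT", "/server/projects/show")

env_re = re.compile(r"\$\{(\w+)\}")
path = "${PROJECT_ROOT}/yeti/textures/fur.%04d.tif"

# Expand every ${TOKEN} occurrence from the environment; unlike the
# plug-in above, a missing variable raises a plain KeyError here.
expanded = env_re.sub(lambda m: os.environ[m.group(1)], path)
print(expanded)  # /server/projects/show/yeti/textures/fur.%04d.tif

diff --git a/pype/plugins/maya/publish/extract_assproxy.py b/pype/plugins/maya/publish/extract_assproxy.py index 34c3113e11..59684febe1 100644 --- a/pype/plugins/maya/publish/extract_assproxy.py +++ b/pype/plugins/maya/publish/extract_assproxy.py @@ -43,8 +43,13 @@ class ExtractAssProxy(pype.api.Extractor): # Get only the shape contents we need in such a way that we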
avoid # taking along intermediateObjects - members = instance.data['proxy'] - members = cmds.ls(members, + proxy = instance.data.get('proxy', None) + + if not proxy: + self.log.info("no proxy mesh") + return + + members = cmds.ls(proxy, dag=True, transforms=True, noIntermediate=True) diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py index c7b8058852..fa6ecd72c3 100644 --- a/pype/plugins/maya/publish/extract_look.py +++ b/pype/plugins/maya/publish/extract_look.py @@ -38,11 +38,7 @@ def source_hash(filepath, *args): file_name = os.path.basename(filepath) time = str(os.path.getmtime(filepath)) size = str(os.path.getsize(filepath)) - return "|".join([ - file_name, - time, - size - ] + list(args)).replace(".", ",") + return "|".join([file_name, time, size] + list(args)).replace(".", ",") def find_paths_by_hash(texture_hash): @@ -64,36 +60,33 @@ def maketx(source, destination, *args): """ cmd = [ - "maketx", - "-v", # verbose - "-u", # update mode - # unpremultiply before conversion (recommended when alpha present) - "--unpremult", - "--checknan", - # use oiio-optimized settings for tile-size, planarconfig, metadata - "--oiio", - "--filter lanczos3" - ] + "maketx", + "-v", # verbose + "-u", # update mode + # unpremultiply before conversion (recommended when alpha present) + "--unpremult", + "--checknan", + # use oiio-optimized settings for tile-size, planarconfig, metadata + "--oiio", + "--filter lanczos3", + ] cmd.extend(args) - cmd.extend([ - "-o", destination, - source - ]) + cmd.extend(["-o", destination, source]) + + cmd = " ".join(cmd) CREATE_NO_WINDOW = 0x08000000 - kwargs = dict( - args=cmd, - stderr=subprocess.STDOUT - ) + kwargs = dict(args=cmd, stderr=subprocess.STDOUT) if sys.platform == "win32": - kwargs["creationflags"] = CREATE_NO_WIDOW + kwargs["creationflags"] = CREATE_NO_WINDOW try: out = subprocess.check_output(**kwargs) except subprocess.CalledProcessError as exc: print(exc) import traceback + traceback.print_exc() raise @@ -180,41 +173,51 @@ class ExtractLook(pype.api.Extractor): # Preserve color space values (force value after filepath change) # This will also trigger in the same order at end of context to # ensure after context it's still the original value. - color_space = resource.get('color_space') + color_space = resource.get("color_space") for f in resource["files"]: - files_metadata[os.path.normpath(f)] = {'color_space': color_space} + files_metadata[os.path.normpath(f)] = { + "color_space": color_space} # files.update(os.path.normpath(f)) # Process the resource files transfers = list() hardlinks = list() hashes = dict() + forceCopy = instance.data.get("forceCopy", False) self.log.info(files) for filepath in files_metadata: - cspace = files_metadata[filepath]['color_space'] + cspace = files_metadata[filepath]["color_space"] linearise = False - if cspace == 'sRGB': + if cspace == "sRGB": linearise = True + # set its file node to 'raw' as tx will be linearized + files_metadata[filepath]["color_space"] = "raw" source, mode, hash = self._process_texture( - filepath, do_maketx, staging=dir_path, linearise=linearise - ) - destination = self.resource_destination( - instance, source, do_maketx + filepath, + do_maketx, + staging=dir_path, + linearise=linearise, + force=forceCopy ) + destination = self.resource_destination(instance, + source, + do_maketx) # Force copy is specified. 
- if instance.data.get("forceCopy", False): + if forceCopy: mode = COPY if mode == COPY: transfers.append((source, destination)) + self.log.info('copying') elif mode == HARDLINK: hardlinks.append((source, destination)) + self.log.info('hardlinking') # Store the hashes from hash to destination to include in the # database @@ -235,13 +238,14 @@ class ExtractLook(pype.api.Extractor): # Preserve color space values (force value after filepath change) # This will also trigger in the same order at end of context to # ensure after context it's still the original value. - color_space_attr = resource['node'] + ".colorSpace" + color_space_attr = resource["node"] + ".colorSpace" color_space = cmds.getAttr(color_space_attr) - + if files_metadata[source]["color_space"] == "raw": + # set colorspace to raw if we linearized it + color_space = "Raw" # Remap file node filename to destination - attr = resource['attribute'] + attr = resource["attribute"] remap[attr] = destinations[source] - remap[color_space_attr] = color_space self.log.info("Finished remapping destinations ...") @@ -268,13 +272,15 @@ class ExtractLook(pype.api.Extractor): channels=True, constraints=True, expressions=True, - constructionHistory=True + constructionHistory=True, ) # Write the JSON data self.log.info("Extract json..") - data = {"attributes": lookdata["attributes"], - "relationships": relationships} + data = { + "attributes": lookdata["attributes"], + "relationships": relationships + } with open(json_path, "w") as f: json.dump(data, f) @@ -293,7 +299,7 @@ class ExtractLook(pype.api.Extractor): instance.data["representations"].append( { "name": "ma", - "ext": 'ma', + "ext": "ma", "files": os.path.basename(maya_fname), "stagingDir": os.path.dirname(maya_fname), } @@ -301,7 +307,7 @@ instance.data["representations"].append( { "name": "json", - "ext": 'json', + "ext": "json", "files": os.path.basename(json_fname), "stagingDir": os.path.dirname(json_fname), } @@ -314,13 +320,18 @@ # Source hash for the textures instance.data["sourceHashes"] = hashes - self.log.info("Extracted instance '%s' to: %s" % ( - instance.name, maya_path) - ) + """ + self.log.info("Returning colorspaces to their original values ...") + for attr, value in remap.items(): + self.log.info(" - {}: {}".format(attr, value)) + cmds.setAttr(attr, value, type="string") + """ + self.log.info("Extracted instance '%s' to: %s" % (instance.name, + maya_path)) def resource_destination(self, instance, filepath, do_maketx): - anatomy = instance.context.data['anatomy'] + anatomy = instance.context.data["anatomy"] self.create_destination_template(instance, anatomy) @@ -332,12 +343,10 @@ ext = ".tx" return os.path.join( - instance.data["assumedDestination"], - "resources", - basename + ext + instance.data["assumedDestination"], "resources", basename + ext )
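Before the method below, a minimal sketch of the decision it implements; COPY and HARDLINK stand for the module-level mode constants this extractor uses, and the helper name is hypothetical. Note the method below tests `if filepath:` after the hash lookup, which is always truthy at that point; the sketch tests the resolved `source` instead:

import os

COPY, HARDLINK = "copy", "hardlink"

def choose_transfer_mode(filepath, existing_paths, force=False):
    # Reuse an identical, already-published texture via hardlink,
    # unless a copy is forced or none of the known paths exist on disk.
    if existing_paths and not force:
        source = next((p for p in existing_paths if os.path.exists(p)), None)
        if source:
            return source, HARDLINK
    return filepath, COPY

- def _process_texture(self, filepath, do_maketx, staging, linearise): + def _process_texture(self, filepath, do_maketx, staging, linearise, force): """Process a single texture file on disk for publishing. This will: 1.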
Check whether it's already published, if so it will do hardlink @@ -359,24 +368,20 @@ class ExtractLook(pype.api.Extractor): # If source has been published before with the same settings, # then don't reprocess but hardlink from the original existing = find_paths_by_hash(texture_hash) - if existing: + if existing and not force: self.log.info("Found hash in database, preparing hardlink..") source = next((p for p in existing if os.path.exists(p)), None) if filepath: return source, HARDLINK, texture_hash else: self.log.warning( - "Paths not found on disk, " - "skipping hardlink: %s" % (existing,) + ("Paths not found on disk, " + "skipping hardlink: %s") % (existing,) ) if do_maketx and ext != ".tx": # Produce .tx file in staging if source file is not .tx - converted = os.path.join( - staging, - "resources", - fname + ".tx" - ) + converted = os.path.join(staging, "resources", fname + ".tx") if linearise: self.log.info("tx: converting sRGB -> linear") @@ -389,9 +394,15 @@ class ExtractLook(pype.api.Extractor): os.makedirs(os.path.dirname(converted)) self.log.info("Generating .tx file for %s .." % filepath) - maketx(filepath, converted, - # Include `source-hash` as string metadata - "-sattrib", "sourceHash", texture_hash, colorconvert) + maketx( + filepath, + converted, + # Include `source-hash` as string metadata + "-sattrib", + "sourceHash", + texture_hash, + colorconvert, + ) return converted, COPY, texture_hash @@ -417,58 +428,71 @@ class ExtractLook(pype.api.Extractor): project_name = api.Session["AVALON_PROJECT"] a_template = anatomy.templates - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} + ) - template = a_template['publish']['path'] + template = a_template["publish"]["path"] # anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " - "in project '{}'".format(asset_name, project_name)) - silo = asset.get('silo') + "in project '{}'").format(asset_name, project_name) + silo = asset.get("silo") - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: version_number += version["name"] - if instance.data.get('version'): - version_number = int(instance.data.get('version')) + if instance.data.get("version"): + version_number = int(instance.data.get("version")) - padding = int(a_template['render']['padding']) + padding = int(a_template["render"]["padding"]) - hierarchy = asset['data']['parents'] + hierarchy = asset["data"]["parents"] if hierarchy: # hierarchy = os.path.sep.join(hierarchy) hierarchy = "/".join(hierarchy) - template_data = {"root": api.Session["AVALON_PROJECTS"], - "project": {"name": project_name, - "code": project['data']['code']}, - "silo": silo, - "family": 
instance.data['family'], - "asset": asset_name, - "subset": subset_name, - "frame": ('#' * padding), - "version": version_number, - "hierarchy": hierarchy, - "representation": "TEMP"} + template_data = { + "root": api.Session["AVALON_PROJECTS"], + "project": {"name": project_name, "code": project["data"]["code"]}, + "silo": silo, + "family": instance.data["family"], + "asset": asset_name, + "subset": subset_name, + "frame": ("#" * padding), + "version": version_number, + "hierarchy": hierarchy, + "representation": "TEMP", + } instance.data["assumedTemplateData"] = template_data self.log.info(template_data) diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py index 1031955260..94b5a716a2 100644 --- a/pype/plugins/maya/publish/extract_quicktime.py +++ b/pype/plugins/maya/publish/extract_quicktime.py @@ -1,16 +1,14 @@ import os +import glob import contextlib -import capture_gui import clique +import capture # import pype.maya.lib as lib import pype.api # from maya import cmds, mel import pymel.core as pm -# import ffmpeg -# # from pype.scripts import otio_burnin -# reload(ffmpeg) # TODO: move codec settings to presets @@ -93,7 +91,18 @@ class ExtractQuicktime(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + # viewer opening call to allow a signal to trigger between playblast + # and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) self.log.info("file list {}".format(playblast)) @@ -119,6 +128,46 @@ class ExtractQuicktime(pype.api.Extractor): } instance.data["representations"].append(representation) + def _fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. 
+ if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + + @contextlib.contextmanager def maintained_time(): diff --git a/pype/plugins/maya/publish/extract_thumbnail.py b/pype/plugins/maya/publish/extract_thumbnail.py index dc8044cf19..8377af1ac0 100644 --- a/pype/plugins/maya/publish/extract_thumbnail.py +++ b/pype/plugins/maya/publish/extract_thumbnail.py @@ -1,31 +1,14 @@ import os import contextlib -import time -import sys +import glob -import capture_gui -import clique +import capture import pype.maya.lib as lib import pype.api from maya import cmds import pymel.core as pm -# import ffmpeg -# reload(ffmpeg) - -import avalon.maya - -# import maya_utils as mu - -# from tweakHUD import master -# from tweakHUD import draft_hud as dHUD -# from tweakHUD import ftrackStrings as fStrings - -# -# def soundOffsetFunc(oSF, SF, H): -# tmOff = (oSF - H) - SF -# return tmOff class ExtractThumbnail(pype.api.Extractor): @@ -47,39 +30,8 @@ class ExtractThumbnail(pype.api.Extractor): end = cmds.currentTime(query=True) self.log.info("start: {}, end: {}".format(start, end)) - members = instance.data['setMembers'] camera = instance.data['review_camera'] - # project_code = ftrack_data['Project']['code'] - # task_type = ftrack_data['Task']['type'] - # - # # load Preset - # studio_repos = os.path.abspath(os.environ.get('studio_repos')) - # shot_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '_' + asset + '.json')) - # - # task_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '.json')) - # - # project_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '.json')) - # - # default_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # 'default.json') - # - # if os.path.isfile(shot_preset_path): - # preset_to_use = shot_preset_path - # elif os.path.isfile(task_preset_path): - # preset_to_use = task_preset_path - # elif os.path.isfile(project_preset_path): - # preset_to_use = project_preset_path - # else: - # preset_to_use = default_preset_path - capture_preset = "" capture_preset = instance.context.data['presets']['maya']['capture'] try: @@ -126,7 +78,18 @@ class ExtractThumbnail(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + # viewer opening call to allow a signal to trigger between + # playblast and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) @@ -144,6 +107,45 @@ class ExtractThumbnail(pype.api.Extractor): } instance.data["representations"].append(representation) + def 
_fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. + if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + @contextlib.contextmanager def maintained_time(): diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 55c04e9c41..e3fa79b1c8 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -228,80 +228,19 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AuxFiles": [] } - # Include critical environment variables with submission + # We need those to pass them to pype for it to set correct context keys = [ - # This will trigger `userSetup.py` on the slave - # such that proper initialisation happens the same - # way as it does on a local machine. - # TODO(marcus): This won't work if the slaves don't - # have accesss to these paths, such as if slaves are - # running Linux and the submitter is on Windows. 
- "PYTHONPATH", - "PATH", - - "MTOA_EXTENSIONS_PATH", - "MTOA_EXTENSIONS", - "DYLD_LIBRARY_PATH", - "MAYA_RENDER_DESC_PATH", - "MAYA_MODULE_PATH", - "ARNOLD_PLUGIN_PATH", - "AVALON_SCHEMA", "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", - "PYBLISHPLUGINPATH", - - # todo: This is a temporary fix for yeti variables - "PEREGRINEL_LICENSE", - "SOLIDANGLE_LICENSE", - "ARNOLD_LICENSE" - "MAYA_MODULE_PATH", - "TOOL_ENV" + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "PYPE_USERNAME" ] + environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **api.Session) - # self.log.debug("enviro: {}".format(pprint(environment))) - for path in os.environ: - if path.lower().startswith('pype_'): - environment[path] = os.environ[path] - - environment["PATH"] = os.environ["PATH"] - # self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS'])) - clean_environment = {} - for key in environment: - clean_path = "" - self.log.debug("key: {}".format(key)) - self.log.debug("value: {}".format(environment[key])) - to_process = str(environment[key]) - if key == "PYPE_STUDIO_CORE_MOUNT": - clean_path = to_process - elif "://" in to_process: - clean_path = to_process - elif os.pathsep not in str(to_process): - try: - path = to_process - path.decode('UTF-8', 'strict') - clean_path = os.path.normpath(path) - except UnicodeDecodeError: - print('path contains non UTF characters') - else: - for path in to_process.split(os.pathsep): - try: - path.decode('UTF-8', 'strict') - clean_path += os.path.normpath(path) + os.pathsep - except UnicodeDecodeError: - print('path contains non UTF characters') - - if key == "PYTHONPATH": - clean_path = clean_path.replace('python2', 'python3') - clean_path = clean_path.replace( - os.path.normpath( - environment['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - environment['PYPE_STUDIO_CORE_PATH'])) # noqa - clean_environment[key] = clean_path - - environment = clean_environment payload["JobInfo"].update({ "EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -319,7 +258,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.preflight_check(instance) - self.log.info("Submitting..") + self.log.info("Submitting ...") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # E.g. 
http://192.168.0.1:8082/api/jobs diff --git a/pype/plugins/maya/publish/validate_node_ids_in_database.py b/pype/plugins/maya/publish/validate_node_ids_in_database.py index 7347ce2ab2..fdcf0b20b0 100644 --- a/pype/plugins/maya/publish/validate_node_ids_in_database.py +++ b/pype/plugins/maya/publish/validate_node_ids_in_database.py @@ -1,6 +1,6 @@ import pyblish.api -import avalon.io as io +from avalon import io import pype.api import pype.maya.action diff --git a/pype/plugins/maya/publish/validate_node_ids_related.py b/pype/plugins/maya/publish/validate_node_ids_related.py index 4a154d0b71..191ac0c2f8 100644 --- a/pype/plugins/maya/publish/validate_node_ids_related.py +++ b/pype/plugins/maya/publish/validate_node_ids_related.py @@ -1,7 +1,7 @@ import pyblish.api import pype.api -import avalon.io as io +from avalon import io import pype.maya.action from pype.maya import lib @@ -38,9 +38,13 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): invalid = list() asset = instance.data['asset'] - asset_data = io.find_one({"name": asset, - "type": "asset"}, - projection={"_id": True}) + asset_data = io.find_one( + { + "name": asset, + "type": "asset" + }, + projection={"_id": True} + ) asset_id = str(asset_data['_id']) # We do want to check the referenced nodes as we it might be diff --git a/pype/plugins/maya/publish/validate_renderlayer_aovs.py b/pype/plugins/maya/publish/validate_renderlayer_aovs.py index e14c92a8b4..686a11e906 100644 --- a/pype/plugins/maya/publish/validate_renderlayer_aovs.py +++ b/pype/plugins/maya/publish/validate_renderlayer_aovs.py @@ -49,9 +49,10 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): """Check if subset is registered in the database under the asset""" asset = io.find_one({"type": "asset", "name": asset_name}) - is_valid = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + is_valid = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) return is_valid - diff --git a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py index dd66b4fb3a..441658297d 100644 --- a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py +++ b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py @@ -1,7 +1,7 @@ import nuke import os import pyblish.api -import avalon.io as io +from avalon import io # TODO: add repair function diff --git a/pype/plugins/nuke/create/create_backdrop.py b/pype/plugins/nuke/create/create_backdrop.py index b5600e8b37..2016c66095 100644 --- a/pype/plugins/nuke/create/create_backdrop.py +++ b/pype/plugins/nuke/create/create_backdrop.py @@ -1,16 +1,52 @@ from avalon.nuke.pipeline import Creator - +from avalon.nuke import lib as anlib +import nuke class CreateBackdrop(Creator): """Add Publishable Backdrop""" - name = "backdrop" - label = "Backdrop" - family = "group" - icon = "cube" + name = "nukenodes" + label = "Create Backdrop" + family = "nukenodes" + icon = "file-archive-o" defaults = ["Main"] def __init__(self, *args, **kwargs): super(CreateBackdrop, self).__init__(*args, **kwargs) - + self.nodes = nuke.selectedNodes() + self.node_color = "0xdfea5dff" return + + def process(self): + from nukescripts import autoBackdrop + nodes = list() + if (self.options or {}).get("useSelection"): + nodes = self.nodes + + if len(nodes) >= 1: + anlib.select_nodes(nodes) + bckd_node = autoBackdrop() + bckd_node["name"].setValue("{}_BDN".format(self.name)) + bckd_node["tile_color"].setValue(int(self.node_color, 
16)) + bckd_node["note_font_size"].setValue(24) + bckd_node["label"].setValue("[{}]".format(self.name)) + # add avalon knobs + instance = anlib.imprint(bckd_node, self.data) + + return instance diff --git a/pype/plugins/nuke/create/create_gizmo.py b/pype/plugins/nuke/create/create_gizmo.py new file mode 100644 index 0000000000..ca199b8800 --- /dev/null +++ b/pype/plugins/nuke/create/create_gizmo.py @@ -0,0 +1,83 @@ +from avalon.nuke.pipeline import Creator +from avalon.nuke import lib as anlib +import nuke +import nukescripts +class CreateGizmo(Creator): + """Add Publishable "gizmo" group + + The name "gizmo" is symbolic: it is a group of nodes, + familiar to Nuke users, that is distributed downstream + in the workflow. + """ + + name = "gizmo" + label = "Gizmo" + family = "gizmo" + icon = "file-archive-o" + defaults = ["ViewerInput", "Lut", "Effect"] + + def __init__(self, *args, **kwargs): + super(CreateGizmo, self).__init__(*args, **kwargs) + self.nodes = nuke.selectedNodes() + self.node_color = "0x7533c1ff" + return + + def process(self): + if (self.options or {}).get("useSelection"): + nodes = self.nodes + self.log.info(len(nodes)) + if len(nodes) == 1: + anlib.select_nodes(nodes) + node = nodes[-1] + # check if Group node + if node.Class() in "Group": + node["name"].setValue("{}_GZM".format(self.name)) + node["tile_color"].setValue(int(self.node_color, 16)) + return anlib.imprint(node, self.data) + else: + msg = ("Please select a group node " + "you wish to publish as the gizmo") + self.log.error(msg) + nuke.message(msg) + + if len(nodes) >= 2: + anlib.select_nodes(nodes) + nuke.makeGroup() + gizmo_node = nuke.selectedNode() + gizmo_node["name"].setValue("{}_GZM".format(self.name)) + gizmo_node["tile_color"].setValue(int(self.node_color, 16)) + + # add sticky node with guide + with gizmo_node: + sticky = nuke.createNode("StickyNote") + sticky["label"].setValue( + "Add following:\n- set Input" + " nodes\n- set one Output1\n" + "- create User knobs on the group") + + # add avalon knobs + return anlib.imprint(gizmo_node, self.data) + + else: + msg = ("Please select nodes you " + "wish to add to the gizmo") + self.log.error(msg) + nuke.message(msg) + return + else: + with anlib.maintained_selection(): + gizmo_node = nuke.createNode("Group") + gizmo_node["name"].setValue("{}_GZM".format(self.name)) + gizmo_node["tile_color"].setValue(int(self.node_color, 16)) + + # add sticky node with guide + with gizmo_node: + sticky = nuke.createNode("StickyNote") + sticky["label"].setValue( + "Add following:\n- add Input" + " nodes\n- add one Output1\n" + "- create User knobs on the group") + + # add avalon knobs + return anlib.imprint(gizmo_node, self.data)
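The creators above share one pattern: frame the relevant nodes in a styled, labelled backdrop or group and imprint Avalon metadata on it. A minimal Nuke-only sketch of the framing part, with a hypothetical helper name (runs only inside a Nuke session):

import nuke
from nukescripts import autoBackdrop

def wrap_selection_in_backdrop(name, tile_color="0xdfea5dff"):
    # autoBackdrop() creates a BackdropNode sized around the selection.
    bckd_node = autoBackdrop()
    bckd_node["name"].setValue("{}_BDN".format(name))
    # tile_color knobs take the colour as a 32-bit hex RGBA integer.
    bckd_node["tile_color"].setValue(int(tile_color, 16))
    bckd_node["note_font_size"].setValue(24)
    bckd_node["label"].setValue("[{}]".format(name))
    return bckd_node

diff --git a/pype/plugins/nuke/create/create_read.py b/pype/plugins/nuke/create/create_read.py index 87bb45a6ad..70db580a7e 100644 --- a/pype/plugins/nuke/create/create_read.py +++ b/pype/plugins/nuke/create/create_read.py @@ -6,9 +6,6 @@ from pype import api as pype import nuke -log = pype.Logger().get_logger(__name__, "nuke") - - class CrateRead(avalon.nuke.Creator): #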
change this to template preset name = "ReadCopy" @@ -37,7 +34,9 @@ class CrateRead(avalon.nuke.Creator): nodes = self.nodes if not nodes or len(nodes) == 0: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) else: count_reads = 0 for node in nodes: @@ -49,7 +48,9 @@ class CrateRead(avalon.nuke.Creator): count_reads += 1 if count_reads < 1: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) return def change_read_node(self, name, node, data): diff --git a/pype/plugins/nuke/create/create_read_plate b/pype/plugins/nuke/create/create_read_plate deleted file mode 100644 index 90a47cb55e..0000000000 --- a/pype/plugins/nuke/create/create_read_plate +++ /dev/null @@ -1,8 +0,0 @@ -# create publishable read node usually used for enabling version tracking -# also useful for sharing across shots or assets - -# if read nodes are selected it will convert them to centainer -# if no read node selected it will create read node and offer browser to shot resource folder - -# type movie > mov or imagesequence -# type still > matpaint .psd, .tif, .png, diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py index 5eaf4279ee..74e450f267 100644 --- a/pype/plugins/nuke/create/create_write.py +++ b/pype/plugins/nuke/create/create_write.py @@ -1,22 +1,14 @@ from collections import OrderedDict -import avalon.api -import avalon.nuke -from pype import api as pype from pype.nuke import plugin -from pypeapp import config - import nuke -log = pype.Logger().get_logger(__name__, "nuke") - - class CreateWriteRender(plugin.PypeCreator): # change this to template preset name = "WriteRender" label = "Create Write Render" hosts = ["nuke"] - nClass = "write" + n_class = "write" family = "render" icon = "sign-out" defaults = ["Main", "Mask"] @@ -24,19 +16,18 @@ class CreateWriteRender(plugin.PypeCreator): def __init__(self, *args, **kwargs): super(CreateWriteRender, self).__init__(*args, **kwargs) - self.name = self.data["subset"] - data = OrderedDict() data["family"] = self.family - data["families"] = self.nClass + data["families"] = self.n_class for k, v in self.data.items(): if k not in data.keys(): data.update({k: v}) self.data = data - self.log.info("self.data: '{}'".format(self.data)) + self.nodes = nuke.selectedNodes() + self.log.debug("_ self.data: '{}'".format(self.data)) def process(self): from pype.nuke import lib as pnlib @@ -48,9 +39,13 @@ class CreateWriteRender(plugin.PypeCreator): # use selection if (self.options or {}).get("useSelection"): - nodes = nuke.selectedNodes() + nodes = self.nodes - assert len(nodes) == 1, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`") + if not (len(nodes) < 2): + msg = ("Select only one node. 
The node you want to connect to, " + "or tick off `Use selection`") + self.log.error(msg) + nuke.message(msg) selected_node = nodes[0] inputs = [selected_node] @@ -71,7 +66,7 @@ class CreateWriteRender(plugin.PypeCreator): # recreate new write_data = { - "class": self.nClass, + "class": self.n_class, "families": [self.family], "avalon": self.data } @@ -102,75 +97,121 @@ class CreateWriteRender(plugin.PypeCreator): return write_node -# -# class CreateWritePrerender(avalon.nuke.Creator): -# # change this to template preset -# preset = "prerender" -# -# name = "WritePrerender" -# label = "Create Write Prerender" -# hosts = ["nuke"] -# family = "{}_write".format(preset) -# families = preset -# icon = "sign-out" -# defaults = ["Main", "Mask"] -# -# def __init__(self, *args, **kwargs): -# super(CreateWritePrerender, self).__init__(*args, **kwargs) -# self.presets = config.get_presets()['plugins']["nuke"]["create"].get( -# self.__class__.__name__, {} -# ) -# -# data = OrderedDict() -# -# data["family"] = self.family.split("_")[1] -# data["families"] = self.families -# -# {data.update({k: v}) for k, v in self.data.items() -# if k not in data.keys()} -# self.data = data -# -# def process(self): -# self.name = self.data["subset"] -# -# instance = nuke.toNode(self.data["subset"]) -# node = 'write' -# -# if not instance: -# write_data = { -# "class": node, -# "preset": self.preset, -# "avalon": self.data -# } -# -# if self.presets.get('fpath_template'): -# self.log.info("Adding template path from preset") -# write_data.update( -# {"fpath_template": self.presets["fpath_template"]} -# ) -# else: -# self.log.info("Adding template path from plugin") -# write_data.update({ -# "fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"}) -# -# # get group node -# group_node = create_write_node(self.data["subset"], write_data) -# -# # open group node -# group_node.begin() -# for n in nuke.allNodes(): -# # get write node -# if n.Class() in "Write": -# write_node = n -# group_node.end() -# -# # linking knobs to group property panel -# linking_knobs = ["first", "last", "use_limit"] -# for k in linking_knobs: -# lnk = nuke.Link_Knob(k) -# lnk.makeLink(write_node.name(), k) -# lnk.setName(k.replace('_', ' ').capitalize()) -# lnk.clearFlag(nuke.STARTLINE) -# group_node.addKnob(lnk) -# -# return +
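Both write creators expose knobs of the inner Write node on the group's property panel. A minimal sketch of that Link_Knob pattern, with a hypothetical helper name (runs only inside a Nuke session):

import nuke

def expose_inner_knobs(group_node, inner_node_name,
                       knob_names=("first", "last", "use_limit")):
    # Create link knobs on the group that mirror knobs of the inner node.
    for knob_name in knob_names:
        link = nuke.Link_Knob(knob_name)
        link.makeLink(inner_node_name, knob_name)
        link.setName(knob_name.replace("_", " ").capitalize())
        link.clearFlag(nuke.STARTLINE)
        group_node.addKnob(link)

+class CreateWritePrerender(plugin.PypeCreator): + # change this to template preset + name = "WritePrerender" + label = "Create Write Prerender" + hosts = ["nuke"] + n_class = "write" + family = "prerender" + icon = "sign-out" + defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"] + + def __init__(self, *args, **kwargs): + super(CreateWritePrerender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family + data["families"] = self.n_class + + for k, v in self.data.items(): + if k not in data.keys(): + data.update({k: v}) + + self.data = data + self.nodes = nuke.selectedNodes() + self.log.debug("_ self.data: '{}'".format(self.data)) + + def process(self): + from pype.nuke import lib as pnlib + + inputs = [] + outputs = [] + instance = nuke.toNode(self.data["subset"]) + selected_node = None + + # use selection + if (self.options or {}).get("useSelection"): + nodes = self.nodes + + if not (len(nodes) < 2): + msg = ("Select only one node.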
diff --git a/pype/plugins/nuke/load/load_backdrop.py b/pype/plugins/nuke/load/load_backdrop.py
new file mode 100644
index 0000000000..07a6724771
--- /dev/null
+++ b/pype/plugins/nuke/load/load_backdrop.py
@@ -0,0 +1,322 @@
+from avalon import api, style, io
+import nuke
+import nukescripts
+from pype.nuke import lib as pnlib
+from avalon.nuke import lib as anlib
+from avalon.nuke import containerise, update_container
+reload(pnlib)
+
+class LoadBackdropNodes(api.Loader):
+    """Loading Published Backdrop nodes (workfile, nukenodes)"""
+
+    representations = ["nk"]
+    families = ["workfile", "nukenodes"]
+
+    label = "Import Nuke Nodes"
+    order = 0
+    icon = "eye"
+    color = style.colors.light
+    node_color = "0x7533c1ff"
+
+    def load(self, context, name, namespace, data):
+        """
+        Loading function to import a .nk file into the script and wrap
+        it in a backdrop
+
+        Arguments:
+            context (dict): context of version
+            name (str): name of the version
+            namespace (str): asset name
+            data (dict): compulsory attribute > not used
+
+        Returns:
+            nuke node: containerised nuke node object
+        """
+
+        # get main variables
+        version = context['version']
+        version_data = version.get("data", {})
+        vname = version.get("name", None)
+        first = version_data.get("frameStart", None)
+        last = version_data.get("frameEnd", None)
+        namespace = namespace or context['asset']['name']
+        colorspace = version_data.get("colorspace", None)
+        object_name = "{}_{}".format(name, namespace)
+
+        # prepare data for imprinting
+        # add additional metadata from the version to imprint to Avalon knob
+        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
"source", "author", "fps"] + + data_imprint = {"frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # getting file path + file = self.fname.replace("\\", "/") + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + # Get mouse position + n = nuke.createNode("NoOp") + xcursor, ycursor = (n.xpos(), n.ypos()) + anlib.reset_selection() + nuke.delete(n) + + bdn_frame = 50 + + with anlib.maintained_selection(): + + # add group from nk + nuke.nodePaste(file) + + # get all pasted nodes + new_nodes = list() + nodes = nuke.selectedNodes() + + # get pointer position in DAG + xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame) + + # reset position to all nodes and replace inputs and output + for n in nodes: + anlib.reset_selection() + xpos = (n.xpos() - xcursor) + xpointer + ypos = (n.ypos() - ycursor) + ypointer + n.setXYpos(xpos, ypos) + + # replace Input nodes for dots + if n.Class() in "Input": + dot = nuke.createNode("Dot") + new_name = n.name().replace("INP", "DOT") + dot.setName(new_name) + dot["label"].setValue(new_name) + dot.setXYpos(xpos, ypos) + new_nodes.append(dot) + + # rewire + dep = n.dependent() + for d in dep: + index = next((i for i, dpcy in enumerate( + d.dependencies()) + if n is dpcy), 0) + d.setInput(index, dot) + + # remove Input node + anlib.reset_selection() + nuke.delete(n) + continue + + # replace Input nodes for dots + elif n.Class() in "Output": + dot = nuke.createNode("Dot") + new_name = n.name() + "_DOT" + dot.setName(new_name) + dot["label"].setValue(new_name) + dot.setXYpos(xpos, ypos) + new_nodes.append(dot) + + # rewire + dep = next((d for d in n.dependencies()), None) + if dep: + dot.setInput(0, dep) + + # remove Input node + anlib.reset_selection() + nuke.delete(n) + continue + else: + new_nodes.append(n) + + # reselect nodes with new Dot instead of Inputs and Output + anlib.reset_selection() + anlib.select_nodes(new_nodes) + # place on backdrop + bdn = nukescripts.autoBackdrop() + + # add frame offset + xpos = bdn.xpos() - bdn_frame + ypos = bdn.ypos() - bdn_frame + bdwidth = bdn["bdwidth"].value() + (bdn_frame*2) + bdheight = bdn["bdheight"].value() + (bdn_frame*2) + + bdn["xpos"].setValue(xpos) + bdn["ypos"].setValue(ypos) + bdn["bdwidth"].setValue(bdwidth) + bdn["bdheight"].setValue(bdheight) + + bdn["name"].setValue(object_name) + bdn["label"].setValue("Version tracked frame: \n`{}`\n\nPLEASE DO NOT REMOVE OR MOVE \nANYTHING FROM THIS FRAME!".format(object_name)) + bdn["note_font_size"].setValue(20) + + return containerise( + node=bdn, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. 
These automatic changes are to its
+        inputs:
+
+        """
+
+        # get main variables
+        # Get version from io
+        version = io.find_one({
+            "type": "version",
+            "_id": representation["parent"]
+        })
+        # get corresponding node
+        GN = nuke.toNode(container['objectName'])
+
+        file = api.get_representation_path(representation).replace("\\", "/")
+        context = representation["context"]
+        name = container['name']
+        version_data = version.get("data", {})
+        vname = version.get("name", None)
+        first = version_data.get("frameStart", None)
+        last = version_data.get("frameEnd", None)
+        namespace = container['namespace']
+        colorspace = version_data.get("colorspace", None)
+        object_name = "{}_{}".format(name, namespace)
+
+        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+                    "source", "author", "fps"]
+
+        data_imprint = {"representation": str(representation["_id"]),
+                        "frameStart": first,
+                        "frameEnd": last,
+                        "version": vname,
+                        "colorspaceInput": colorspace,
+                        "objectName": object_name}
+
+        for k in add_keys:
+            data_imprint.update({k: version_data[k]})
+
+        # adding nodes to node graph
+        # just in case we are in group lets jump out of it
+        nuke.endGroup()
+
+        with anlib.maintained_selection():
+            xpos = GN.xpos()
+            ypos = GN.ypos()
+            avalon_data = anlib.get_avalon_knob_data(GN)
+            nuke.delete(GN)
+            # add group from nk
+            nuke.nodePaste(file)
+
+            GN = nuke.selectedNode()
+            anlib.set_avalon_knob_data(GN, avalon_data)
+            GN.setXYpos(xpos, ypos)
+            GN["name"].setValue(object_name)
+
+        # get all versions in list
+        versions = io.find({
+            "type": "version",
+            "parent": version["parent"]
+        }).distinct('name')
+
+        max_version = max(versions)
+
+        # change color of node
+        if version.get("name") not in [max_version]:
+            GN["tile_color"].setValue(int("0xd88467ff", 16))
+        else:
+            GN["tile_color"].setValue(int(self.node_color, 16))
+
+        self.log.info("updated to version: {}".format(version.get("name")))
+
+        return update_container(GN, data_imprint)
+
+    def connect_active_viewer(self, group_node):
+        """
+        Finds the active viewer, places the node under it and adds
+        the group's name to the viewer's Input Process
+
+        Arguments:
+            group_node (nuke node): nuke group node object
+
+        """
+        group_node_name = group_node["name"].value()
+
+        viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
+        if len(viewer) > 0:
+            viewer = viewer[0]
+        else:
+            msg = ("Please create Viewer node before you "
+                   "run this action again")
+            self.log.error(msg)
+            nuke.message(msg)
+            return None
+
+        # get coordinates of Viewer1
+        xpos = viewer["xpos"].value()
+        ypos = viewer["ypos"].value()
+
+        ypos += 150
+
+        viewer["ypos"].setValue(ypos)
+
+        # set coordinates to group node
+        group_node["xpos"].setValue(xpos)
+        group_node["ypos"].setValue(ypos + 50)
+
+        # add group node name to Viewer Input Process
+        viewer["input_process_node"].setValue(group_node_name)
+
+        # put backdrop under
+        pnlib.create_backdrop(label="Input Process", layer=2,
+                              nodes=[viewer, group_node], color="0x7c7faaff")
+
+        return True
+
+    def get_item(self, data, trackIndex, subTrackIndex):
+        return {key: val for key, val in data.items()
+                if subTrackIndex == val["subTrackIndex"]
+                if trackIndex == val["trackIndex"]}
+
+    def byteify(self, input):
+        """
+        Converts unicode strings to strings.
+        It walks through the whole dictionary recursively.
+
+        Arguments:
+            input (dict/str): input
+
+        Returns:
+            dict: with fixed values and keys
+
+        """
+
+        if isinstance(input, dict):
+            return {self.byteify(key): self.byteify(value)
+                    for key, value in input.iteritems()}
+        elif isinstance(input, list):
+            return [self.byteify(element) for element in input]
+        elif isinstance(input, unicode):
+            return input.encode('utf-8')
+        else:
+            return input
+
+    def switch(self, container, representation):
+        self.update(container, representation)
+
+    def remove(self, container):
+        from avalon.nuke import viewer_update_and_undo_stop
+        node = nuke.toNode(container['objectName'])
+        with viewer_update_and_undo_stop():
+            nuke.delete(node)
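Both the backdrop loader above and the gizmo loader below recolour the container after an update: orange (0xd88467ff) when the loaded version is outdated, the loader's node_color when it is the latest. The check reduces to comparing the version name against the highest published name; a condensed sketch using `io` as imported in these loaders:

    from avalon import io

    def is_latest(version):
        # Collect all version names published under the same subset
        # and compare against this version's name.
        names = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')
        return version.get("name") == max(names)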
diff --git a/pype/plugins/nuke/load/load_gizmo_ip.py b/pype/plugins/nuke/load/load_gizmo_ip.py
new file mode 100644
index 0000000000..23d7ef2f4a
--- /dev/null
+++ b/pype/plugins/nuke/load/load_gizmo_ip.py
@@ -0,0 +1,241 @@
+from avalon import api, style, io
+import nuke
+from pype.nuke import lib as pnlib
+from avalon.nuke import lib as anlib
+from avalon.nuke import containerise, update_container
+
+
+class LoadGizmoInputProcess(api.Loader):
+    """Loading colorspace soft effect exported from nukestudio"""
+
+    representations = ["gizmo"]
+    families = ["gizmo"]
+
+    label = "Load Gizmo - Input Process"
+    order = 0
+    icon = "eye"
+    color = style.colors.alert
+    node_color = "0x7533c1ff"
+
+    def load(self, context, name, namespace, data):
+        """
+        Loading function to get Gizmo as Input Process on viewer
+
+        Arguments:
+            context (dict): context of version
+            name (str): name of the version
+            namespace (str): asset name
+            data (dict): compulsory attribute > not used
+
+        Returns:
+            nuke node: containerised nuke node object
+        """
+
+        # get main variables
+        version = context['version']
+        version_data = version.get("data", {})
+        vname = version.get("name", None)
+        first = version_data.get("frameStart", None)
+        last = version_data.get("frameEnd", None)
+        namespace = namespace or context['asset']['name']
+        colorspace = version_data.get("colorspace", None)
+        object_name = "{}_{}".format(name, namespace)
+
+        # prepare data for imprinting
+        # add additional metadata from the version to imprint to Avalon knob
+        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+                    "source", "author", "fps"]
+
+        data_imprint = {"frameStart": first,
+                        "frameEnd": last,
+                        "version": vname,
+                        "colorspaceInput": colorspace,
+                        "objectName": object_name}
+
+        for k in add_keys:
+            data_imprint.update({k: version_data[k]})
+
+        # getting file path
+        file = self.fname.replace("\\", "/")
+
+        # adding nodes to node graph
+        # just in case we are in group lets jump out of it
+        nuke.endGroup()
+
+        with anlib.maintained_selection():
+            # add group from nk
+            nuke.nodePaste(file)
+
+            GN = nuke.selectedNode()
+
+            GN["name"].setValue(object_name)
+
+        # try to place it under Viewer1
+        if not self.connect_active_viewer(GN):
+            nuke.delete(GN)
+            return
+
+        return containerise(
+            node=GN,
+            name=name,
+            namespace=namespace,
+            context=context,
+            loader=self.__class__.__name__,
+            data=data_imprint)
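+    # NOTE: update() below re-pastes the published .nk at the old node
+    # position, restores the Avalon knob data onto the fresh group node
+    # and recolours it by version, mirroring LoadBackdropNodes.update().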
+
+    def update(self, container, representation):
+        """Update the Loader's path
+
+        Nuke automatically tries to reset some variables when changing
+        the loader's path to a new file. These automatic changes are to its
+        inputs:
+
+        """
+
+        # get main variables
+        # Get version from io
+        version = io.find_one({
+            "type": "version",
+            "_id": representation["parent"]
+        })
+        # get corresponding node
+        GN = nuke.toNode(container['objectName'])
+
+        file = api.get_representation_path(representation).replace("\\", "/")
+        context = representation["context"]
+        name = container['name']
+        version_data = version.get("data", {})
+        vname = version.get("name", None)
+        first = version_data.get("frameStart", None)
+        last = version_data.get("frameEnd", None)
+        namespace = container['namespace']
+        colorspace = version_data.get("colorspace", None)
+        object_name = "{}_{}".format(name, namespace)
+
+        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+                    "source", "author", "fps"]
+
+        data_imprint = {"representation": str(representation["_id"]),
+                        "frameStart": first,
+                        "frameEnd": last,
+                        "version": vname,
+                        "colorspaceInput": colorspace,
+                        "objectName": object_name}
+
+        for k in add_keys:
+            data_imprint.update({k: version_data[k]})
+
+        # adding nodes to node graph
+        # just in case we are in group lets jump out of it
+        nuke.endGroup()
+
+        with anlib.maintained_selection():
+            xpos = GN.xpos()
+            ypos = GN.ypos()
+            avalon_data = anlib.get_avalon_knob_data(GN)
+            nuke.delete(GN)
+            # add group from nk
+            nuke.nodePaste(file)
+
+            GN = nuke.selectedNode()
+            anlib.set_avalon_knob_data(GN, avalon_data)
+            GN.setXYpos(xpos, ypos)
+            GN["name"].setValue(object_name)
+
+        # get all versions in list
+        versions = io.find({
+            "type": "version",
+            "parent": version["parent"]
+        }).distinct('name')
+
+        max_version = max(versions)
+
+        # change color of node
+        if version.get("name") not in [max_version]:
+            GN["tile_color"].setValue(int("0xd88467ff", 16))
+        else:
+            GN["tile_color"].setValue(int(self.node_color, 16))
+
+        self.log.info("updated to version: {}".format(version.get("name")))
+
+        return update_container(GN, data_imprint)
+
+    def connect_active_viewer(self, group_node):
+        """
+        Finds the active viewer, places the node under it and adds
+        the group's name to the viewer's Input Process
+
+        Arguments:
+            group_node (nuke node): nuke group node object
+
+        """
+        group_node_name = group_node["name"].value()
+
+        viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
+        if len(viewer) > 0:
+            viewer = viewer[0]
+        else:
+            msg = ("Please create Viewer node before you "
+                   "run this action again")
+            self.log.error(msg)
+            nuke.message(msg)
+            return None
+
+        # get coordinates of Viewer1
+        xpos = viewer["xpos"].value()
+        ypos = viewer["ypos"].value()
+
+        ypos += 150
+
+        viewer["ypos"].setValue(ypos)
+
+        # set coordinates to group node
+        group_node["xpos"].setValue(xpos)
+        group_node["ypos"].setValue(ypos + 50)
+
+        # add group node name to Viewer Input Process
+        viewer["input_process_node"].setValue(group_node_name)
+
+        # put backdrop under
+        pnlib.create_backdrop(label="Input Process", layer=2,
+                              nodes=[viewer, group_node], color="0x7c7faaff")
+
+        return True
+
+    def get_item(self, data, trackIndex, subTrackIndex):
+        return {key: val for key, val in data.items()
+                if subTrackIndex == val["subTrackIndex"]
+                if trackIndex == val["trackIndex"]}
+
+    def byteify(self, input):
+        """
+        Converts unicode strings to strings.
+        It walks through the whole dictionary recursively.
+
+        Arguments:
+            input (dict/str): input
+
+        Returns:
+            dict: with fixed values and keys
+
+        """
+
+        if isinstance(input, dict):
+            return {self.byteify(key): self.byteify(value)
+                    for key, value in input.iteritems()}
+        elif isinstance(input, list):
return [self.byteify(element) for element in input] + elif isinstance(input, unicode): + return input.encode('utf-8') + else: + return input + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from avalon.nuke import viewer_update_and_undo_stop + node = nuke.toNode(container['objectName']) + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py index 5f09adb05f..2b38a9ff08 100644 --- a/pype/plugins/nuke/load/load_luts_ip.py +++ b/pype/plugins/nuke/load/load_luts_ip.py @@ -276,7 +276,10 @@ class LoadLutsInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you run this action again") + msg = "Please create Viewer node before you " + "run this action again" + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git a/pype/plugins/nuke/load/load_matchmove.py b/pype/plugins/nuke/load/load_matchmove.py new file mode 100644 index 0000000000..60d5dc026f --- /dev/null +++ b/pype/plugins/nuke/load/load_matchmove.py @@ -0,0 +1,27 @@ +from avalon import api +import nuke + + +class MatchmoveLoader(api.Loader): + """ + This will run matchmove script to create track in script. + """ + + families = ["matchmove"] + representations = ["py"] + defaults = ["Camera", "Object"] + + label = "Run matchmove script" + icon = "empire" + color = "orange" + + def load(self, context, name, namespace, data): + if self.fname.lower().endswith(".py"): + exec(open(self.fname).read()) + + else: + msg = "Unsupported script type" + self.log.error(msg) + nuke.message(msg) + + return True diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index e6daaaff8a..e598839405 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -1,9 +1,6 @@ -import os import contextlib -from avalon import api -import avalon.io as io - +from avalon import api, io import nuke @@ -102,7 +99,7 @@ class LoadMov(api.Loader): handle_start = version_data.get("handleStart", None) handle_end = version_data.get("handleEnd", None) repr_cont = context["representation"]["context"] - + # fix handle start and end if none are available if not handle_start and not handle_end: handle_start = handles diff --git a/pype/plugins/nuke/load/load_script_precomp.py b/pype/plugins/nuke/load/load_script_precomp.py index e84e23a890..310157f099 100644 --- a/pype/plugins/nuke/load/load_script_precomp.py +++ b/pype/plugins/nuke/load/load_script_precomp.py @@ -7,7 +7,7 @@ class LinkAsGroup(api.Loader): """Copy the published file to be pasted at the desired location""" representations = ["nk"] - families = ["workfile"] + families = ["workfile", "nukenodes"] label = "Load Precomp" order = 0 @@ -63,8 +63,6 @@ class LinkAsGroup(api.Loader): colorspace = context["version"]["data"].get("colorspace", None) self.log.info("colorspace: {}\n".format(colorspace)) - # ['version', 'file', 'reading', 'output', 'useOutput'] - P["name"].setValue("{}_{}".format(name, namespace)) P["useOutput"].setValue(True) @@ -74,14 +72,15 @@ class LinkAsGroup(api.Loader): if n.Class() == "Group" if get_avalon_knob_data(n)] - # create panel for selecting output - panel_choices = " ".join(writes) - panel_label = "Select write node for output" - p = nuke.Panel("Select Write Node") - p.addEnumerationPulldown( - panel_label, panel_choices) - p.show() - 
P["output"].setValue(p.value(panel_label)) + if writes: + # create panel for selecting output + panel_choices = " ".join(writes) + panel_label = "Select write node for output" + p = nuke.Panel("Select Write Node") + p.addEnumerationPulldown( + panel_label, panel_choices) + p.show() + P["output"].setValue(p.value(panel_label)) P["tile_color"].setValue(0xff0ff0ff) diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index e1c75584d7..76599c3351 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -1,9 +1,6 @@ -import os import contextlib -from avalon import api -import avalon.io as io - +from avalon import api, io import nuke @@ -76,7 +73,7 @@ class LoadSequence(api.Loader): """Load image sequence into Nuke""" families = ["write", "source", "plate", "render"] - representations = ["exr", "dpx", "jpg", "jpeg"] + representations = ["exr", "dpx", "jpg", "jpeg", "png"] label = "Load sequence" order = -10 diff --git a/pype/plugins/nuke/publish/collect_asset_info.py b/pype/plugins/nuke/publish/collect_asset_info.py index 76b93ef3d0..8a8791ec36 100644 --- a/pype/plugins/nuke/publish/collect_asset_info.py +++ b/pype/plugins/nuke/publish/collect_asset_info.py @@ -13,8 +13,10 @@ class CollectAssetInfo(pyblish.api.ContextPlugin): ] def process(self, context): - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}) + asset_data = io.find_one({ + "type": "asset", + "name": api.Session["AVALON_ASSET"] + }) self.log.info("asset_data: {}".format(asset_data)) context.data['handles'] = int(asset_data["data"].get("handles", 0)) diff --git a/pype/plugins/nuke/publish/collect_backdrop.py b/pype/plugins/nuke/publish/collect_backdrop.py new file mode 100644 index 0000000000..d98a20aee0 --- /dev/null +++ b/pype/plugins/nuke/publish/collect_backdrop.py @@ -0,0 +1,83 @@ +import pyblish.api +import pype.api as pype +from pype.nuke import lib as pnlib +import nuke + +@pyblish.api.log +class CollectBackdrops(pyblish.api.InstancePlugin): + """Collect Backdrop node instance and its content + """ + + order = pyblish.api.CollectorOrder + 0.22 + label = "Collect Backdrop" + hosts = ["nuke"] + families = ["nukenodes"] + + def process(self, instance): + + bckn = instance[0] + + # define size of the backdrop + left = bckn.xpos() + top = bckn.ypos() + right = left + bckn['bdwidth'].value() + bottom = top + bckn['bdheight'].value() + + # iterate all nodes + for node in nuke.allNodes(): + + # exclude viewer + if node.Class() == "Viewer": + continue + + # find all related nodes + if (node.xpos() > left) \ + and (node.xpos() + node.screenWidth() < right) \ + and (node.ypos() > top) \ + and (node.ypos() + node.screenHeight() < bottom): + + # add contained nodes to instance's node list + instance.append(node) + + # get all connections from outside of backdrop + nodes = instance[1:] + connections_in, connections_out = pnlib.get_dependent_nodes(nodes) + instance.data["connections_in"] = connections_in + instance.data["connections_out"] = connections_out + + # make label nicer + instance.data["label"] = "{0} ({1} nodes)".format( + bckn.name(), len(instance)-1) + + instance.data["families"].append(instance.data["family"]) + + # Get frame range + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + first_frame = int(nuke.root()["first_frame"].getValue()) + last_frame = int(nuke.root()["last_frame"].getValue()) + + # get version + version = 
pype.get_version_from_path(nuke.root().name()) + instance.data['version'] = version + + # Add version data to instance + version_data = { + "handles": handle_start, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "version": int(version), + "families": [instance.data["family"]] + instance.data["families"], + "subset": instance.data["subset"], + "fps": instance.context.data["fps"] + } + + instance.data.update({ + "versionData": version_data, + "frameStart": first_frame, + "frameEnd": last_frame + }) + self.log.info("Backdrop content collected: `{}`".format(instance[:])) + self.log.info("Backdrop instance collected: `{}`".format(instance)) diff --git a/pype/plugins/nuke/publish/collect_gizmo.py b/pype/plugins/nuke/publish/collect_gizmo.py new file mode 100644 index 0000000000..11e8c17a3f --- /dev/null +++ b/pype/plugins/nuke/publish/collect_gizmo.py @@ -0,0 +1,56 @@ +import pyblish.api +import pype.api as pype +import nuke + + +@pyblish.api.log +class CollectGizmo(pyblish.api.InstancePlugin): + """Collect Gizmo (group) node instance and its content + """ + + order = pyblish.api.CollectorOrder + 0.22 + label = "Collect Gizmo (Group)" + hosts = ["nuke"] + families = ["gizmo"] + + def process(self, instance): + + grpn = instance[0] + + # add family to familiess + instance.data["families"].insert(0, instance.data["family"]) + # make label nicer + instance.data["label"] = "{0} ({1} nodes)".format( + grpn.name(), len(instance) - 1) + + # Get frame range + handle_start = instance.context.data["handleStart"] + handle_end = instance.context.data["handleEnd"] + first_frame = int(nuke.root()["first_frame"].getValue()) + last_frame = int(nuke.root()["last_frame"].getValue()) + + # get version + version = pype.get_version_from_path(nuke.root().name()) + instance.data['version'] = version + + # Add version data to instance + version_data = { + "handles": handle_start, + "handleStart": handle_start, + "handleEnd": handle_end, + "frameStart": first_frame + handle_start, + "frameEnd": last_frame - handle_end, + "colorspace": nuke.root().knob('workingSpaceLUT').value(), + "version": int(version), + "families": [instance.data["family"]] + instance.data["families"], + "subset": instance.data["subset"], + "fps": instance.context.data["fps"] + } + + instance.data.update({ + "versionData": version_data, + "frameStart": first_frame, + "frameEnd": last_frame + }) + self.log.info("Gizmo content collected: `{}`".format(instance[:])) + self.log.info("Gizmo instance collected: `{}`".format(instance)) diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py index 483f260295..5b123ed7b9 100644 --- a/pype/plugins/nuke/publish/collect_instances.py +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -15,13 +15,15 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): hosts = ["nuke", "nukeassist"] def process(self, context): - - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}) + asset_data = io.find_one({ + "type": "asset", + "name": api.Session["AVALON_ASSET"] + }) self.log.debug("asset_data: {}".format(asset_data["data"])) instances = [] - # creating instances per write node + + root = nuke.root() self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes())) for node in nuke.allNodes(): @@ -31,11 +33,11 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): continue except Exception as E: self.log.warning(E) - continue + # get data from 
avalon knob self.log.debug("node[name]: {}".format(node['name'].value())) - avalon_knob_data = get_avalon_knob_data(node) + avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"]) self.log.debug("avalon_knob_data: {}".format(avalon_knob_data)) @@ -45,6 +47,14 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): if avalon_knob_data["id"] != "pyblish.avalon.instance": continue + # establish families + family = avalon_knob_data["family"] + families = list() + + # except disabled nodes but exclude backdrops in test + if ("nukenodes" not in family) and (node["disable"].value()): + continue + subset = avalon_knob_data.get( "subset", None) or node["name"].value() @@ -54,16 +64,47 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): # Add all nodes in group instances. if node.Class() == "Group": + # only alter families for render family + if ("render" in family): + # check if node is not disabled + families.append(avalon_knob_data["families"]) + if node["render"].value(): + self.log.info("flagged for render") + add_family = "render.local" + # dealing with local/farm rendering + if node["render_farm"].value(): + self.log.info("adding render farm family") + add_family = "render.farm" + instance.data["transfer"] = False + families.append(add_family) + else: + # add family into families + families.insert(0, family) + node.begin() for i in nuke.allNodes(): instance.append(i) node.end() family = avalon_knob_data["family"] - families = [avalon_knob_data["families"]] - + families = list() + families_ak = avalon_knob_data.get("families") + + if families_ak: + families.append(families_ak) + else: + families.append(family) + + # Get format + format = root['format'].value() + resolution_width = format.width() + resolution_height = format.height() + pixel_aspect = format.pixelAspect() + if node.Class() not in "Read": - if node["render"].value(): + if "render" not in node.knobs().keys(): + pass + elif node["render"].value(): self.log.info("flagged for render") add_family = "render.local" # dealing with local/farm rendering @@ -87,7 +128,10 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): "avalonKnob": avalon_knob_data, "publish": node.knob('publish').value(), "step": 1, - "fps": nuke.root()['fps'].value() + "fps": nuke.root()['fps'].value(), + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "pixelAspect": pixel_aspect, }) @@ -95,5 +139,4 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): instances.append(instance) context.data["instances"] = instances - self.log.debug("context: {}".format(context)) diff --git a/pype/plugins/nuke/publish/collect_legacy_write.py b/pype/plugins/nuke/publish/collect_legacy_write.py index 74280b743a..cfb0798434 100644 --- a/pype/plugins/nuke/publish/collect_legacy_write.py +++ b/pype/plugins/nuke/publish/collect_legacy_write.py @@ -24,7 +24,8 @@ class CollectWriteLegacy(pyblish.api.InstancePlugin): self.log.info("render") return - instance.data.update( - {"family": "write.legacy", - "families": []} - ) + if "render" in node.knobs(): + instance.data.update( + {"family": "write.legacy", + "families": []} + ) diff --git a/pype/plugins/nuke/publish/collect_script_version.py b/pype/plugins/nuke/publish/collect_script_version.py new file mode 100644 index 0000000000..9a6b5bf572 --- /dev/null +++ b/pype/plugins/nuke/publish/collect_script_version.py @@ -0,0 +1,22 @@ +import os +import pype.api as pype +import pyblish.api + + +class CollectScriptVersion(pyblish. 
api.ContextPlugin): + """Collect Script Version.""" + + order = pyblish.api.CollectorOrder + label = "Collect Script Version" + hosts = [ + "nuke", + "nukeassist" + ] + + def process(self, context): + file_path = context.data["currentFile"] + base_name = os.path.basename(file_path) + # get version string + version = pype.get_version_from_path(base_name) + + context.data['version'] = version diff --git a/pype/plugins/nuke/publish/collect_slate_node.py b/pype/plugins/nuke/publish/collect_slate_node.py new file mode 100644 index 0000000000..d8d6b50f05 --- /dev/null +++ b/pype/plugins/nuke/publish/collect_slate_node.py @@ -0,0 +1,40 @@ +import pyblish.api +import nuke + + +class CollectSlate(pyblish.api.InstancePlugin): + """Check if SLATE node is in scene and connected to rendering tree""" + + order = pyblish.api.CollectorOrder + 0.09 + label = "Collect Slate Node" + hosts = ["nuke"] + families = ["write"] + + def process(self, instance): + node = instance[0] + + slate = next((n for n in nuke.allNodes() + if "slate" in n.name().lower() + if not n["disable"].getValue()), + None) + + if slate: + # check if slate node is connected to write node tree + slate_check = 0 + slate_node = None + while slate_check == 0: + try: + node = node.dependencies()[0] + if slate.name() in node.name(): + slate_node = node + slate_check = 1 + except IndexError: + break + + if slate_node: + instance.data["slateNode"] = slate_node + instance.data["families"].append("slate") + self.log.info( + "Slate node is in node graph: `{}`".format(slate.name())) + self.log.debug( + "__ instance: `{}`".format(instance)) diff --git a/pype/plugins/nuke/publish/collect_workfile.py b/pype/plugins/nuke/publish/collect_workfile.py index aaee554fbf..9c01a3ec97 100644 --- a/pype/plugins/nuke/publish/collect_workfile.py +++ b/pype/plugins/nuke/publish/collect_workfile.py @@ -2,8 +2,6 @@ import nuke import pyblish.api import os -import pype.api as pype - from avalon.nuke import ( get_avalon_knob_data, add_publish_knob @@ -11,7 +9,7 @@ from avalon.nuke import ( class CollectWorkfile(pyblish.api.ContextPlugin): - """Publish current script version.""" + """Collect current script for publish.""" order = pyblish.api.CollectorOrder + 0.1 label = "Collect Workfile" @@ -31,9 +29,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin): base_name = os.path.basename(file_path) subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family) - # get version string - version = pype.get_version_from_path(base_name) - # Get frame range first_frame = int(root["first_frame"].getValue()) last_frame = int(root["last_frame"].getValue()) @@ -53,7 +48,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin): script_data = { "asset": os.getenv("AVALON_ASSET", None), - "version": version, "frameStart": first_frame + handle_start, "frameEnd": last_frame - handle_end, "resolutionWidth": resolution_width, @@ -78,8 +72,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "publish": root.knob('publish').value(), "family": family, "families": [family], - "representations": list(), - "subsetGroup": "workfiles" + "representations": list() }) # adding basic script data diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index ba8a0534b1..3eff527d47 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -11,7 +11,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): order = pyblish.api.CollectorOrder + 0.1 label = "Collect Writes" hosts = ["nuke", "nukeassist"] - 
families = ["render", "render.local", "render.farm"] + families = ["write"] def process(self, instance): @@ -50,9 +50,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_dir = os.path.dirname(path) self.log.debug('output dir: {}'.format(output_dir)) - # get version - version = pype.get_version_from_path(nuke.root().name()) - instance.data['version'] = version + # get version to instance for integration + instance.data['version'] = instance.context.data.get( + "version", pype.get_version_from_path(nuke.root().name())) + self.log.debug('Write Version: %s' % instance.data('version')) # create label @@ -76,7 +77,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): } try: - collected_frames = os.listdir(output_dir) + collected_frames = [f for f in os.listdir(output_dir) + if ext in f] if collected_frames: representation['frameStart'] = "%0{}d".format( len(str(last_frame))) % first_frame @@ -93,13 +95,14 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "handleEnd": handle_end, "frameStart": first_frame + handle_start, "frameEnd": last_frame - handle_end, - "version": int(version), + "version": int(instance.data['version']), "colorspace": node["colorspace"].value(), - "families": [instance.data["family"]] + instance.data["families"], + "families": [instance.data["family"]], "subset": instance.data["subset"], "fps": instance.context.data["fps"] } + instance.data["family"] = "write" group_node = [x for x in instance if x.Class() == "Group"][0] deadlineChunkSize = 1 if "deadlineChunkSize" in group_node.knobs(): @@ -109,6 +112,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): if "deadlinePriority" in group_node.knobs(): deadlinePriority = group_node["deadlinePriority"].value() + families = [f for f in instance.data["families"] if "write" not in f] instance.data.update({ "versionData": version_data, "path": path, @@ -119,10 +123,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "frameStart": first_frame, "frameEnd": last_frame, "outputType": output_type, + "family": "write", + "families": families, "colorspace": node["colorspace"].value(), "deadlineChunkSize": deadlineChunkSize, - "deadlinePriority": deadlinePriority, - "subsetGroup": "renders" + "deadlinePriority": deadlinePriority }) self.log.debug("instance.data: {}".format(instance.data)) diff --git a/pype/plugins/nuke/publish/extract_backdrop.py b/pype/plugins/nuke/publish/extract_backdrop.py new file mode 100644 index 0000000000..7b01b5deac --- /dev/null +++ b/pype/plugins/nuke/publish/extract_backdrop.py @@ -0,0 +1,103 @@ +import pyblish.api +from avalon.nuke import lib as anlib +from pype.nuke import lib as pnlib +import nuke +import os +import pype +reload(pnlib) + +class ExtractBackdropNode(pype.api.Extractor): + """Extracting content of backdrop nodes + + Will create nuke script only with containing nodes. + Also it will solve Input and Output nodes. 
+ + """ + + order = pyblish.api.ExtractorOrder + label = "Extract Backdrop" + hosts = ["nuke"] + families = ["nukenodes"] + + def process(self, instance): + tmp_nodes = list() + nodes = instance[1:] + # Define extract output file path + stagingdir = self.staging_dir(instance) + filename = "{0}.nk".format(instance.name) + path = os.path.join(stagingdir, filename) + + # maintain selection + with anlib.maintained_selection(): + # all connections outside of backdrop + connections_in = instance.data["connections_in"] + connections_out = instance.data["connections_out"] + self.log.debug("_ connections_in: `{}`".format(connections_in)) + self.log.debug("_ connections_out: `{}`".format(connections_out)) + + # create input nodes and name them as passing node (*_INP) + for n, inputs in connections_in.items(): + for i, input in inputs: + inpn = nuke.createNode("Input") + inpn["name"].setValue("{}_{}_INP".format(n.name(), i)) + n.setInput(i, inpn) + inpn.setXYpos(input.xpos(), input.ypos()) + nodes.append(inpn) + tmp_nodes.append(inpn) + + anlib.reset_selection() + + # connect output node + for n, output in connections_out.items(): + opn = nuke.createNode("Output") + self.log.info(n.name()) + self.log.info(output.name()) + output.setInput( + next((i for i, d in enumerate(output.dependencies()) + if d.name() in n.name()), 0), opn) + opn.setInput(0, n) + opn.autoplace() + nodes.append(opn) + tmp_nodes.append(opn) + anlib.reset_selection() + + # select nodes to copy + anlib.reset_selection() + anlib.select_nodes(nodes) + # create tmp nk file + # save file to the path + nuke.nodeCopy(path) + + # Clean up + for tn in tmp_nodes: + nuke.delete(tn) + + # restore original connections + # reconnect input node + for n, inputs in connections_in.items(): + for i, input in inputs: + n.setInput(i, input) + + # reconnect output node + for n, output in connections_out.items(): + output.setInput( + next((i for i, d in enumerate(output.dependencies()) + if d.name() in n.name()), 0), n) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + # create representation + representation = { + 'name': 'nk', + 'ext': 'nk', + 'files': filename, + "stagingDir": stagingdir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance '{}' to: {}".format( + instance.name, path)) + + self.log.info("Data {}".format( + instance.data)) diff --git a/pype/plugins/nuke/publish/extract_gizmo.py b/pype/plugins/nuke/publish/extract_gizmo.py new file mode 100644 index 0000000000..36ef1d464c --- /dev/null +++ b/pype/plugins/nuke/publish/extract_gizmo.py @@ -0,0 +1,95 @@ +import pyblish.api +from avalon.nuke import lib as anlib +from pype.nuke import lib as pnlib +from pype.nuke import utils as pnutils +import nuke +import os +import pype + + +class ExtractGizmo(pype.api.Extractor): + """Extracting Gizmo (Group) node + + Will create nuke script only with the Gizmo node. 
+ """ + + order = pyblish.api.ExtractorOrder + label = "Extract Gizmo (Group)" + hosts = ["nuke"] + families = ["gizmo"] + + def process(self, instance): + tmp_nodes = list() + orig_grpn = instance[0] + # Define extract output file path + stagingdir = self.staging_dir(instance) + filename = "{0}.nk".format(instance.name) + path = os.path.join(stagingdir, filename) + + # maintain selection + with anlib.maintained_selection(): + orig_grpn_name = orig_grpn.name() + tmp_grpn_name = orig_grpn_name + "_tmp" + # select original group node + anlib.select_nodes([orig_grpn]) + + # copy to clipboard + nuke.nodeCopy("%clipboard%") + + # reset selection to none + anlib.reset_selection() + + # paste clipboard + nuke.nodePaste("%clipboard%") + + # assign pasted node + copy_grpn = nuke.selectedNode() + copy_grpn.setXYpos((orig_grpn.xpos() + 120), orig_grpn.ypos()) + + # convert gizmos to groups + pnutils.bake_gizmos_recursively(copy_grpn) + + # remove avalonknobs + knobs = copy_grpn.knobs() + avalon_knobs = [k for k in knobs.keys() + for ak in ["avalon:", "ak:"] + if ak in k] + avalon_knobs.append("publish") + for ak in avalon_knobs: + copy_grpn.removeKnob(knobs[ak]) + + # add to temporary nodes + tmp_nodes.append(copy_grpn) + + # swap names + orig_grpn.setName(tmp_grpn_name) + copy_grpn.setName(orig_grpn_name) + + # create tmp nk file + # save file to the path + nuke.nodeCopy(path) + + # Clean up + for tn in tmp_nodes: + nuke.delete(tn) + + # rename back to original + orig_grpn.setName(orig_grpn_name) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + # create representation + representation = { + 'name': 'gizmo', + 'ext': 'nk', + 'files': filename, + "stagingDir": stagingdir + } + instance.data["representations"].append(representation) + + self.log.info("Extracted instance '{}' to: {}".format( + instance.name, path)) + + self.log.info("Data {}".format( + instance.data)) diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index 825db67e9d..9b8baa468b 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -28,6 +28,11 @@ class NukeRenderLocal(pype.api.Extractor): self.log.debug("instance collected: {}".format(instance.data)) first_frame = instance.data.get("frameStart", None) + + # exception for slate workflow + if "slate" in instance.data["families"]: + first_frame -= 1 + last_frame = instance.data.get("frameEnd", None) node_subset_name = instance.data.get("name", None) @@ -47,6 +52,10 @@ class NukeRenderLocal(pype.api.Extractor): int(last_frame) ) + # exception for slate workflow + if "slate" in instance.data["families"]: + first_frame += 1 + path = node['file'].value() out_dir = os.path.dirname(path) ext = node["file_type"].value() diff --git a/pype/plugins/nuke/publish/extract_review_data.py b/pype/plugins/nuke/publish/extract_review_data.py deleted file mode 100644 index 791b9d7969..0000000000 --- a/pype/plugins/nuke/publish/extract_review_data.py +++ /dev/null @@ -1,187 +0,0 @@ -import os -import nuke -import pyblish.api -import pype - -class ExtractReviewData(pype.api.Extractor): - """Extracts movie and thumbnail with baked in luts - - must be run after extract_render_local.py - - """ - - order = pyblish.api.ExtractorOrder + 0.01 - label = "Extract Review Data" - - families = ["review"] - hosts = ["nuke"] - - def process(self, instance): - - # Store selection - selection = [i for i in nuke.allNodes() if i["selected"].getValue()] - 
# Deselect all nodes to prevent external connections - [i["selected"].setValue(False) for i in nuke.allNodes()] - self.log.debug("creating staging dir:") - self.staging_dir(instance) - - self.log.debug("instance: {}".format(instance)) - self.log.debug("instance.data[families]: {}".format( - instance.data["families"])) - - if "still" not in instance.data["families"]: - self.render_review_representation(instance, - representation="mov") - self.render_review_representation(instance, - representation="jpeg") - else: - self.render_review_representation(instance, representation="jpeg") - - # Restore selection - [i["selected"].setValue(False) for i in nuke.allNodes()] - [i["selected"].setValue(True) for i in selection] - - def render_review_representation(self, - instance, - representation="mov"): - - assert instance.data['representations'][0]['files'], "Instance data files should't be empty!" - - temporary_nodes = [] - stagingDir = instance.data[ - 'representations'][0]["stagingDir"].replace("\\", "/") - self.log.debug("StagingDir `{0}`...".format(stagingDir)) - - collection = instance.data.get("collection", None) - - if collection: - # get path - fname = os.path.basename(collection.format( - "{head}{padding}{tail}")) - fhead = collection.format("{head}") - - # get first and last frame - first_frame = min(collection.indexes) - last_frame = max(collection.indexes) - else: - fname = os.path.basename(instance.data.get("path", None)) - fhead = os.path.splitext(fname)[0] + "." - first_frame = instance.data.get("frameStart", None) - last_frame = instance.data.get("frameEnd", None) - - rnode = nuke.createNode("Read") - - rnode["file"].setValue( - os.path.join(stagingDir, fname).replace("\\", "/")) - - rnode["first"].setValue(first_frame) - rnode["origfirst"].setValue(first_frame) - rnode["last"].setValue(last_frame) - rnode["origlast"].setValue(last_frame) - temporary_nodes.append(rnode) - previous_node = rnode - - # get input process and connect it to baking - ipn = self.get_view_process_node() - if ipn is not None: - ipn.setInput(0, previous_node) - previous_node = ipn - temporary_nodes.append(ipn) - - reformat_node = nuke.createNode("Reformat") - - ref_node = self.nodes.get("Reformat", None) - if ref_node: - for k, v in ref_node: - self.log.debug("k,v: {0}:{1}".format(k,v)) - if isinstance(v, unicode): - v = str(v) - reformat_node[k].setValue(v) - - reformat_node.setInput(0, previous_node) - previous_node = reformat_node - temporary_nodes.append(reformat_node) - - dag_node = nuke.createNode("OCIODisplay") - dag_node.setInput(0, previous_node) - previous_node = dag_node - temporary_nodes.append(dag_node) - - # create write node - write_node = nuke.createNode("Write") - - if representation in "mov": - file = fhead + "baked.mov" - name = "baked" - path = os.path.join(stagingDir, file).replace("\\", "/") - self.log.debug("Path: {}".format(path)) - instance.data["baked_colorspace_movie"] = path - write_node["file"].setValue(path) - write_node["file_type"].setValue("mov") - write_node["raw"].setValue(1) - write_node.setInput(0, previous_node) - temporary_nodes.append(write_node) - tags = ["review", "delete"] - - elif representation in "jpeg": - file = fhead + "jpeg" - name = "thumbnail" - path = os.path.join(stagingDir, file).replace("\\", "/") - instance.data["thumbnail"] = path - write_node["file"].setValue(path) - write_node["file_type"].setValue("jpeg") - write_node["raw"].setValue(1) - write_node.setInput(0, previous_node) - temporary_nodes.append(write_node) - tags = ["thumbnail"] - - # retime for - 
first_frame = int(last_frame) / 2 - last_frame = int(last_frame) / 2 - - repre = { - 'name': name, - 'ext': representation, - 'files': file, - "stagingDir": stagingDir, - "frameStart": first_frame, - "frameEnd": last_frame, - "anatomy_template": "render", - "tags": tags - } - instance.data["representations"].append(repre) - - # Render frames - nuke.execute(write_node.name(), int(first_frame), int(last_frame)) - - self.log.debug("representations: {}".format(instance.data["representations"])) - - # Clean up - for node in temporary_nodes: - nuke.delete(node) - - def get_view_process_node(self): - - # Select only the target node - if nuke.selectedNodes(): - [n.setSelected(False) for n in nuke.selectedNodes()] - - ipn_orig = None - for v in [n for n in nuke.allNodes() - if "Viewer" in n.Class()]: - ip = v['input_process'].getValue() - ipn = v['input_process_node'].getValue() - if "VIEWER_INPUT" not in ipn and ip: - ipn_orig = nuke.toNode(ipn) - ipn_orig.setSelected(True) - - if ipn_orig: - nuke.nodeCopy('%clipboard%') - - [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all - - nuke.nodePaste('%clipboard%') - - ipn = nuke.selectedNode() - - return ipn diff --git a/pype/plugins/nuke/publish/extract_review_data_lut.py b/pype/plugins/nuke/publish/extract_review_data_lut.py new file mode 100644 index 0000000000..90b1fda1ec --- /dev/null +++ b/pype/plugins/nuke/publish/extract_review_data_lut.py @@ -0,0 +1,59 @@ +import os +import pyblish.api +from avalon.nuke import lib as anlib +from pype.nuke import lib as pnlib +import pype +reload(pnlib) + + +class ExtractReviewDataLut(pype.api.Extractor): + """Extracts movie and thumbnail with baked in luts + + must be run after extract_render_local.py + + """ + + order = pyblish.api.ExtractorOrder + 0.005 + label = "Extract Review Data Lut" + + families = ["review"] + hosts = ["nuke"] + + def process(self, instance): + families = instance.data["families"] + self.log.info("Creating staging dir...") + if "representations" in instance.data: + staging_dir = instance.data[ + "representations"][0]["stagingDir"].replace("\\", "/") + instance.data["stagingDir"] = staging_dir + instance.data["representations"][0]["tags"] = ["review"] + else: + instance.data["representations"] = [] + # get output path + render_path = instance.data['path'] + staging_dir = os.path.normpath(os.path.dirname(render_path)) + instance.data["stagingDir"] = staging_dir + + self.log.info( + "StagingDir `{0}`...".format(instance.data["stagingDir"])) + + # generate data + with anlib.maintained_selection(): + exporter = pnlib.ExporterReviewLut( + self, instance + ) + data = exporter.generate_lut() + + # assign to representations + instance.data["lutPath"] = os.path.join( + exporter.stagingDir, exporter.file).replace("\\", "/") + instance.data["representations"] += data["representations"] + + if "render.farm" in families: + instance.data["families"].remove("review") + instance.data["families"].remove("ftrack") + + self.log.debug( + "_ lutPath: {}".format(instance.data["lutPath"])) + self.log.debug( + "_ representations: {}".format(instance.data["representations"])) diff --git a/pype/plugins/nuke/publish/extract_review_data_mov.py b/pype/plugins/nuke/publish/extract_review_data_mov.py new file mode 100644 index 0000000000..8b204680a7 --- /dev/null +++ b/pype/plugins/nuke/publish/extract_review_data_mov.py @@ -0,0 +1,61 @@ +import os +import pyblish.api +from avalon.nuke import lib as anlib +from pype.nuke import lib as pnlib +import pype + + +class 
ExtractReviewDataMov(pype.api.Extractor): + """Extracts movie and thumbnail with baked in luts + + must be run after extract_render_local.py + + """ + + order = pyblish.api.ExtractorOrder + 0.01 + label = "Extract Review Data Mov" + + families = ["review", "render", "render.local"] + hosts = ["nuke"] + + def process(self, instance): + families = instance.data["families"] + self.log.info("Creating staging dir...") + + if "representations" not in instance.data: + instance.data["representations"] = list() + + staging_dir = os.path.normpath( + os.path.dirname(instance.data['path'])) + + instance.data["stagingDir"] = staging_dir + + self.log.info( + "StagingDir `{0}`...".format(instance.data["stagingDir"])) + + # generate data + with anlib.maintained_selection(): + exporter = pnlib.ExporterReviewMov( + self, instance) + + if "render.farm" in families: + instance.data["families"].remove("review") + instance.data["families"].remove("ftrack") + data = exporter.generate_mov(farm=True) + + self.log.debug( + "_ data: {}".format(data)) + + instance.data.update({ + "bakeRenderPath": data.get("bakeRenderPath"), + "bakeScriptPath": data.get("bakeScriptPath"), + "bakeWriteNodeName": data.get("bakeWriteNodeName") + }) + else: + data = exporter.generate_mov() + + # assign to representations + instance.data["representations"] += data["representations"] + + self.log.debug( + "_ representations: {}".format(instance.data["representations"])) diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py new file mode 100644 index 0000000000..4d43f38859 --- /dev/null +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -0,0 +1,154 @@ +import os +import nuke +from avalon.nuke import lib as anlib +import pyblish.api +import pype + + +class ExtractSlateFrame(pype.api.Extractor): + """Extracts movie and thumbnail with baked in luts + + must be run after extract_render_local.py + + """ + + order = pyblish.api.ExtractorOrder - 0.001 + label = "Extract Slate Frame" + + families = ["slate"] + hosts = ["nuke"] + + + def process(self, instance): + if hasattr(self, "viewer_lut_raw"): + self.viewer_lut_raw = self.viewer_lut_raw + else: + self.viewer_lut_raw = False + + with anlib.maintained_selection(): + self.log.debug("instance: {}".format(instance)) + self.log.debug("instance.data[families]: {}".format( + instance.data["families"])) + + self.render_slate(instance) + + def render_slate(self, instance): + node = instance[0] # group node + self.log.info("Creating staging dir...") + + if "representations" not in instance.data: + instance.data["representations"] = list() + + staging_dir = os.path.normpath( + os.path.dirname(instance.data['path'])) + + instance.data["stagingDir"] = staging_dir + + self.log.info( + "StagingDir `{0}`...".format(instance.data["stagingDir"])) + + temporary_nodes = [] + collection = instance.data.get("collection", None) + + if collection: + # get path + fname = os.path.basename(collection.format( + "{head}{padding}{tail}")) + fhead = collection.format("{head}") + + # get first and last frame + first_frame = min(collection.indexes) - 1 + + if "slate" in instance.data["families"]: + first_frame += 1 + + last_frame = first_frame + else: + fname = os.path.basename(instance.data.get("path", None)) + fhead = os.path.splitext(fname)[0] + "." 
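+            # the slate is rendered as a single frame sitting one frame
+            # before the first frame of the clip, hence the "- 1" below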
+ first_frame = instance.data.get("frameStart", None) - 1 + last_frame = first_frame + + if "#" in fhead: + fhead = fhead.replace("#", "")[:-1] + + previous_node = node + + # get input process and connect it to baking + ipn = self.get_view_process_node() + if ipn is not None: + ipn.setInput(0, previous_node) + previous_node = ipn + temporary_nodes.append(ipn) + + if not self.viewer_lut_raw: + dag_node = nuke.createNode("OCIODisplay") + dag_node.setInput(0, previous_node) + previous_node = dag_node + temporary_nodes.append(dag_node) + + # create write node + write_node = nuke.createNode("Write") + file = fhead + "slate.png" + path = os.path.join(staging_dir, file).replace("\\", "/") + instance.data["slateFrame"] = path + write_node["file"].setValue(path) + write_node["file_type"].setValue("png") + write_node["raw"].setValue(1) + write_node.setInput(0, previous_node) + temporary_nodes.append(write_node) + + # fill slate node with comments + self.add_comment_slate_node(instance) + + # Render frames + nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + + self.log.debug( + "slate frame path: {}".format(instance.data["slateFrame"])) + + # Clean up + for node in temporary_nodes: + nuke.delete(node) + + + def get_view_process_node(self): + + # Select only the target node + if nuke.selectedNodes(): + [n.setSelected(False) for n in nuke.selectedNodes()] + + ipn_orig = None + for v in [n for n in nuke.allNodes() + if "Viewer" in n.Class()]: + ip = v['input_process'].getValue() + ipn = v['input_process_node'].getValue() + if "VIEWER_INPUT" not in ipn and ip: + ipn_orig = nuke.toNode(ipn) + ipn_orig.setSelected(True) + + if ipn_orig: + nuke.nodeCopy('%clipboard%') + + [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all + + nuke.nodePaste('%clipboard%') + + ipn = nuke.selectedNode() + + return ipn + + def add_comment_slate_node(self, instance): + node = instance.data.get("slateNode") + if not node: + return + + comment = instance.context.data.get("comment") + intent = instance.context.data.get("intent") + + try: + node["f_submission_note"].setValue(comment) + node["f_submitting_for"].setValue(intent) + except NameError: + return + instance.data.pop("slateNode") diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py new file mode 100644 index 0000000000..55ba34a0d4 --- /dev/null +++ b/pype/plugins/nuke/publish/extract_thumbnail.py @@ -0,0 +1,171 @@ +import os +import nuke +from avalon.nuke import lib as anlib +import pyblish.api +import pype + + +class ExtractThumbnail(pype.api.Extractor): + """Extracts movie and thumbnail with baked in luts + + must be run after extract_render_local.py + + """ + + order = pyblish.api.ExtractorOrder + 0.01 + label = "Extract Thumbnail" + + families = ["review", "render.farm"] + hosts = ["nuke"] + + def process(self, instance): + + with anlib.maintained_selection(): + self.log.debug("instance: {}".format(instance)) + self.log.debug("instance.data[families]: {}".format( + instance.data["families"])) + + self.render_thumbnail(instance) + + def render_thumbnail(self, instance): + node = instance[0] # group node + self.log.info("Creating staging dir...") + + if "representations" not in instance.data: + instance.data["representations"] = list() + + staging_dir = os.path.normpath( + os.path.dirname(instance.data['path'])) + + instance.data["stagingDir"] = staging_dir + + self.log.info( + "StagingDir `{0}`...".format(instance.data["stagingDir"])) + + temporary_nodes = [] + collection = 
instance.data.get("collection", None) + + if collection: + # get path + fname = os.path.basename(collection.format( + "{head}{padding}{tail}")) + fhead = collection.format("{head}") + + # get first and last frame + first_frame = min(collection.indexes) + last_frame = max(collection.indexes) + else: + fname = os.path.basename(instance.data.get("path", None)) + fhead = os.path.splitext(fname)[0] + "." + first_frame = instance.data.get("frameStart", None) + last_frame = instance.data.get("frameEnd", None) + + if "#" in fhead: + fhead = fhead.replace("#", "")[:-1] + + path_render = os.path.join(staging_dir, fname).replace("\\", "/") + # check if file exist otherwise connect to write node + if os.path.isfile(path_render): + rnode = nuke.createNode("Read") + + rnode["file"].setValue(path_render) + + rnode["first"].setValue(first_frame) + rnode["origfirst"].setValue(first_frame) + rnode["last"].setValue(last_frame) + rnode["origlast"].setValue(last_frame) + temporary_nodes.append(rnode) + previous_node = rnode + else: + previous_node = node + + # get input process and connect it to baking + ipn = self.get_view_process_node() + if ipn is not None: + ipn.setInput(0, previous_node) + previous_node = ipn + temporary_nodes.append(ipn) + + reformat_node = nuke.createNode("Reformat") + + ref_node = self.nodes.get("Reformat", None) + if ref_node: + for k, v in ref_node: + self.log.debug("k, v: {0}:{1}".format(k, v)) + if isinstance(v, unicode): + v = str(v) + reformat_node[k].setValue(v) + + reformat_node.setInput(0, previous_node) + previous_node = reformat_node + temporary_nodes.append(reformat_node) + + dag_node = nuke.createNode("OCIODisplay") + dag_node.setInput(0, previous_node) + previous_node = dag_node + temporary_nodes.append(dag_node) + + # create write node + write_node = nuke.createNode("Write") + file = fhead + "jpeg" + name = "thumbnail" + path = os.path.join(staging_dir, file).replace("\\", "/") + instance.data["thumbnail"] = path + write_node["file"].setValue(path) + write_node["file_type"].setValue("jpeg") + write_node["raw"].setValue(1) + write_node.setInput(0, previous_node) + temporary_nodes.append(write_node) + tags = ["thumbnail"] + + # retime for + first_frame = int(last_frame) / 2 + last_frame = int(last_frame) / 2 + + repre = { + 'name': name, + 'ext': "jpeg", + 'files': file, + "stagingDir": staging_dir, + "frameStart": first_frame, + "frameEnd": last_frame, + "anatomy_template": "render", + "tags": tags + } + instance.data["representations"].append(repre) + + # Render frames + nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + + self.log.debug( + "representations: {}".format(instance.data["representations"])) + + # Clean up + for node in temporary_nodes: + nuke.delete(node) + + def get_view_process_node(self): + + # Select only the target node + if nuke.selectedNodes(): + [n.setSelected(False) for n in nuke.selectedNodes()] + + ipn_orig = None + for v in [n for n in nuke.allNodes() + if "Viewer" in n.Class()]: + ip = v['input_process'].getValue() + ipn = v['input_process_node'].getValue() + if "VIEWER_INPUT" not in ipn and ip: + ipn_orig = nuke.toNode(ipn) + ipn_orig.setSelected(True) + + if ipn_orig: + nuke.nodeCopy('%clipboard%') + + [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all + + nuke.nodePaste('%clipboard%') + + ipn = nuke.selectedNode() + + return ipn diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 4044026b5e..71108189c0 100644 --- 
a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -2,8 +2,6 @@ import os import json import getpass -import nuke - from avalon import api from avalon.vendor import requests import re @@ -27,102 +25,133 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): def process(self, instance): - node = None - for x in instance: - if x.Class() == "Write": - node = x - - if node is None: - return + node = instance[0] + context = instance.context DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", "http://localhost:8082") assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" - context = instance.context - workspace = os.path.dirname(context.data["currentFile"]) - filepath = None + self.deadline_url = "{}/api/jobs".format(DEADLINE_REST_URL) + self._comment = context.data.get("comment", "") + self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion")) + self._deadline_user = context.data.get( + "deadlineUser", getpass.getuser()) + self._frame_start = int(instance.data["frameStart"]) + self._frame_end = int(instance.data["frameEnd"]) - # get path - path = nuke.filename(node) - output_dir = instance.data['outputDir'] + # get output path + render_path = instance.data['path'] + script_path = context.data["currentFile"] - filepath = context.data["currentFile"] + # exception for slate workflow + if "slate" in instance.data["families"]: + self._frame_start -= 1 - self.log.debug(filepath) + response = self.payload_submit(instance, + script_path, + render_path, + node.name() + ) + # Store output dir for unified publisher (filesequence) + instance.data["deadlineSubmissionJob"] = response.json() + instance.data["publishJobState"] = "Active" - filename = os.path.basename(filepath) - comment = context.data.get("comment", "") - dirname = os.path.join(workspace, "renders") - deadline_user = context.data.get("deadlineUser", getpass.getuser()) - jobname = "%s - %s" % (filename, instance.name) - ver = re.search(r"\d+\.\d+", context.data.get("hostVersion")) + if instance.data.get("bakeScriptPath"): + render_path = instance.data.get("bakeRenderPath") + script_path = instance.data.get("bakeScriptPath") + exe_node_name = instance.data.get("bakeWriteNodeName") + + # exception for slate workflow + if "slate" in instance.data["families"]: + self._frame_start += 1 + + resp = self.payload_submit(instance, + script_path, + render_path, + exe_node_name, + response.json() + ) + # Store output dir for unified publisher (filesequence) + instance.data["deadlineSubmissionJob"] = resp.json() + instance.data["publishJobState"] = "Suspended" + + def payload_submit(self, + instance, + script_path, + render_path, + exe_node_name, + responce_data=None + ): + render_dir = os.path.normpath(os.path.dirname(render_path)) + script_name = os.path.basename(script_path) + jobname = "%s - %s" % (script_name, instance.name) + + if not responce_data: + responce_data = {} try: # Ensure render folder exists - os.makedirs(dirname) + os.makedirs(render_dir) except OSError: pass - # Documentation for keys available at: - # https://docs.thinkboxsoftware.com - # /products/deadline/8.0/1_User%20Manual/manual - # /manual-submission.html#job-info-file-options payload = { "JobInfo": { # Top-level group name - "BatchName": filename, + "BatchName": script_name, # Job name, as seen in Monitor "Name": jobname, # Arbitrary username, for visualisation in Monitor - "UserName": deadline_user, + "UserName": self._deadline_user, + + "Priority": instance.data["deadlinePriority"], + + "Pool": "2d", + 
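+                # Both render pools are hard-coded; "2d" is a
+                # studio-specific Deadline pool name (an assumption of this
+                # setup, not a Deadline default).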
"SecondaryPool": "2d", "Plugin": "Nuke", "Frames": "{start}-{end}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]) + start=self._frame_start, + end=self._frame_end ), - "ChunkSize": instance.data["deadlineChunkSize"], - "Priority": instance.data["deadlinePriority"], + "Comment": self._comment, - "Comment": comment, - - # Optional, enable double-click to preview rendered - # frames from Deadline Monitor - # "OutputFilename0": output_filename_0.replace("\\", "/"), }, "PluginInfo": { # Input - "SceneFile": filepath, + "SceneFile": script_path, # Output directory and filename - "OutputFilePath": dirname.replace("\\", "/"), + "OutputFilePath": render_dir.replace("\\", "/"), # "OutputFilePrefix": render_variables["filename_prefix"], # Mandatory for Deadline - "Version": ver.group(), + "Version": self._ver.group(), # Resolve relative references - "ProjectPath": workspace, - + "ProjectPath": script_path, + "AWSAssetFile0": render_path, # Only the specific write node is rendered. - "WriteNode": instance[0].name() + "WriteNode": exe_node_name }, # Mandatory for Deadline, may be empty "AuxFiles": [] } + if responce_data.get("_id"): + payload["JobInfo"].update({ + "JobType": "Normal", + "BatchName": responce_data["Props"]["Batch"], + "JobDependency0": responce_data["_id"], + "ChunkSize": 99999999 + }) + # Include critical environment variables with submission keys = [ - # This will trigger `userSetup.py` on the slave - # such that proper initialisation happens the same - # way as it does on a local machine. - # TODO(marcus): This won't work if the slaves don't - # have accesss to these paths, such as if slaves are - # running Linux and the submitter is on Windows. "PYTHONPATH", "PATH", "AVALON_SCHEMA", @@ -168,11 +197,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): if key == "PYTHONPATH": clean_path = clean_path.replace('python2', 'python3') + clean_path = clean_path.replace( - os.path.normpath( - environment['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - environment['PYPE_STUDIO_CORE_PATH'])) # noqa + os.path.normpath( + environment['PYPE_STUDIO_CORE_MOUNT']), # noqa + os.path.normpath( + environment['PYPE_STUDIO_CORE_PATH'])) # noqa clean_environment[key] = clean_path environment = clean_environment @@ -187,20 +217,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): plugin = payload["JobInfo"]["Plugin"] self.log.info("using render plugin : {}".format(plugin)) - self.preflight_check(instance) - self.log.info("Submitting..") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) - # E.g. 
http://192.168.0.1:8082/api/jobs
-        url = "{}/api/jobs".format(DEADLINE_REST_URL)
-        response = requests.post(url, json=payload)
+        response = requests.post(self.deadline_url, json=payload)
+
         if not response.ok:
             raise Exception(response.text)

-        # Store output dir for unified publisher (filesequence)
-        instance.data["deadlineSubmissionJob"] = response.json()
-        instance.data["publishJobState"] = "Active"
+        return response

     def preflight_check(self, instance):
         """Ensure the startFrame, endFrame and byFrameStep are integers"""
diff --git a/pype/plugins/nuke/publish/validate_backdrop.py b/pype/plugins/nuke/publish/validate_backdrop.py
new file mode 100644
index 0000000000..cf2d56087d
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_backdrop.py
@@ -0,0 +1,69 @@
+import pyblish
+from avalon.nuke import lib as anlib
+import nuke
+
+
+class SelectCenterInNodeGraph(pyblish.api.Action):
+    """
+    Center the failed instance's node in the node graph
+    """
+
+    label = "Center node in node graph"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (result["error"] is not None and result["instance"] is not None
+                    and result["instance"] not in failed):
+                failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+        all_xC = list()
+        all_yC = list()
+
+        # maintain selection
+        with anlib.maintained_selection():
+            # collect all failed nodes xpos and ypos
+            for instance in instances:
+                bdn = instance[0]
+                xC = bdn.xpos() + bdn.screenWidth()/2
+                yC = bdn.ypos() + bdn.screenHeight()/2
+
+                all_xC.append(xC)
+                all_yC.append(yC)
+
+        self.log.info("all_xC: `{}`".format(all_xC))
+        self.log.info("all_yC: `{}`".format(all_yC))
+
+        # zoom to nodes in node graph
+        nuke.zoom(2, [min(all_xC), min(all_yC)])
+
+
+@pyblish.api.log
+class ValidateBackdrop(pyblish.api.InstancePlugin):
+    """Validate the number of nodes on the backdrop node, in case the
+    user forgot to add nodes above the publishing backdrop node."""
+
+    order = pyblish.api.ValidatorOrder
+    optional = True
+    families = ["nukenodes"]
+    label = "Validate Backdrop"
+    hosts = ["nuke"]
+    actions = [SelectCenterInNodeGraph]
+
+    def process(self, instance):
+        connections_out = instance.data["connections_out"]
+
+        msg_multiple_outputs = "Only one outgoing connection from \"{}\" is allowed".format(
+            instance.data["name"])
+        assert len(connections_out.keys()) <= 1, msg_multiple_outputs
+
+        msg_no_content = "No content on backdrop node: \"{}\"".format(
+            instance.data["name"])
+        assert len(instance) > 1, msg_no_content
diff --git a/pype/plugins/nuke/publish/validate_gizmo.py b/pype/plugins/nuke/publish/validate_gizmo.py
new file mode 100644
index 0000000000..9c94ea88ef
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_gizmo.py
@@ -0,0 +1,58 @@
+import pyblish
+from avalon.nuke import lib as anlib
+import nuke
+
+
+class OpenFailedGroupNode(pyblish.api.Action):
+    """
+    Open the failed instance's group node in the node graph
+    """
+
+    label = "Open Gizmo in Node Graph"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (result["error"] is not None and result["instance"] is not None
+                    and result["instance"] not in failed):
+                failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
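+        # Each failed instance wraps its group node as instance[0]; the
+        # loop below simply opens every such group in the DAG viewer.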
+
+        # maintain selection
+        with anlib.maintained_selection():
+            # collect all failed nodes xpos and ypos
+            for instance in instances:
+                grpn = instance[0]
+                nuke.showDag(grpn)
+
+
+@pyblish.api.log
+class ValidateGizmo(pyblish.api.InstancePlugin):
+    """Validate the number of Output and Input nodes in a gizmo (group) node"""
+
+    order = pyblish.api.ValidatorOrder
+    optional = True
+    families = ["gizmo"]
+    label = "Validate Gizmo (Group)"
+    hosts = ["nuke"]
+    actions = [OpenFailedGroupNode]
+
+    def process(self, instance):
+        grpn = instance[0]
+
+        with grpn:
+            connections_out = nuke.allNodes('Output')
+            msg_multiple_outputs = (
+                "Only one outgoing connection from "
+                "\"{}\" is allowed".format(instance.data["name"]))
+            assert len(connections_out) <= 1, msg_multiple_outputs
+
+            connections_in = nuke.allNodes('Input')
+            msg_missing_inputs = (
+                "At least one Input node has to be used in: "
+                "\"{}\"".format(instance.data["name"]))
+            assert len(connections_in) >= 1, msg_missing_inputs
diff --git a/pype/plugins/nuke/publish/validate_output_resolution.py b/pype/plugins/nuke/publish/validate_output_resolution.py
new file mode 100644
index 0000000000..2563ee929f
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_output_resolution.py
@@ -0,0 +1,78 @@
+import nuke
+
+import pyblish.api
+
+
+class RepairWriteResolutionDifference(pyblish.api.Action):
+
+    label = "Repair"
+    icon = "wrench"
+    on = "failed"
+
+    def process(self, context, plugin):
+
+        # Get the errored instances
+        failed = []
+        for result in context.data["results"]:
+            if (result["error"] is not None and result["instance"] is not None
+                    and result["instance"] not in failed):
+                failed.append(result["instance"])
+
+        # Apply pyblish.logic to get the instances for the plug-in
+        instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+        for instance in instances:
+            reformat = instance[0].dependencies()[0]
+            if reformat.Class() != "Reformat":
+                reformat = nuke.nodes.Reformat(inputs=[instance[0].input(0)])
+
+                xpos = instance[0].xpos()
+                ypos = instance[0].ypos() - 26
+
+                dependent_ypos = instance[0].dependencies()[0].ypos()
+                if (instance[0].ypos() - dependent_ypos) <= 51:
+                    xpos += 110
+
+                reformat.setXYpos(xpos, ypos)
+
+                instance[0].setInput(0, reformat)
+
+            reformat["resize"].setValue("none")
+
+
+class ValidateOutputResolution(pyblish.api.InstancePlugin):
+    """Validates output resolution.
+
+    Makes sure the resolution of the Write node's input matches the
+    format defined on the script's Root node.
+    """
+
+    order = pyblish.api.ValidatorOrder
+    optional = True
+    families = ["render", "render.local", "render.farm"]
+    label = "Write Resolution"
+    hosts = ["nuke"]
+    actions = [RepairWriteResolutionDifference]
+
+    def process(self, instance):
+
+        # Skip the resolution check if a Crop node feeds the Write node.
+        if instance[0].dependencies()[0].Class() == "Crop":
+            return
+
+        msg = "Write node resolution does not match the root format."
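+        # check_resolution() below compares the Write node's input format
+        # with the root resolution collected on the instance, e.g. a root
+        # format of 1920x1080 fails when the Write node receives 2048x1080.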
+        assert self.check_resolution(instance), msg
+
+    def check_resolution(self, instance):
+        node = instance[0]
+
+        root_width = instance.data["resolutionWidth"]
+        root_height = instance.data["resolutionHeight"]
+
+        write_width = node.format().width()
+        write_height = node.format().height()
+
+        return (root_width == write_width) and (root_height == write_height)
diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py
index e244a9b4b6..169ea1ecb5 100644
--- a/pype/plugins/nuke/publish/validate_rendered_frames.py
+++ b/pype/plugins/nuke/publish/validate_rendered_frames.py
@@ -28,7 +28,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
     """ Validates file output. """

     order = pyblish.api.ValidatorOrder + 0.1
-    families = ["render.no"]
+    families = ["render"]
     label = "Validate rendered frame"
     hosts = ["nuke", "nukestudio"]
@@ -41,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
         if not repre.get('files'):
             msg = ("no frames were collected, "
                    "you need to render them")
-            self.log.warning(msg)
+            self.log.error(msg)
             raise ValidationException(msg)

         collections, remainder = clique.assemble(repre["files"])
@@ -76,6 +76,9 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
             'len(collection.indexes): {}'.format(collected_frames_len)
         )

+        if "slate" in instance.data["families"]:
+            collected_frames_len -= 1
+
         assert (collected_frames_len == frame_length), (
             "{} missing frames. Use repair to render all frames"
         ).format(__name__)
diff --git a/pype/plugins/nuke/publish/validate_write_bounding_box.py b/pype/plugins/nuke/publish/validate_write_bounding_box.py
index 417d4ab004..e4b7c77a25 100644
--- a/pype/plugins/nuke/publish/validate_write_bounding_box.py
+++ b/pype/plugins/nuke/publish/validate_write_bounding_box.py
@@ -57,7 +57,7 @@ class ValidateNukeWriteBoundingBox(pyblish.api.InstancePlugin):

     order = pyblish.api.ValidatorOrder
     optional = True
-    families = ["render"]
+    families = ["render", "render.local", "render.farm"]
     label = "Write Bounding Box"
     hosts = ["nuke"]
     actions = [RepairNukeBoundingBoxAction]
diff --git a/pype/plugins/nuke/publish/validate_write_knobs.py b/pype/plugins/nuke/publish/validate_write_knobs.py
index 072ffd4b17..24572bedb3 100644
--- a/pype/plugins/nuke/publish/validate_write_knobs.py
+++ b/pype/plugins/nuke/publish/validate_write_knobs.py
@@ -8,24 +8,31 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
     """Ensure knobs are consistent.

     Knobs to validate and their values comes from the
-    "nuke/knobs.json" preset, which needs this structure:
-    {
-        "family": {
-            "knob_name": knob_value
-        }
-    }
+    "presets/plugins/nuke/publish.json" preset, which needs this
+    structure:
+
+    "ValidateNukeWriteKnobs": {
+        "enabled": true,
+        "knobs": {
+            "family": {
+                "knob_name": knob_value
+            }
+        }
+    }
     """

     order = pyblish.api.ValidatorOrder
-    label = "Knobs"
+    label = "Validate Write Knobs"
     hosts = ["nuke"]
     actions = [pype.api.RepairContextAction]
     optional = True

     def process(self, context):
         # Check for preset existence.
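+        # The `knobs` mapping is injected onto the plug-in class by the
+        # presets system described in the docstring, e.g. (values assumed):
+        #   ValidateNukeWriteKnobs.knobs = {"render": {"create_directories": True}}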
-        if not context.data["presets"]["nuke"].get("knobs"):
+        # Use a default so a missing preset disables the plug-in instead
+        # of raising AttributeError.
+        if not getattr(self, "knobs", None):
             return
+
+        self.log.debug("__ self.knobs: {}".format(self.knobs))

         invalid = self.get_invalid(context, compute=True)
         if invalid:
@@ -43,7 +50,6 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):

     @classmethod
     def get_invalid_knobs(cls, context):
-        presets = context.data["presets"]["nuke"]["knobs"]
         invalid_knobs = []
         for instance in context:
             # Filter publisable instances.
@@ -53,15 +59,15 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin):
             # Filter families.
             families = [instance.data["family"]]
             families += instance.data.get("families", [])
-            families = list(set(families) & set(presets.keys()))
+            families = list(set(families) & set(cls.knobs.keys()))
             if not families:
                 continue

             # Get all knobs to validate.
             knobs = {}
             for family in families:
-                for preset in presets[family]:
-                    knobs.update({preset: presets[family][preset]})
+                for preset in cls.knobs[family]:
+                    knobs.update({preset: cls.knobs[family][preset]})

             # Get invalid knobs.
             nodes = []
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index 7a400909fd..3759d50f6a 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -4,7 +4,6 @@ from pyblish import api

 import nuke

-
 class CollectClips(api.ContextPlugin):
     """Collect all Track items selection."""
@@ -31,6 +30,7 @@ class CollectClips(api.ContextPlugin):
             sub_items = video_track.subTrackItems()

             for item in items:
+                data = dict()
                 # compare with selection or if disabled
                 if item not in selection or not item.isEnabled():
                     continue
@@ -83,9 +83,12 @@ class CollectClips(api.ContextPlugin):
                 except Exception:
                     source_first_frame = 0

-                data = {"name": "{0}_{1}".format(track.name(), item.name()),
+                data.update({
+                    "name": "{0}_{1}".format(track.name(), item.name()),
                     "item": item,
                     "source": source,
+                    "timecodeStart": str(source.timecodeStart()),
+                    "timelineTimecodeStart": str(sequence.timecodeStart()),
                     "sourcePath": source_path,
                     "track": track.name(),
                     "trackIndex": track_index,
@@ -93,19 +96,24 @@ class CollectClips(api.ContextPlugin):
                     "effects": effects,
                     "sourceIn": int(item.sourceIn()),
                     "sourceOut": int(item.sourceOut()),
+                    "mediaDuration": (int(item.sourceOut()) -
+                                      int(item.sourceIn())) + 1,
                     "clipIn": int(item.timelineIn()),
                     "clipOut": int(item.timelineOut()),
+                    "clipDuration": (int(item.timelineOut()) -
+                                     int(item.timelineIn())) + 1,
                     "asset": asset,
                     "family": "clip",
                     "families": [],
                     "handles": 0,
-                    "handleStart": projectdata.get("handles", 0),
-                    "handleEnd": projectdata.get("handles", 0),
-                    "version": int(version)}
+                    "handleStart": projectdata.get("handleStart", 0),
+                    "handleEnd": projectdata.get("handleEnd", 0),
+                    "version": int(version)})

                 instance = context.create_instance(**data)
                 self.log.info("Created instance: {}".format(instance))
+                self.log.info("Created instance.data: {}".format(instance.data))
                 self.log.debug(">> effects: {}".format(instance.data["effects"]))

                 context.data["assetsShared"][asset] = dict()
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index f9eb126772..be448931c8 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -234,8 +234,9 @@ class CollectPlatesData(api.InstancePlugin):
             'stagingDir': staging_dir,
             'name': ext,
             'ext': ext,
-            "frameStart": frame_start,
             "frameEnd": frame_end,
+            "frameStart": "%0{}d".format(
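+                # e.g. frame_start=997, frame_end=1001 -> "%04d" % 997 ->
+                # "0997"; the start frame string is zero-padded to the
+                # width of the end frame.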
len(str(frame_end))) % frame_start } instance.data["representations"].append(plates_representation) diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py index 7aa79d6cc3..15d2a80a55 100644 --- a/pype/plugins/nukestudio/publish/extract_effects.py +++ b/pype/plugins/nukestudio/publish/extract_effects.py @@ -169,32 +169,44 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): project_name = api.Session["AVALON_PROJECT"] a_template = anatomy.templates - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} + ) template = a_template['publish']['path'] # anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, project_name)) silo = asset.get('silo') - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: diff --git a/pype/plugins/nukestudio/publish/validate_version.py b/pype/plugins/nukestudio/publish/validate_version.py index 194b270d51..ebb8f357f8 100644 --- a/pype/plugins/nukestudio/publish/validate_version.py +++ b/pype/plugins/nukestudio/publish/validate_version.py @@ -3,6 +3,7 @@ from avalon import io from pype.action import get_errored_instances_from_context import pype.api as pype + @pyblish.api.log class RepairNukestudioVersionUp(pyblish.api.Action): label = "Version Up Workfile" @@ -53,13 +54,17 @@ class ValidateVersion(pyblish.api.InstancePlugin): io.install() project = io.find_one({"type": "project"}) - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": subset_name}) + subset = io.find_one({ + "type": "subset", + "parent": asset["_id"], + "name": subset_name + }) version_db = io.find_one({ 'type': 'version', diff --git a/pype/plugins/premiere/publish/integrate_assumed_destination.py b/pype/plugins/premiere/publish/integrate_assumed_destination.py index c82b70c66f..a0393e8a43 100644 --- a/pype/plugins/premiere/publish/integrate_assumed_destination.py +++ b/pype/plugins/premiere/publish/integrate_assumed_destination.py @@ -77,32 +77,44 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] project_name = api.Session["AVALON_PROJECT"] - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} + ) template = project["config"]["template"]["publish"] # 
anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, project_name)) silo = asset.get('silo') - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: diff --git a/pype/plugins/standalonepublisher/publish/collect_context.py b/pype/plugins/standalonepublisher/publish/collect_context.py index 43e2350be4..327b99f432 100644 --- a/pype/plugins/standalonepublisher/publish/collect_context.py +++ b/pype/plugins/standalonepublisher/publish/collect_context.py @@ -45,66 +45,71 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin): with open(input_json_path, "r") as f: in_data = json.load(f) - asset_name = in_data['asset'] - family_preset_key = in_data.get('family_preset_key', '') - family = in_data['family'] - subset = in_data['subset'] + asset_name = in_data["asset"] + family_preset_key = in_data.get("family_preset_key", "") + family = in_data["family"] + subset = in_data["subset"] # Load presets presets = context.data.get("presets") if not presets: from pypeapp import config + presets = config.get_presets() # Get from presets anatomy key that will be used for getting template # - default integrate new is used if not set - anatomy_key = presets.get( - "standalone_publish", {}).get( - "families", {}).get( - family_preset_key, {}).get( - "anatomy_template" + anatomy_key = ( + presets.get("standalone_publish", {}) + .get("families", {}) + .get(family_preset_key, {}) + .get("anatomy_template") ) - project = io.find_one({'type': 'project'}) - asset = io.find_one({ - 'type': 'asset', - 'name': asset_name - }) - context.data['project'] = project - context.data['asset'] = asset + project = io.find_one({"type": "project"}) + asset = io.find_one({"type": "asset", "name": asset_name}) + context.data["project"] = project + context.data["asset"] = asset instance = context.create_instance(subset) - instance.data.update({ - "subset": subset, - "asset": asset_name, - "label": subset, - "name": subset, - "family": family, - "frameStart": in_data.get("representations", [None])[0].get("frameStart", None), - "frameEnd": in_data.get("representations", [None])[0].get("frameEnd", None), - "families": [family, 'ftrack'], - }) + instance.data.update( + { + "subset": subset, + "asset": asset_name, + "label": subset, + "name": subset, + "family": family, + "version": in_data.get("version", 1), + "frameStart": in_data.get("representations", [None])[0].get( + "frameStart", None + ), + "frameEnd": in_data.get("representations", [None])[0].get( + "frameEnd", None + ), + "families": [family, "ftrack"], + } + ) self.log.info("collected instance: {}".format(instance.data)) self.log.info("parsing data: {}".format(in_data)) - instance.data['destination_list'] = list() - instance.data['representations'] = list() - instance.data['source'] = 'standalone publisher' 
+ instance.data["destination_list"] = list() + instance.data["representations"] = list() + instance.data["source"] = "standalone publisher" - for component in in_data['representations']: + for component in in_data["representations"]: - component['destination'] = component['files'] - component['stagingDir'] = component['stagingDir'] + component["destination"] = component["files"] + component["stagingDir"] = component["stagingDir"] # Do not set anatomy_template if not specified if anatomy_key: - component['anatomy_template'] = anatomy_key - if isinstance(component['files'], list): - collections, remainder = clique.assemble(component['files']) + component["anatomy_template"] = anatomy_key + if isinstance(component["files"], list): + collections, remainder = clique.assemble(component["files"]) self.log.debug("collecting sequence: {}".format(collections)) instance.data["frameStart"] = int(component["frameStart"]) instance.data["frameEnd"] = int(component["frameEnd"]) - instance.data['fps'] = int(component['fps']) + instance.data["fps"] = int(component["fps"]) if component["preview"]: instance.data["families"].append("review") diff --git a/pype/plugins/standalonepublisher/publish/collect_matchmove.py b/pype/plugins/standalonepublisher/publish/collect_matchmove.py new file mode 100644 index 0000000000..5d9e8ddfb4 --- /dev/null +++ b/pype/plugins/standalonepublisher/publish/collect_matchmove.py @@ -0,0 +1,29 @@ +""" +Requires: + Nothing + +Provides: + Instance +""" + +import pyblish.api +import logging + + +log = logging.getLogger("collector") + + +class CollectMatchmovePublish(pyblish.api.InstancePlugin): + """ + Collector with only one reason for its existence - remove 'ftrack' + family implicitly added by Standalone Publisher + """ + + label = "Collect Matchmove - SA Publish" + order = pyblish.api.CollectorOrder + families = ["matchmove"] + hosts = ["standalonepublisher"] + + def process(self, instance): + if "ftrack" in instance.data["families"]: + instance.data["families"].remove("ftrack") diff --git a/pype/scripts/fusion_switch_shot.py b/pype/scripts/fusion_switch_shot.py index 26a93b9b9a..539bcf4f68 100644 --- a/pype/scripts/fusion_switch_shot.py +++ b/pype/scripts/fusion_switch_shot.py @@ -170,8 +170,10 @@ def switch(asset_name, filepath=None, new=True): assert asset, "Could not find '%s' in the database" % asset_name # Get current project - self._project = io.find_one({"type": "project", - "name": api.Session["AVALON_PROJECT"]}) + self._project = io.find_one({ + "type": "project", + "name": api.Session["AVALON_PROJECT"] + }) # Go to comp if not filepath: diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index ad2e59fc96..d5bc2594a4 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -1,5 +1,7 @@ import os import datetime +import subprocess +import json import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins from pypeapp.lib import config from pype import api as pype @@ -9,6 +11,53 @@ from pype import api as pype log = pype.Logger().get_logger("BurninWrapper", "burninwrap") +ffmpeg_path = os.environ.get("FFMPEG_PATH") +if ffmpeg_path and os.path.exists(ffmpeg_path): + # add separator "/" or "\" to be prepared for next part + ffmpeg_path += os.path.sep +else: + ffmpeg_path = "" + +FFMPEG = ( + '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s' +).format(os.path.normpath(ffmpeg_path + "ffmpeg")) +FFPROBE = ( + '{} -v quiet -print_format json -show_format -show_streams %(source)s' 
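+# With FFMPEG_PATH set to e.g. "/opt/ffmpeg/bin", the templates resolve to
+# "/opt/ffmpeg/bin/ffmpeg" and "/opt/ffmpeg/bin/ffprobe"; when it is unset
+# the bare executable names are used and must be resolvable from PATH.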
+).format(os.path.normpath(ffmpeg_path + "ffprobe"))
+
+
+def _streams(source):
+    """Reimplemented from otio burnins to be able to use the full path
+    to ffprobe.
+
+    :param str source: source media file
+    :rtype: [{}, ...]
+    """
+    command = FFPROBE % {'source': source}
+    proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
+    out = proc.communicate()[0]
+    if proc.returncode != 0:
+        raise RuntimeError("Failed to run: %s" % command)
+    return json.loads(out)['streams']
+
+
+def get_fps(str_value):
+    if str_value == "0/0":
+        print("Source has \"r_frame_rate\" value set to \"0/0\".")
+        return "Unknown"
+
+    items = str_value.split("/")
+    if len(items) == 1:
+        fps = float(items[0])
+
+    elif len(items) == 2:
+        fps = float(items[0]) / float(items[1])
+
+    else:
+        # Anything else is not a valid ffprobe rational rate.
+        print("Unexpected \"r_frame_rate\" value: \"{}\".".format(str_value))
+        return "Unknown"
+
+    # Check if fps is integer or float number
+    if int(fps) == fps:
+        fps = int(fps)
+
+    return str(fps)
+
+
 class ModifiedBurnins(ffmpeg_burnins.Burnins):
     '''
     This is modification of OTIO FFmpeg Burnin adapter.
@@ -61,7 +110,11 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
     }

     def __init__(self, source, streams=None, options_init=None):
+        if not streams:
+            streams = _streams(source)
+
         super().__init__(source, streams)
+
         if options_init:
             self.options_init.update(options_init)
@@ -91,7 +144,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         text = today.strftime(date_format)
         self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)

-    def add_frame_numbers(self, align, options=None, start_frame=None):
+    def add_frame_numbers(
+        self, align, options=None, start_frame=None, text=None
+    ):
         """
         Convenience method to create the frame number expression.
@@ -103,8 +158,14 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if start_frame:
             options['frame_offset'] = start_frame

-        options['expression'] = r'%%{eif\:n+%d\:d}' % options['frame_offset']
-        text = str(int(self.end_frame + options['frame_offset']))
+        expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
+        _text = str(int(self.end_frame + options['frame_offset']))
+        if text and isinstance(text, str):
+            text = r"{}".format(text)
+            expr = text.replace("{current_frame}", expr)
+            text = text.replace("{current_frame}", _text)
+
+        options['expression'] = expr
         self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)

     def add_timecode(self, align, options=None, start_frame=None):
@@ -121,7 +182,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):

         timecode = ffmpeg_burnins._frames_to_timecode(
             options['frame_offset'],
-            self.frame_rate
+            self.frame_rate
         )
         options = options.copy()
         if not options.get('fps'):
@@ -180,7 +241,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
         if self.filter_string:
             filters = '-vf "{}"'.format(self.filter_string)

-        return (ffmpeg_burnins.FFMPEG % {
+        return (FFMPEG % {
             'input': self.source,
             'output': output,
             'args': '%s ' % args if args else '',
@@ -213,13 +274,15 @@ def example(input_path, output_path):
     burnin.render(output_path, overwrite=True)


-def burnins_from_data(input_path, output_path, data, overwrite=True):
+def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
     '''
     This method adds burnins to video/image file based on presets setting.
     Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
    :param input_path: full path to input file where burnins should be add
    :type input_path: str
+    :param codec_data: all codec related arguments in list
+    :type codec_data: list
    :param output_path: full path to output file where output will be rendered
    :type output_path: str
    :param data: data required for burnin settings (more info below)
@@ -284,8 +347,19 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):

     burnin = ModifiedBurnins(input_path, options_init=options_init)

-    start_frame = data.get("start_frame")
-    start_frame_tc = data.get('start_frame_tc', start_frame)
+    frame_start = data.get("frame_start")
+    frame_start_tc = data.get('frame_start_tc', frame_start)
+
+    stream = burnin._streams[0]
+    if "resolution_width" not in data:
+        data["resolution_width"] = stream.get("width", "Unknown")
+
+    if "resolution_height" not in data:
+        data["resolution_height"] = stream.get("height", "Unknown")
+
+    if "fps" not in data:
+        data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))
+
     for align_text, preset in presets.get('burnins', {}).items():
         align = None
         if align_text == 'TOP_LEFT':
@@ -311,7 +385,7 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
         if (
             bi_func in ['frame_numbers', 'timecode'] and
-            start_frame is None
+            frame_start is None
         ):
             log.error(
                 'start_frame is not set in entered data!'
             )
@@ -320,15 +394,34 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
             return

         if bi_func == 'frame_numbers':
-            burnin.add_frame_numbers(align, start_frame=start_frame)
+            current_frame_identifier = "{current_frame}"
+            text = preset.get('text') or current_frame_identifier
+
+            if current_frame_identifier not in text:
+                log.warning((
+                    'Text for frame numbers doesn\'t contain the '
+                    '`{current_frame}` key!'
+                ))
+
+            text_items = []
+            split_items = text.split(current_frame_identifier)
+            for item in split_items:
+                text_items.append(item.format(**data))
+
+            text = "{current_frame}".join(text_items)
+
+            burnin.add_frame_numbers(align, start_frame=frame_start, text=text)
+
         elif bi_func == 'timecode':
-            burnin.add_timecode(align, start_frame=start_frame_tc)
+            burnin.add_timecode(align, start_frame=frame_start_tc)
+
         elif bi_func == 'text':
             if not preset.get('text'):
                 log.error('Text is not set for text function burnin!')
                 return
             text = preset['text'].format(**data)
             burnin.add_text(text, align)
+
         elif bi_func == "datetime":
             date_format = preset["format"]
             burnin.add_datetime(date_format, align)
@@ -339,11 +432,20 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
         )
         return

-    burnin.render(output_path, overwrite=overwrite)
+    codec_args = ''
+    if codec_data:
+        codec_args = " ".join(codec_data)
+
+    burnin.render(output_path, args=codec_args, overwrite=overwrite)


 if __name__ == '__main__':
     import sys
     import json
     data = json.loads(sys.argv[-1])
-    burnins_from_data(data['input'], data['output'], data['burnin_data'])
+    burnins_from_data(
+        data['input'],
+        data['codec'],
+        data['output'],
+        data['burnin_data']
+    )
diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py
index 25ed4135c3..620ee3d851 100644
--- a/pype/scripts/publish_filesequence.py
+++ b/pype/scripts/publish_filesequence.py
@@ -1,10 +1,23 @@
 """This module is used for command line publishing of image sequences."""

 import os
+import sys
+import argparse
 import logging
 import subprocess
 import platform

+try:
+    from shutil import which
+except ImportError:
+    # we are in python < 3.3
+    def which(command):
+        path = os.getenv('PATH')
+        for p in path.split(os.path.pathsep):
+            p = os.path.join(p, command)
+            if os.path.exists(p) and os.access(p, os.X_OK):
+                return p
+
 handler = logging.basicConfig()
 log = logging.getLogger("Publish Image Sequences")
 log.setLevel(logging.DEBUG)
@@ -13,7 +26,6 @@ error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"


 def __main__():
-    import argparse
     parser = argparse.ArgumentParser()
     parser.add_argument("--paths",
                         nargs="*",
@@ -33,24 +45,38 @@ def __main__():
     print("Running pype ...")
     auto_pype_root = os.path.dirname(os.path.abspath(__file__))
     auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..")
+    auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root
+    if os.environ.get('PYPE_ROOT'):
+        print("Got Pype location from environment: {}".format(
+            os.environ.get('PYPE_ROOT')))

-    if kwargs.pype:
-        pype_root = kwargs.pype
-    else:
-        # if pype argument not specified, lets assume it is set in PATH
-        pype_root = ""
-
-    print("Set pype root to: {}".format(pype_root))
-    print("Paths: {}".format(kwargs.paths or [os.getcwd()]))
-
-    paths = kwargs.paths or [os.getcwd()]

     pype_command = "pype.ps1"
     if platform.system().lower() == "linux":
         pype_command = "pype"
     elif platform.system().lower() == "windows":
         pype_command = "pype.bat"

+    if kwargs.pype:
+        pype_root = kwargs.pype
+    else:
+        # test if pype.bat / pype is in the PATH;
+        # if it is, which() will return its path and we use that.
+        # if not, we use the auto_pype_root path. The caveat there is
+        # that it can be a UNC path, which will not work on Windows.
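+        # Resolution order: explicit --pype argument, then a launcher
+        # found on PATH via which(), then auto_pype_root. For example,
+        # with pype.bat at C:\pype\pype.bat on PATH, pype_root becomes
+        # C:\pype.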
+ + pype_path = which(pype_command) + + if pype_path: + pype_root = os.path.dirname(pype_path) + else: + pype_root = auto_pype_root + + print("Set pype root to: {}".format(pype_root)) + print("Paths: {}".format(kwargs.paths or [os.getcwd()])) + + paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa + args = [ os.path.join(pype_root, pype_command), "publish", @@ -60,9 +86,11 @@ def __main__(): print("Pype command: {}".format(" ".join(args))) # Forcing forwaring the environment because environment inheritance does # not always work. - exit_code = subprocess.call(args, env=os.environ) + # Cast all values in environment to str to be safe + env = {k: str(v) for k, v in os.environ.items()} + exit_code = subprocess.call(args, env=env) if exit_code != 0: - raise ValueError("Publishing failed.") + raise RuntimeError("Publishing failed.") if __name__ == '__main__': diff --git a/pype/services/idle_manager/idle_manager.py b/pype/services/idle_manager/idle_manager.py index 64cafcd193..0897245049 100644 --- a/pype/services/idle_manager/idle_manager.py +++ b/pype/services/idle_manager/idle_manager.py @@ -1,6 +1,6 @@ import time import collections -from Qt import QtCore, QtGui, QtWidgets +from Qt import QtCore from pynput import mouse, keyboard from pypeapp import Logger @@ -29,6 +29,13 @@ class IdleManager(QtCore.QThread): def tray_start(self): self.start() + def tray_exit(self): + self.stop() + try: + self.time_signals = {} + except Exception: + pass + def add_time_signal(self, emit_time, signal): """ If any module want to use IdleManager, need to use add_time_signal :param emit_time: time when signal will be emitted diff --git a/pype/setdress_api.py b/pype/setdress_api.py index c6de0a4f74..707a5b713f 100644 --- a/pype/setdress_api.py +++ b/pype/setdress_api.py @@ -7,8 +7,7 @@ import copy from maya import cmds -from avalon import api -import avalon.io as io +from avalon import api, io from avalon.maya.lib import unique_namespace from pype.maya.lib import matrix_equals @@ -463,8 +462,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file): # Check whether the conversion can be done by the Loader. # They *must* use the same asset, subset and Loader for # `api.update` to make sense. - old = io.find_one({"_id": io.ObjectId(representation_current)}) - new = io.find_one({"_id": io.ObjectId(representation_new)}) + old = io.find_one({ + "_id": io.ObjectId(representation_current) + }) + new = io.find_one({ + "_id": io.ObjectId(representation_new) + }) is_valid = compare_representations(old=old, new=new) if not is_valid: log.error("Skipping: %s. 
See log for details.",
diff --git a/pype/standalonepublish/resources/menu.png b/pype/standalonepublish/resources/menu.png
new file mode 100644
index 0000000000..da83b45244
Binary files /dev/null and b/pype/standalonepublish/resources/menu.png differ
diff --git a/pype/standalonepublish/resources/menu.svg b/pype/standalonepublish/resources/menu.svg
deleted file mode 100644
index ac1e728011..0000000000
--- a/pype/standalonepublish/resources/menu.svg
+++ /dev/null
@@ -1,12 +0,0 @@
(deleted SVG markup omitted)
diff --git a/pype/standalonepublish/resources/menu_disabled.png b/pype/standalonepublish/resources/menu_disabled.png
new file mode 100644
index 0000000000..e4758f0b19
Binary files /dev/null and b/pype/standalonepublish/resources/menu_disabled.png differ
diff --git a/pype/standalonepublish/resources/menu_hover.png b/pype/standalonepublish/resources/menu_hover.png
new file mode 100644
index 0000000000..dfe8ed53b2
Binary files /dev/null and b/pype/standalonepublish/resources/menu_hover.png differ
diff --git a/pype/standalonepublish/resources/menu_pressed.png b/pype/standalonepublish/resources/menu_pressed.png
new file mode 100644
index 0000000000..a5f931b2c4
Binary files /dev/null and b/pype/standalonepublish/resources/menu_pressed.png differ
diff --git a/pype/standalonepublish/resources/menu_pressed_hover.png b/pype/standalonepublish/resources/menu_pressed_hover.png
new file mode 100644
index 0000000000..51503add0f
Binary files /dev/null and b/pype/standalonepublish/resources/menu_pressed_hover.png differ
diff --git a/pype/standalonepublish/resources/preview.svg b/pype/standalonepublish/resources/preview.svg
deleted file mode 100644
index 4a9810c1d5..0000000000
--- a/pype/standalonepublish/resources/preview.svg
+++ /dev/null
@@ -1,19 +0,0 @@
(deleted SVG markup omitted; it carried the label "PREVIEW")
diff --git a/pype/standalonepublish/resources/thumbnail.svg b/pype/standalonepublish/resources/thumbnail.svg
deleted file mode 100644
index dbc228f8c8..0000000000
--- a/pype/standalonepublish/resources/thumbnail.svg
+++ /dev/null
@@ -1,19 +0,0 @@
(deleted SVG markup omitted; it carried the label "THUMBNAIL")
diff --git a/pype/standalonepublish/resources/trash.png b/pype/standalonepublish/resources/trash.png
new file mode 100644
index 0000000000..8d12d5f8e0
Binary files /dev/null and b/pype/standalonepublish/resources/trash.png differ
diff --git a/pype/standalonepublish/resources/trash.svg b/pype/standalonepublish/resources/trash.svg
deleted file mode 100644
index 07905024c0..0000000000
--- a/pype/standalonepublish/resources/trash.svg
+++ /dev/null
@@ -1,23 +0,0 @@
(deleted SVG markup omitted)
diff --git a/pype/standalonepublish/resources/trash_disabled.png b/pype/standalonepublish/resources/trash_disabled.png
new file mode 100644
index 0000000000..06f5ae5276
Binary files /dev/null and b/pype/standalonepublish/resources/trash_disabled.png differ
diff --git a/pype/standalonepublish/resources/trash_hover.png b/pype/standalonepublish/resources/trash_hover.png
new file mode 100644
index 0000000000..4725c0f8ab
Binary files /dev/null and b/pype/standalonepublish/resources/trash_hover.png differ
diff --git a/pype/standalonepublish/resources/trash_pressed.png b/pype/standalonepublish/resources/trash_pressed.png
new file mode 100644
index 0000000000..901b0e6d35
Binary files /dev/null and b/pype/standalonepublish/resources/trash_pressed.png differ
diff --git a/pype/standalonepublish/resources/trash_pressed_hover.png b/pype/standalonepublish/resources/trash_pressed_hover.png
new file mode 100644
index 0000000000..076ced260f
Binary files /dev/null and
b/pype/standalonepublish/resources/trash_pressed_hover.png differ diff --git a/pype/standalonepublish/widgets/model_filter_proxy_recursive_sort.py b/pype/standalonepublish/widgets/model_filter_proxy_recursive_sort.py index 04ee88229f..9528e96ebf 100644 --- a/pype/standalonepublish/widgets/model_filter_proxy_recursive_sort.py +++ b/pype/standalonepublish/widgets/model_filter_proxy_recursive_sort.py @@ -1,4 +1,5 @@ from . import QtCore +import re class RecursiveSortFilterProxyModel(QtCore.QSortFilterProxyModel): diff --git a/pype/standalonepublish/widgets/widget_component_item.py b/pype/standalonepublish/widgets/widget_component_item.py index 9631fed258..6275238412 100644 --- a/pype/standalonepublish/widgets/widget_component_item.py +++ b/pype/standalonepublish/widgets/widget_component_item.py @@ -1,21 +1,19 @@ import os from . import QtCore, QtGui, QtWidgets -from . import SvgButton from . import get_resource -from avalon import style +from pypeapp import style class ComponentItem(QtWidgets.QFrame): - C_NORMAL = '#777777' - C_HOVER = '#ffffff' - C_ACTIVE = '#4BB543' - C_ACTIVE_HOVER = '#4BF543' signal_remove = QtCore.Signal(object) signal_thumbnail = QtCore.Signal(object) signal_preview = QtCore.Signal(object) signal_repre_change = QtCore.Signal(object, object) + preview_text = "PREVIEW" + thumbnail_text = "THUMBNAIL" + def __init__(self, parent, main_parent): super().__init__() self.has_valid_repre = True @@ -55,10 +53,8 @@ class ComponentItem(QtWidgets.QFrame): self.icon.setText("") self.icon.setScaledContents(True) - self.btn_action_menu = SvgButton( - get_resource('menu.svg'), 22, 22, - [self.C_NORMAL, self.C_HOVER], - frame_image_info, False + self.btn_action_menu = PngButton( + name="menu", size=QtCore.QSize(22, 22) ) self.action_menu = QtWidgets.QMenu() @@ -85,7 +81,9 @@ class ComponentItem(QtWidgets.QFrame): self.file_info.setStyleSheet('padding-left:3px;') - expanding_sizePolicy.setHeightForWidth(self.name.sizePolicy().hasHeightForWidth()) + expanding_sizePolicy.setHeightForWidth( + self.name.sizePolicy().hasHeightForWidth() + ) frame_name_repre = QtWidgets.QFrame(frame) @@ -101,7 +99,8 @@ class ComponentItem(QtWidgets.QFrame): layout.addWidget(self.ext, alignment=QtCore.Qt.AlignRight) frame_name_repre.setSizePolicy( - QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding + QtWidgets.QSizePolicy.MinimumExpanding, + QtWidgets.QSizePolicy.MinimumExpanding ) # Repre + icons @@ -124,17 +123,8 @@ class ComponentItem(QtWidgets.QFrame): frame_icons = QtWidgets.QFrame(frame_repre_icons) - self.preview = SvgButton( - get_resource('preview.svg'), 64, 18, - [self.C_NORMAL, self.C_HOVER, self.C_ACTIVE, self.C_ACTIVE_HOVER], - frame_icons - ) - - self.thumbnail = SvgButton( - get_resource('thumbnail.svg'), 84, 18, - [self.C_NORMAL, self.C_HOVER, self.C_ACTIVE, self.C_ACTIVE_HOVER], - frame_icons - ) + self.preview = LightingButton(self.preview_text) + self.thumbnail = LightingButton(self.thumbnail_text) layout = QtWidgets.QHBoxLayout(frame_icons) layout.setSpacing(6) @@ -162,12 +152,7 @@ class ComponentItem(QtWidgets.QFrame): layout_main.addWidget(frame_middle) - self.remove = SvgButton( - get_resource('trash.svg'), 22, 22, - [self.C_NORMAL, self.C_HOVER], - frame, False - ) - + self.remove = PngButton(name="trash", size=QtCore.QSize(22, 22)) layout_main.addWidget(self.remove) layout = QtWidgets.QVBoxLayout(self) @@ -272,16 +257,16 @@ class ComponentItem(QtWidgets.QFrame): self.signal_repre_change.emit(self, repre_name) def is_thumbnail(self): - return 
self.thumbnail.checked + return self.thumbnail.isChecked() def change_thumbnail(self, hover=True): - self.thumbnail.change_checked(hover) + self.thumbnail.setChecked(hover) def is_preview(self): - return self.preview.checked + return self.preview.isChecked() def change_preview(self, hover=True): - self.preview.change_checked(hover) + self.preview.setChecked(hover) def collect_data(self): in_files = self.in_data['files'] @@ -309,3 +294,229 @@ class ComponentItem(QtWidgets.QFrame): data['fps'] = self.in_data['fps'] return data + + +class LightingButton(QtWidgets.QPushButton): + lightingbtnstyle = """ + QPushButton { + font: %(font_size_pt)spt; + text-align: center; + color: #777777; + background-color: transparent; + border-width: 1px; + border-color: #777777; + border-style: solid; + padding-top: 0px; + padding-bottom: 0px; + padding-left: 3px; + padding-right: 3px; + border-radius: 3px; + } + + QPushButton:hover { + border-color: #cccccc; + color: #cccccc; + } + + QPushButton:pressed { + border-color: #ffffff; + color: #ffffff; + } + + QPushButton:disabled { + border-color: #3A3939; + color: #3A3939; + } + + QPushButton:checked { + border-color: #4BB543; + color: #4BB543; + } + + QPushButton:checked:hover { + border-color: #4Bd543; + color: #4Bd543; + } + + QPushButton:checked:pressed { + border-color: #4BF543; + color: #4BF543; + } + """ + + def __init__(self, text, font_size_pt=8, *args, **kwargs): + super(LightingButton, self).__init__(text, *args, **kwargs) + self.setStyleSheet(self.lightingbtnstyle % { + "font_size_pt": font_size_pt + }) + self.setCheckable(True) + + +class PngFactory: + png_names = { + "trash": { + "normal": QtGui.QIcon(get_resource("trash.png")), + "hover": QtGui.QIcon(get_resource("trash_hover.png")), + "pressed": QtGui.QIcon(get_resource("trash_pressed.png")), + "pressed_hover": QtGui.QIcon( + get_resource("trash_pressed_hover.png") + ), + "disabled": QtGui.QIcon(get_resource("trash_disabled.png")) + }, + + "menu": { + "normal": QtGui.QIcon(get_resource("menu.png")), + "hover": QtGui.QIcon(get_resource("menu_hover.png")), + "pressed": QtGui.QIcon(get_resource("menu_pressed.png")), + "pressed_hover": QtGui.QIcon( + get_resource("menu_pressed_hover.png") + ), + "disabled": QtGui.QIcon(get_resource("menu_disabled.png")) + } + } + + +class PngButton(QtWidgets.QPushButton): + png_button_style = """ + QPushButton { + border: none; + background-color: transparent; + padding-top: 0px; + padding-bottom: 0px; + padding-left: 0px; + padding-right: 0px; + } + QPushButton:hover {} + QPushButton:pressed {} + QPushButton:disabled {} + QPushButton:checked {} + QPushButton:checked:hover {} + QPushButton:checked:pressed {} + """ + + def __init__( + self, name=None, path=None, hover_path=None, pressed_path=None, + hover_pressed_path=None, disabled_path=None, + size=None, *args, **kwargs + ): + self._hovered = False + self._pressed = False + super(PngButton, self).__init__(*args, **kwargs) + self.setStyleSheet(self.png_button_style) + + png_dict = {} + if name: + png_dict = PngFactory.png_names.get(name) or {} + if not png_dict: + print(( + "WARNING: There is not set icon with name \"{}\"" + "in PngFactory!" 
+                ).format(name))
+
+        ico_normal = png_dict.get("normal")
+        ico_hover = png_dict.get("hover")
+        ico_pressed = png_dict.get("pressed")
+        ico_hover_pressed = png_dict.get("pressed_hover")
+        ico_disabled = png_dict.get("disabled")
+
+        if path:
+            ico_normal = QtGui.QIcon(path)
+        if hover_path:
+            ico_hover = QtGui.QIcon(hover_path)
+
+        if pressed_path:
+            ico_pressed = QtGui.QIcon(pressed_path)
+
+        if hover_pressed_path:
+            ico_hover_pressed = QtGui.QIcon(hover_pressed_path)
+
+        if disabled_path:
+            ico_disabled = QtGui.QIcon(disabled_path)
+
+        self.setIcon(ico_normal)
+        if size:
+            self.setIconSize(size)
+            self.setMaximumSize(size)
+
+        self.ico_normal = ico_normal
+        self.ico_hover = ico_hover
+        self.ico_pressed = ico_pressed
+        self.ico_hover_pressed = ico_hover_pressed
+        self.ico_disabled = ico_disabled
+
+    def setDisabled(self, in_bool):
+        super(PngButton, self).setDisabled(in_bool)
+        icon = self.ico_normal
+        if in_bool and self.ico_disabled:
+            icon = self.ico_disabled
+        self.setIcon(icon)
+
+    def enterEvent(self, event):
+        self._hovered = True
+        if not self.isEnabled():
+            return
+        icon = self.ico_normal
+        if self.ico_hover:
+            icon = self.ico_hover
+
+        if self._pressed and self.ico_hover_pressed:
+            icon = self.ico_hover_pressed
+
+        if self.icon() != icon:
+            self.setIcon(icon)
+
+    def mouseMoveEvent(self, event):
+        super(PngButton, self).mouseMoveEvent(event)
+        if self._pressed:
+            mouse_pos = event.pos()
+            hovering = self.rect().contains(mouse_pos)
+            if hovering and not self._hovered:
+                self.enterEvent(event)
+            elif not hovering and self._hovered:
+                self.leaveEvent(event)
+
+    def leaveEvent(self, event):
+        self._hovered = False
+        if not self.isEnabled():
+            return
+        icon = self.ico_normal
+        if self._pressed and self.ico_pressed:
+            icon = self.ico_pressed
+
+        if self.icon() != icon:
+            self.setIcon(icon)
+
+    def mousePressEvent(self, event):
+        self._pressed = True
+        if not self.isEnabled():
+            return
+        icon = self.ico_hover
+        if self.ico_pressed:
+            icon = self.ico_pressed
+
+        if self.ico_hover_pressed:
+            mouse_pos = event.pos()
+            if self.rect().contains(mouse_pos):
+                icon = self.ico_hover_pressed
+
+        if icon is None:
+            icon = self.ico_normal
+
+        if self.icon() != icon:
+            self.setIcon(icon)
+
+    def mouseReleaseEvent(self, event):
+        if not self.isEnabled():
+            return
+        if self._pressed:
+            self._pressed = False
+            mouse_pos = event.pos()
+            if self.rect().contains(mouse_pos):
+                self.clicked.emit()
+
+        icon = self.ico_normal
+        if self._hovered and self.ico_hover:
+            icon = self.ico_hover
+
+        if self.icon() != icon:
+            self.setIcon(icon)
diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py
index ba8ab44cf8..73b9f0e179 100644
--- a/pype/standalonepublish/widgets/widget_drop_frame.py
+++ b/pype/standalonepublish/widgets/widget_drop_frame.py
@@ -92,28 +92,32 @@ class DropDataFrame(QtWidgets.QFrame):
         self._refresh_view()

     def _set_thumbnail(self, in_item):
+        current_state = in_item.is_thumbnail()
+        in_item.change_thumbnail(not current_state)
+
         checked_item = None
         for item in self.components_list.widgets():
             if item.is_thumbnail():
                 checked_item = item
                 break
-        if checked_item is None or checked_item == in_item:
-            in_item.change_thumbnail()
-        else:
+
+        if checked_item is not None and checked_item != in_item:
             checked_item.change_thumbnail(False)
-            in_item.change_thumbnail()
+
+        in_item.change_thumbnail(current_state)

     def _set_preview(self, in_item):
+        current_state = in_item.is_preview()
+        in_item.change_preview(not current_state)
+
         checked_item = None
         for
item in self.components_list.widgets(): if item.is_preview(): checked_item = item break - if checked_item is None or checked_item == in_item: - in_item.change_preview() - else: + if checked_item is not None and checked_item != in_item: checked_item.change_preview(False) - in_item.change_preview() + + in_item.change_preview(current_state) def _remove_item(self, in_item): valid_repre = in_item.has_valid_repre is True
diff --git a/pype/tools/assetcreator/widget.py b/pype/tools/assetcreator/widget.py index 75e793479a..1e9e4ab624 100644 --- a/pype/tools/assetcreator/widget.py +++ b/pype/tools/assetcreator/widget.py @@ -3,9 +3,8 @@ import contextlib import collections from avalon.vendor import qtawesome -from avalon.vendor.Qt import QtWidgets, QtCore, QtGui -from avalon import io -from avalon import style +from Qt import QtWidgets, QtCore, QtGui +from avalon import style, io from .model import ( TreeModel,
diff --git a/pype/vendor/ftrack_api_old/_version.py b/pype/vendor/ftrack_api_old/_version.py index 07f744ca5d..aa1a8c4aba 100644 --- a/pype/vendor/ftrack_api_old/_version.py +++ b/pype/vendor/ftrack_api_old/_version.py @@ -1 +1 @@ -__version__ = '1.3.3' +__version__ = '1.8.2'
diff --git a/pype/vendor/ftrack_api_old/_weakref.py b/pype/vendor/ftrack_api_old/_weakref.py new file mode 100644 index 0000000000..69cc6f4b4f --- /dev/null +++ b/pype/vendor/ftrack_api_old/_weakref.py @@ -0,0 +1,66 @@ +""" +Yet another backport of WeakMethod for Python 2.7. +Changes include removing exception chaining and adding args to super() calls. + +Copyright (c) 2001-2019 Python Software Foundation. All rights reserved. + +Full license available in LICENSE.python. +""" +from weakref import ref + + +class WeakMethod(ref): + """ + A custom `weakref.ref` subclass which simulates a weak reference to + a bound method, working around the lifetime problem of bound methods. + """ + + __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" + + def __new__(cls, meth, callback=None): + try: + obj = meth.__self__ + func = meth.__func__ + except AttributeError: + raise TypeError( + "argument should be a bound method, not {}".format(type(meth)) + ) + + def _cb(arg): + # The self-weakref trick is needed to avoid creating a reference + # cycle.
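The backport above exists because a plain weakref.ref to a bound method is born dead: accessing an attribute like session.close creates a temporary bound-method object that is collected immediately. WeakMethod references the instance and the function separately and rebuilds the bound method on demand, which is what lets session.py below register atexit cleanup without keeping sessions alive. A CPython illustration using the stdlib equivalent (Python 3; the vendored class mirrors it for Python 2, and the Resource class here is purely illustrative):

    import weakref


    class Resource(object):
        def close(self):
            print("closed")


    resource = Resource()

    # The temporary bound method is collected as soon as ref() returns.
    dead = weakref.ref(resource.close)
    assert dead() is None

    # WeakMethod stays resolvable while the instance lives...
    alive = weakref.WeakMethod(resource.close)
    alive()()  # prints "closed"

    # ...and dereferences to None once the instance is gone.
    del resource
    assert alive() is None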
+ self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super(WeakMethod, self).__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return NotImplemented + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return NotImplemented + + __hash__ = ref.__hash__ diff --git a/pype/vendor/ftrack_api_old/attribute.py b/pype/vendor/ftrack_api_old/attribute.py index 66840bed66..47fd6c9616 100644 --- a/pype/vendor/ftrack_api_old/attribute.py +++ b/pype/vendor/ftrack_api_old/attribute.py @@ -148,7 +148,8 @@ class Attribute(object): '''A name and value pair persisted remotely.''' def __init__( - self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True + self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True, + computed=False ): '''Initialise attribute with *name*. @@ -161,10 +162,14 @@ class Attribute(object): are :attr:`ftrack_api_old.symbol.NOT_SET`. The exception to this is when the target value is also :attr:`ftrack_api_old.symbol.NOT_SET`. + If *computed* is set to True the value is a remote side computed value + and should not be long-term cached. + ''' super(Attribute, self).__init__() self._name = name self._mutable = mutable + self._computed = computed self.default_value = default_value self._local_key = 'local' @@ -205,6 +210,11 @@ class Attribute(object): '''Return whether attribute is mutable.''' return self._mutable + @property + def computed(self): + '''Return whether attribute is computed.''' + return self._computed + def get_value(self, entity): '''Return current value for *entity*. diff --git a/pype/vendor/ftrack_api_old/entity/factory.py b/pype/vendor/ftrack_api_old/entity/factory.py index 16721514bd..f47c92e563 100644 --- a/pype/vendor/ftrack_api_old/entity/factory.py +++ b/pype/vendor/ftrack_api_old/entity/factory.py @@ -49,9 +49,11 @@ class Factory(object): # Build attributes for class. attributes = ftrack_api_old.attribute.Attributes() - immutable = schema.get('immutable', []) + immutable_properties = schema.get('immutable', []) + computed_properties = schema.get('computed', []) for name, fragment in schema.get('properties', {}).items(): - mutable = name not in immutable + mutable = name not in immutable_properties + computed = name in computed_properties default = fragment.get('default', ftrack_api_old.symbol.NOT_SET) if default == '{uid}': @@ -62,7 +64,8 @@ class Factory(object): if data_type is not ftrack_api_old.symbol.NOT_SET: if data_type in ( - 'string', 'boolean', 'integer', 'number', 'variable' + 'string', 'boolean', 'integer', 'number', 'variable', + 'object' ): # Basic scalar attribute. 
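The computed flag introduced above flows from the schema's 'computed' property list through create_scalar_attribute, and Session.encode later skips such attributes under the 'persisted_only' strategy so server-side values are never written back or long-term cached. A self-contained mini-model of that contract (class and function names here are illustrative, not the vendored API):

    class Attribute(object):
        """Stand-in for ftrack_api_old.attribute.Attribute."""

        def __init__(self, name, computed=False):
            self.name = name
            self.computed = computed


    def persisted_payload(attributes, values):
        """Mimic 'persisted_only' encoding: skip computed values."""
        payload = {}
        for attribute in attributes:
            if attribute.computed:
                continue  # remote-side computed; never persist it
            payload[attribute.name] = values[attribute.name]
        return payload


    attrs = [Attribute("name"), Attribute("url", computed=True)]
    print(persisted_payload(attrs, {"name": "proj", "url": "https://x"}))
    # -> {'name': 'proj'}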
if data_type == 'number': @@ -74,7 +77,7 @@ class Factory(object): data_type = 'datetime' attribute = self.create_scalar_attribute( - class_name, name, mutable, default, data_type + class_name, name, mutable, computed, default, data_type ) if attribute: attributes.add(attribute) @@ -139,11 +142,12 @@ class Factory(object): return cls def create_scalar_attribute( - self, class_name, name, mutable, default, data_type + self, class_name, name, mutable, computed, default, data_type ): '''Return appropriate scalar attribute instance.''' return ftrack_api_old.attribute.ScalarAttribute( - name, data_type=data_type, default_value=default, mutable=mutable + name, data_type=data_type, default_value=default, mutable=mutable, + computed=computed ) def create_reference_attribute(self, class_name, name, mutable, reference): diff --git a/pype/vendor/ftrack_api_old/entity/location.py b/pype/vendor/ftrack_api_old/entity/location.py index d48264abc2..8d9d52c654 100644 --- a/pype/vendor/ftrack_api_old/entity/location.py +++ b/pype/vendor/ftrack_api_old/entity/location.py @@ -526,7 +526,8 @@ class Location(ftrack_api_old.entity.base.Entity): for index, resource_identifier in enumerate(resource_identifiers): resource_identifiers[index] = ( self.resource_identifier_transformer.decode( - resource_identifier + resource_identifier, + context={'component': components[index]} ) ) diff --git a/pype/vendor/ftrack_api_old/entity/note.py b/pype/vendor/ftrack_api_old/entity/note.py index 4cacf6ac8a..c628886fd9 100644 --- a/pype/vendor/ftrack_api_old/entity/note.py +++ b/pype/vendor/ftrack_api_old/entity/note.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2015 ftrack +import warnings + import ftrack_api_old.entity.base @@ -33,26 +35,52 @@ class Note(ftrack_api_old.entity.base.Entity): class CreateNoteMixin(object): '''Mixin to add create_note method on entity class.''' - def create_note(self, content, author, recipients=None, category=None): + def create_note( + self, content, author, recipients=None, category=None, labels=None + ): '''Create note with *content*, *author*. - Note category can be set by including *category* and *recipients* - can be specified as a list of user or group instances. + NoteLabels can be set by including *labels*. + + Note category can be set by including *category*. + + *recipients* can be specified as a list of user or group instances. ''' + note_label_support = 'NoteLabel' in self.session.types + + if not labels: + labels = [] + + if labels and not note_label_support: + raise ValueError( + 'NoteLabel is not supported by the current server version.' + ) + + if category and labels: + raise ValueError( + 'Both category and labels cannot be set at the same time.' 
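Usage sketch for the new labels argument on create_note (hedged: this assumes a connected session against a server that supports NoteLabel, that the vendored package exposes Session like upstream ftrack_api, and that the queried entities exist on your server):

    import ftrack_api_old

    session = ftrack_api_old.Session()
    task = session.query('Task').first()
    author = session.query('User').first()
    label = session.query('NoteLabel where name is "internal"').first()

    # Passing both category and labels raises ValueError; category alone
    # still works but now emits a PendingDeprecationWarning.
    note = task.create_note('Please review', author=author, labels=[label])
    session.commit()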
+ ) + if not recipients: recipients = [] - category_id = None - if category: - category_id = category['id'] - data = { 'content': content, - 'author': author, - 'category_id': category_id + 'author': author } + if category: + if note_label_support: + labels = [category] + warnings.warn( + 'category argument will be removed in an upcoming version, ' + 'please use labels instead.', + PendingDeprecationWarning + ) + else: + data['category_id'] = category['id'] + note = self.session.create('Note', data) self['notes'].append(note) @@ -65,4 +93,13 @@ class CreateNoteMixin(object): note['recipients'].append(recipient) + for label in labels: + self.session.create( + 'NoteLabelLink', + { + 'label_id': label['id'], + 'note_id': note['id'] + } + ) + return note
diff --git a/pype/vendor/ftrack_api_old/event/expression.py b/pype/vendor/ftrack_api_old/event/expression.py index e10cd85844..8de4be0d71 100644 --- a/pype/vendor/ftrack_api_old/event/expression.py +++ b/pype/vendor/ftrack_api_old/event/expression.py @@ -3,14 +3,15 @@ from operator import eq, ne, ge, le, gt, lt -from pyparsing import (ParserElement, Group, Word, CaselessKeyword, Forward, +from pyparsing import (Group, Word, CaselessKeyword, Forward, FollowedBy, Suppress, oneOf, OneOrMore, Optional, alphanums, quotedString, removeQuotes) import ftrack_api_old.exception -# Optimise parsing using packrat memoisation feature. -ParserElement.enablePackrat() +# Do not enable packrat since it is not thread-safe and will result in parsing +# exceptions in a multi-threaded environment. +# ParserElement.enablePackrat() class Parser(object):
diff --git a/pype/vendor/ftrack_api_old/event/hub.py b/pype/vendor/ftrack_api_old/event/hub.py index 25410aa1e1..3ffbd38056 100644 --- a/pype/vendor/ftrack_api_old/event/hub.py +++ b/pype/vendor/ftrack_api_old/event/hub.py @@ -14,6 +14,7 @@ import operator import functools import json import socket +import warnings import requests import requests.exceptions @@ -40,9 +41,20 @@ ServerDetails = collections.namedtuple('ServerDetails', [ ]) + + class EventHub(object): '''Manage routing of events.''' + _future_signature_warning = ( + 'When constructing your Session object you did not explicitly define ' + 'auto_connect_event_hub as True even though you appear to be publishing ' + 'and / or subscribing to asynchronous events. In version 2.0 of ' + 'the ftrack-python-api the default behavior will change from True ' + 'to False. Please make sure to update your tools. You can read more at ' + 'http://ftrack-python-api.rtd.ftrack.com/en/stable/release/migration.html' + ) + def __init__(self, server_url, api_user, api_key): '''Initialise hub, connecting to ftrack *server_url*. @@ -76,6 +88,8 @@ class EventHub(object): self._auto_reconnect_attempts = 30 self._auto_reconnect_delay = 10 + self._deprecation_warning_auto_connect = False + # Mapping of Socket.IO codes to meaning. self._code_name_mapping = { '0': 'disconnect', @@ -134,6 +148,9 @@ class EventHub(object): connected or connection fails. ''' + + self._deprecation_warning_auto_connect = False + if self.connected: raise ftrack_api_old.exception.EventHubConnectionError( 'Already connected.' @@ -164,17 +181,26 @@ class EventHub(object): # https://docs.python.org/2/library/socket.html#socket.socket.setblocking self._connection = websocket.create_connection(url, timeout=60) - except Exception: + except Exception as error: + error_message = ( + 'Failed to connect to event server at {server_url} with ' + 'error: "{error}".'
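Given the auto_connect_event_hub default moves from True to None in session.py below (and to False in 2.0 upstream), tools that publish or subscribe to asynchronous events should opt in explicitly; that also keeps the FutureWarning above out of their logs. Sketch, assuming credentials are picked up from the FTRACK_* environment variables:

    import ftrack_api_old

    # Explicit True: the event hub connects in a background thread and
    # the migration FutureWarning is never emitted.
    session = ftrack_api_old.Session(auto_connect_event_hub=True)

    # Explicit False: no event hub connection; synchronous publishes
    # (e.g. ftrack.api.session.ready) still work.
    local_session = ftrack_api_old.Session(auto_connect_event_hub=False)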
+ ) + + error_details = { + 'error': unicode(error), + 'server_url': self.get_server_url() + } + self.logger.debug( L( - 'Error connecting to event server at {0}.', - self.get_server_url() + error_message, **error_details ), exc_info=1 ) raise ftrack_api_old.exception.EventHubConnectionError( - 'Failed to connect to event server at {0}.' - .format(self.get_server_url()) + error_message, + details=error_details ) # Start background processing thread. @@ -543,6 +569,11 @@ class EventHub(object): event will be caught by this method and ignored. ''' + if self._deprecation_warning_auto_connect and not synchronous: + warnings.warn( + self._future_signature_warning, FutureWarning + ) + try: return self._publish( event, synchronous=synchronous, on_reply=on_reply @@ -700,18 +731,23 @@ class EventHub(object): # Automatically publish a non None response as a reply when not in # synchronous mode. - if not synchronous and response is not None: - - try: - self.publish_reply( - event, data=response, source=subscriber.metadata + if not synchronous: + if self._deprecation_warning_auto_connect: + warnings.warn( + self._future_signature_warning, FutureWarning ) - except Exception: - self.logger.exception(L( - 'Error publishing response {0} from subscriber {1} ' - 'for event {2}.', response, subscriber, event - )) + if response is not None: + try: + self.publish_reply( + event, data=response, source=subscriber.metadata + ) + + except Exception: + self.logger.exception(L( + 'Error publishing response {0} from subscriber {1} ' + 'for event {2}.', response, subscriber, event + )) # Check whether to continue processing topic event. if event.is_stopped(): @@ -881,6 +917,7 @@ class EventHub(object): if code_name == 'connect': self.logger.debug('Connected to event server.') event = ftrack_api_old.event.base.Event('ftrack.meta.connected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'disconnect': @@ -901,6 +938,7 @@ class EventHub(object): if not self.connected: event = ftrack_api_old.event.base.Event('ftrack.meta.disconnected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'heartbeat': diff --git a/pype/vendor/ftrack_api_old/logging.py b/pype/vendor/ftrack_api_old/logging.py index 2b28ce900b..41969c5b2a 100644 --- a/pype/vendor/ftrack_api_old/logging.py +++ b/pype/vendor/ftrack_api_old/logging.py @@ -1,6 +1,23 @@ # :coding: utf-8 # :copyright: Copyright (c) 2016 ftrack +import functools +import warnings + + +def deprecation_warning(message): + def decorator(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + warnings.warn( + message, + PendingDeprecationWarning + ) + return function(*args, **kwargs) + return wrapper + + return decorator + class LazyLogMessage(object): '''A log message that can be evaluated lazily for improved performance. 
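The deprecation_warning decorator above is what session.py below uses to alias _call and _entity_reference; it wraps a callable and emits a PendingDeprecationWarning on every invocation before delegating. A quick self-test sketch (assumes pype is importable):

    import warnings

    from pype.vendor.ftrack_api_old.logging import deprecation_warning


    @deprecation_warning('old_name() is deprecated, use new_name().')
    def old_name():
        return 42


    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        assert old_name() == 42

    assert caught[0].category is PendingDeprecationWarning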
diff --git a/pype/vendor/ftrack_api_old/session.py b/pype/vendor/ftrack_api_old/session.py index c313203a0c..0986962ca4 100644 --- a/pype/vendor/ftrack_api_old/session.py +++ b/pype/vendor/ftrack_api_old/session.py @@ -16,6 +16,7 @@ import hashlib import tempfile import threading import atexit +import warnings import requests import requests.auth @@ -42,8 +43,14 @@ import ftrack_api_old.structure.origin import ftrack_api_old.structure.entity_id import ftrack_api_old.accessor.server import ftrack_api_old._centralized_storage_scenario +import ftrack_api_old.logging from ftrack_api_old.logging import LazyLogMessage as L +try: + from weakref import WeakMethod +except ImportError: + from ftrack_api_old._weakref import WeakMethod + class SessionAuthentication(requests.auth.AuthBase): '''Attach ftrack session authentication information to requests.''' @@ -69,7 +76,7 @@ class Session(object): def __init__( self, server_url=None, api_key=None, api_user=None, auto_populate=True, plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=True, schema_cache_path=None, + auto_connect_event_hub=None, schema_cache_path=None, plugin_arguments=None ): '''Initialise session. @@ -233,7 +240,8 @@ class Session(object): self._api_key ) - if auto_connect_event_hub: + self._auto_connect_event_hub_thread = None + if auto_connect_event_hub in (None, True): # Connect to event hub in background thread so as not to block main # session usage waiting for event hub connection. self._auto_connect_event_hub_thread = threading.Thread( @@ -242,8 +250,14 @@ class Session(object): self._auto_connect_event_hub_thread.daemon = True self._auto_connect_event_hub_thread.start() + # To help with migration from auto_connect_event_hub default changing + # from True to False. + self._event_hub._deprecation_warning_auto_connect = ( + auto_connect_event_hub is None + ) + # Register to auto-close session on exit. - atexit.register(self.close) + atexit.register(WeakMethod(self.close)) self._plugin_paths = plugin_paths if self._plugin_paths is None: @@ -271,6 +285,15 @@ class Session(object): ftrack_api_old._centralized_storage_scenario.register(self) self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.ready', + data=dict( + session=self + ) + ), + synchronous=True + ) def __enter__(self): '''Return session as context manager.''' @@ -389,7 +412,8 @@ class Session(object): try: self.event_hub.disconnect() - self._auto_connect_event_hub_thread.join() + if self._auto_connect_event_hub_thread: + self._auto_connect_event_hub_thread.join() except ftrack_api_old.exception.EventHubConnectionError: pass @@ -428,6 +452,16 @@ class Session(object): # Re-configure certain session aspects that may be dependant on cache. self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.reset', + data=dict( + session=self + ) + ), + synchronous=True + ) + def auto_populating(self, auto_populate): '''Temporarily set auto populate to *auto_populate*. @@ -508,7 +542,7 @@ class Session(object): 'entity_key': entity.get('id') }) - result = self._call( + result = self.call( [payload] ) @@ -790,12 +824,13 @@ class Session(object): }] # TODO: When should this execute? How to handle background=True? - results = self._call(batch) + results = self.call(batch) # Merge entities into local cache and return merged entities. 
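The query path below now merges recursively: instead of merging only the top-level results, _merge_recursive walks entity attributes, collections and mapped collections, threading a single memo dict through every call so each entity is merged once per query. The traversal shape, heavily simplified (a dict tree and id() memo stand in for entities and the session's merged bookkeeping):

    def merge_recursive(entity, merged=None):
        """Simplified shape of Session._merge_recursive."""
        if merged is None:
            merged = {}
        if id(entity) in merged:
            return merged[id(entity)]   # already merged in this pass
        merged[id(entity)] = entity     # stand-in for session.merge()
        for child in entity.get("children", ()):
            merge_recursive(child, merged=merged)
        return entity


    tree = {"name": "shot", "children": [{"name": "anim", "children": []}]}
    merge_recursive(tree)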
data = [] + merged = dict() for entity in results[0]['data']: - data.append(self.merge(entity)) + data.append(self._merge_recursive(entity, merged)) return data, results[0]['metadata'] @@ -856,6 +891,48 @@ class Session(object): else: return value + def _merge_recursive(self, entity, merged=None): + '''Merge *entity* and all its attributes recursively.''' + log_debug = self.logger.isEnabledFor(logging.DEBUG) + + if merged is None: + merged = {} + + attached = self.merge(entity, merged) + + for attribute in entity.attributes: + # Remote attributes. + remote_value = attribute.get_remote_value(entity) + + if isinstance( + remote_value, + ( + ftrack_api_old.entity.base.Entity, + ftrack_api_old.collection.Collection, + ftrack_api_old.collection.MappedCollectionProxy + ) + ): + log_debug and self.logger.debug( + 'Merging remote value for attribute {0}.'.format(attribute) + ) + + if isinstance(remote_value, ftrack_api_old.entity.base.Entity): + self._merge_recursive(remote_value, merged=merged) + + elif isinstance( + remote_value, ftrack_api_old.collection.Collection + ): + for entry in remote_value: + self._merge_recursive(entry, merged=merged) + + elif isinstance( + remote_value, ftrack_api_old.collection.MappedCollectionProxy + ): + for entry in remote_value.collection: + self._merge_recursive(entry, merged=merged) + + return attached + def _merge_entity(self, entity, merged=None): '''Merge *entity* into session returning merged entity. @@ -1185,7 +1262,7 @@ class Session(object): # Process batch. if batch: - result = self._call(batch) + result = self.call(batch) # Clear recorded operations. self.recorded_operations.clear() @@ -1260,7 +1337,7 @@ class Session(object): def _fetch_server_information(self): '''Return server information.''' - result = self._call([{'action': 'query_server_information'}]) + result = self.call([{'action': 'query_server_information'}]) return result[0] def _discover_plugins(self, plugin_arguments=None): @@ -1362,7 +1439,7 @@ class Session(object): 'Loading schemas from server due to hash not matching. ' 'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash )) - schemas = self._call([{'action': 'query_schemas'}])[0] + schemas = self.call([{'action': 'query_schemas'}])[0] if schema_cache_path: try: @@ -1525,8 +1602,24 @@ class Session(object): synchronous=True ) + @ftrack_api_old.logging.deprecation_warning( + 'Session._call is now available as public method Session.call. The ' + 'private method will be removed in version 2.0.' + ) def _call(self, data): - '''Make request to server with *data*.''' + '''Make request to server with *data* batch describing the actions. + + .. note:: + + This private method is now available as public method + :meth:`call`. This alias remains for backwards + compatibility, but will be removed in version 2.0. + + ''' + return self.call(data) + + def call(self, data): + '''Make request to server with *data* batch describing the actions.''' url = self._server_url + '/api' headers = { 'content-type': 'application/json', @@ -1553,7 +1646,7 @@ class Session(object): 'Server reported error in unexpected format.
Raw error was: {0}' .format(response.text) ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) else: @@ -1562,7 +1655,7 @@ class Session(object): error_message = 'Server reported error: {0}({1})'.format( result['exception'], result['content'] ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) return result @@ -1620,12 +1713,12 @@ class Session(object): if "entity_data" in data: for key, value in data["entity_data"].items(): if isinstance(value, ftrack_api_old.entity.base.Entity): - data["entity_data"][key] = self._entity_reference(value) + data["entity_data"][key] = self.entity_reference(value) return data if isinstance(item, ftrack_api_old.entity.base.Entity): - data = self._entity_reference(item) + data = self.entity_reference(item) with self.auto_populating(True): @@ -1646,14 +1739,15 @@ class Session(object): value = attribute.get_local_value(item) elif entity_attribute_strategy == 'persisted_only': - value = attribute.get_remote_value(item) + if not attribute.computed: + value = attribute.get_remote_value(item) if value is not ftrack_api_old.symbol.NOT_SET: if isinstance( attribute, ftrack_api_old.attribute.ReferenceAttribute ): if isinstance(value, ftrack_api_old.entity.base.Entity): - value = self._entity_reference(value) + value = self.entity_reference(value) data[attribute.name] = value @@ -1668,14 +1762,14 @@ class Session(object): if isinstance(item, ftrack_api_old.collection.Collection): data = [] for entity in item: - data.append(self._entity_reference(entity)) + data.append(self.entity_reference(entity)) return data raise TypeError('{0!r} is not JSON serializable'.format(item)) - def _entity_reference(self, entity): - '''Return reference to *entity*. + def entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. Return a mapping containing the __entity_type__ of the entity along with the key, value pairs that make up its primary key. @@ -1689,6 +1783,26 @@ class Session(object): return reference + @ftrack_api_old.logging.deprecation_warning( + 'Session._entity_reference is now available as public method ' + 'Session.entity_reference. The private method will be removed ' + 'in version 2.0.' + ) + def _entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. + + Return a mapping containing the __entity_type__ of the entity along + with the key, value pairs that make up its primary key. + + .. note:: + + This private method is now available as public method + :meth:`entity_reference`. This alias remains for backwards + compatibility, but will be removed in version 2.0. + + ''' + return self.entity_reference(entity) + def decode(self, string): '''Return decoded JSON *string* as Python object.''' with self.operation_recording(False): @@ -2016,6 +2130,10 @@ class Session(object): return availabilities + @ftrack_api_old.logging.deprecation_warning( + 'Session.delayed_job has been deprecated in favour of session.call. ' + 'Please refer to the release notes for more information.' + ) def delayed_job(self, job_type): '''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned.
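With _call promoted to the public Session.call, a raw action batch can be sent directly; this is the same payload shape that query and commit build internally. Sketch, assuming a reachable server configured via environment variables:

    import ftrack_api_old

    session = ftrack_api_old.Session(auto_connect_event_hub=False)

    # One HTTP request: a list of action payloads in, one result each out.
    info = session.call([{'action': 'query_server_information'}])[0]
    print(sorted(info.keys()))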
@@ -2033,7 +2151,7 @@ class Session(object): } try: - result = self._call( + result = self.call( [operation] )[0] @@ -2070,7 +2188,7 @@ class Session(object): ) try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2172,7 +2290,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2212,7 +2330,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2258,7 +2376,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2306,7 +2424,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. if 'Invalid action u\'send_review_session_invite\'' in error.message:
diff --git a/pype/vendor/ftrack_api_old/symbol.py b/pype/vendor/ftrack_api_old/symbol.py index 10b3f55bd5..f46760f634 100644 --- a/pype/vendor/ftrack_api_old/symbol.py +++ b/pype/vendor/ftrack_api_old/symbol.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2014 ftrack +import os + class Symbol(object): '''A constant symbol.''' @@ -68,8 +70,8 @@ CONNECT_LOCATION_ID = '07b82a97-8cf9-11e3-9383-20c9d081909b' #: Identifier of builtin server location. SERVER_LOCATION_ID = '3a372bde-05bc-11e4-8908-20c9d081909b' -#: Chunk size used when working with data. -CHUNK_SIZE = 8192 +#: Chunk size used when working with data, defaults to 1MB.
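Note on the override below: FTRACK_API_FILE_CHUNK_SIZE is read once at import time. An unset variable or an explicit 0 falls back to 1MB, while a non-numeric value raises ValueError during import. The parsing, spelled out:

    import os

    raw = os.getenv('FTRACK_API_FILE_CHUNK_SIZE', 0)  # unset -> 0
    chunk_size = int(raw) or 1024 * 1024              # 0 -> 1MB default
    assert chunk_size > 0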
+CHUNK_SIZE = int(os.getenv('FTRACK_API_FILE_CHUNK_SIZE', 0)) or 1024*1024 #: Symbol representing syncing users with ldap JOB_SYNC_USERS_LDAP = Symbol('SYNC_USERS_LDAP') diff --git a/res/app_icons/blender.png b/res/app_icons/blender.png new file mode 100644 index 0000000000..6070a51fae Binary files /dev/null and b/res/app_icons/blender.png differ diff --git a/res/ftrack/action_icons/Delivery.svg b/res/ftrack/action_icons/Delivery.svg new file mode 100644 index 0000000000..3380487c31 --- /dev/null +++ b/res/ftrack/action_icons/Delivery.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/schema/application-1.0.json b/schema/application-1.0.json new file mode 100644 index 0000000000..e2418037c6 --- /dev/null +++ b/schema/application-1.0.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:application-1.0", + "description": "An application definition.", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "label", + "application_dir", + "executable" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "label": { + "description": "Nice name of application.", + "type": "string" + }, + "application_dir": { + "description": "Name of directory used for application resources.", + "type": "string" + }, + "executable": { + "description": "Name of callable executable, this is called to launch the application", + "type": "string" + }, + "description": { + "description": "Description of application.", + "type": "string" + }, + "environment": { + "description": "Key/value pairs for environment variables related to this application. Supports lists for paths, such as PYTHONPATH.", + "type": "object", + "items": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + } + }, + "default_dirs": { + "type": "array", + "items": { + "type": "string" + } + }, + "copy": { + "type": "object", + "patternProperties": { + "^.*$": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + } +} diff --git a/schema/asset-1.0.json b/schema/asset-1.0.json new file mode 100644 index 0000000000..6f3665c628 --- /dev/null +++ b/schema/asset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-1.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "subsets" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "subsets": { + "type": "array", + "items": { + "$ref": "subset.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/asset-2.0.json b/schema/asset-2.0.json new file mode 100644 index 0000000000..066cb33498 --- /dev/null +++ b/schema/asset-2.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-2.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "silo", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:asset-2.0"], + "example": "avalon-core:asset-2.0" + }, + "type": { + "description": "The type of document", + "type": 
"string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/asset-3.0.json b/schema/asset-3.0.json new file mode 100644 index 0000000000..a3a22e917b --- /dev/null +++ b/schema/asset-3.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-3.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:asset-3.0", "pype:asset-3.0"], + "example": "avalon-core:asset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/config-1.0.json b/schema/config-1.0.json new file mode 100644 index 0000000000..b3c4362f41 --- /dev/null +++ b/schema/config-1.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": false, + "required": [ + "template", + "tasks", + "apps" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "template": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.*$": { + "type": "string" + } + } + }, + "tasks": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "apps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "families": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "label": {"type": "string"}, + "hideFilter": {"type": "boolean"} + }, + "required": ["name"] + } + }, + "groups": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "color": {"type": "string"}, + "order": {"type": ["integer", "number"]} + }, + "required": ["name"] + } + }, + "copy": { + "type": "object" + } + } +} diff --git a/schema/container-1.0.json b/schema/container-1.0.json new 
file mode 100644 index 0000000000..d9e4e39f7f --- /dev/null +++ b/schema/container-1.0.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:container-1.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "id", + "objectName", + "name", + "author", + "loader", + "families", + "time", + "subset", + "asset", + "representation", + "version", + "silo", + "path", + "source" + ], + "properties": { + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": ["pyblish.mindbender.container"], + "example": "pyblish.mindbender.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "name": { + "description": "Full name of application object", + "type": "string", + "example": "modelDefault" + }, + "author": { + "description": "Name of the author of the published version", + "type": "string", + "example": "Marcus Ottosson" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "families": { + "description": "Families associated with this subset", + "type": "string", + "example": "mindbender.model" + }, + "time": { + "description": "File-system safe, formatted time", + "type": "string", + "example": "20170329T131545Z" + }, + "subset": { + "description": "Name of source subset", + "type": "string", + "example": "modelDefault" + }, + "asset": { + "description": "Name of source asset", + "type": "string", + "example": "Bruce" + }, + "representation": { + "description": "Name of source representation", + "type": "string", + "example": ".ma" + }, + "version": { + "description": "Version number", + "type": "number", + "example": 12 + }, + "silo": { + "description": "Silo of parent asset", + "type": "string", + "example": "assets" + }, + "path": { + "description": "Absolute path on disk", + "type": "string", + "example": "{root}/assets/Bruce/publish/rigDefault/v002" + }, + "source": { + "description": "Absolute path to file from which this version was published", + "type": "string", + "example": "{root}/assets/Bruce/work/rigging/maya/scenes/rig_v001.ma" + } + } +}
diff --git a/schema/container-2.0.json b/schema/container-2.0.json new file mode 100644 index 0000000000..7b84209ea0 --- /dev/null +++ b/schema/container-2.0.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:container-2.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "id", + "objectName", + "name", + "namespace", + "loader", + "representation" + ], + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:container-2.0", "pype:container-2.0"], + "example": "pype:container-2.0" + }, + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": ["pyblish.avalon.container"], + "example": "pyblish.avalon.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "name": { + "description": "Internal object name of
container in application", + "type": "string", + "example": "modelDefault_01" + }, + "namespace": { + "description": "Internal namespace of container in application", + "type": "string", + "example": "Bruce_" + }, + "representation": { + "description": "Unique id of representation in database", + "type": "string", + "example": "59523f355f8c1b5f6c5e8348" + } + } +} \ No newline at end of file diff --git a/schema/inventory-1.0.json b/schema/inventory-1.0.json new file mode 100644 index 0000000000..888ba7945a --- /dev/null +++ b/schema/inventory-1.0.json @@ -0,0 +1,10 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": true +} diff --git a/schema/project-2.0.json b/schema/project-2.0.json new file mode 100644 index 0000000000..ad0e460f4d --- /dev/null +++ b/schema/project-2.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:project-2.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data", + "config" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:project-2.0", "pype:project-2.0"], + "example": "avalon-core:project-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["project"], + "example": "project" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "hulk" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "fps": 24, + "width": 1920, + "height": 1080 + } + }, + "config": { + "type": "object", + "description": "Document metadata", + "example": { + "schema": "pype:config-1.0", + "apps": [ + { + "name": "maya2016", + "label": "Autodesk Maya 2016" + }, + { + "name": "nuke10", + "label": "The Foundry Nuke 10.0" + } + ], + "tasks": [ + {"name": "model"}, + {"name": "render"}, + {"name": "animate"}, + {"name": "rig"}, + {"name": "lookdev"}, + {"name": "layout"} + ], + "template": { + "work": + "{root}/{project}/{silo}/{asset}/work/{task}/{app}", + "publish": + "{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/{subset}.{representation}" + } + }, + "$ref": "config-1.0.json" + } + }, + + "definitions": {} +} diff --git a/schema/representation-1.0.json b/schema/representation-1.0.json new file mode 100644 index 0000000000..10ae72928e --- /dev/null +++ b/schema/representation-1.0.json @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:representation-1.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "format", + "path" + ], + + "properties": { + "schema": {"type": "string"}, + "format": { + "description": "File extension, including '.'", + "type": "string" + }, + "path": { + "description": "Unformatted path to version.", + "type": "string" + } + } +} diff --git a/schema/representation-2.0.json b/schema/representation-2.0.json new file mode 100644 index 0000000000..e12dea8564 --- /dev/null +++ b/schema/representation-2.0.json @@ -0,0 +1,78 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": 
"pype:representation-2.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:representation-2.0", "pype:representation-2.0"], + "example": "pype:representation-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["representation"], + "example": "representation" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of representation", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "abc" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "label": "Alembic" + } + }, + "dependencies": { + "description": "Other representation that this representation depends on", + "type": "array", + "items": {"type": "string"}, + "example": [ + "592d547a5f8c1b388093c145" + ] + }, + "context": { + "description": "Summary of the context to which this representation belong.", + "type": "object", + "properties": { + "project": {"type": "object"}, + "asset": {"type": "string"}, + "silo": {"type": ["string", "null"]}, + "subset": {"type": "string"}, + "version": {"type": "number"}, + "representation": {"type": "string"} + }, + "example": { + "project": "hulk", + "asset": "Bruce", + "silo": "assets", + "subset": "rigDefault", + "version": 12, + "representation": "ma" + } + } + } +} diff --git a/schema/session-1.0.json b/schema/session-1.0.json new file mode 100644 index 0000000000..2b201f9c61 --- /dev/null +++ b/schema/session-1.0.json @@ -0,0 +1,143 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-1.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_SILO", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": "Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. 
graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} \ No newline at end of file diff --git a/schema/session-2.0.json b/schema/session-2.0.json new file mode 100644 index 0000000000..006a9e2dbf --- /dev/null +++ b/schema/session-2.0.json @@ -0,0 +1,142 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-2.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": 
"Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} diff --git a/schema/shaders-1.0.json b/schema/shaders-1.0.json new file mode 100644 index 0000000000..e66cc735e8 --- /dev/null +++ b/schema/shaders-1.0.json @@ -0,0 +1,32 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:shaders-1.0", + "description": "Relationships between shaders and Avalon IDs", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "shader" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "shader": { + "description": "Name of directory", + "type": "array", + "items": { + "type": "str", + "description": "Avalon ID and optional face indexes, e.g. 
'f9520572-ac1d-11e6-b39e-3085a99791c9.f[5002:5185]'" + } + } + }, + + "definitions": {} +} diff --git a/schema/subset-1.0.json b/schema/subset-1.0.json new file mode 100644 index 0000000000..90ae0349fa --- /dev/null +++ b/schema/subset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-1.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "versions" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "$ref": "version.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/subset-2.0.json b/schema/subset-2.0.json new file mode 100644 index 0000000000..98f39c4f3e --- /dev/null +++ b/schema/subset-2.0.json @@ -0,0 +1,51 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-2.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:subset-2.0"], + "example": "pype:subset-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "type": "object", + "description": "Document metadata", + "example": { + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/subset-3.0.json b/schema/subset-3.0.json new file mode 100644 index 0000000000..a0af9d340f --- /dev/null +++ b/schema/subset-3.0.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-3.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:subset-3.0", "pype:subset-3.0"], + "example": "pype:subset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families"], + "properties": { + "families": { + "type": "array", + "items": {"type": "string"}, + "description": "One or more families associated with this subset" + } + }, + "example": { + "families" : [ + "avalon.camera" + ], + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/thumbnail-1.0.json b/schema/thumbnail-1.0.json new file mode 100644 index 0000000000..96b540ab7e --- /dev/null +++ b/schema/thumbnail-1.0.json @@ -0,0 +1,42 @@ +{ + "$schema": 
"http://json-schema.org/draft-04/schema#", + + "title": "pype:thumbnail-1.0", + "description": "Entity with thumbnail data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:thumbnail-1.0"], + "example": "pype:thumbnail-1.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["thumbnail"], + "example": "thumbnail" + }, + "data": { + "description": "Thumbnail data", + "type": "object", + "example": { + "binary_data": "Binary({byte data of image})", + "template": "{thumbnail_root}/{project[name]}/{_id}{ext}}", + "template_data": { + "ext": ".jpg" + } + } + } + } +} diff --git a/schema/version-1.0.json b/schema/version-1.0.json new file mode 100644 index 0000000000..c784a25175 --- /dev/null +++ b/schema/version-1.0.json @@ -0,0 +1,50 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-1.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "version", + "path", + "time", + "author", + "source", + "representations" + ], + + "properties": { + "schema": {"type": "string"}, + "representations": { + "type": "array", + "items": { + "$ref": "representation.json" + } + }, + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + } +} diff --git a/schema/version-2.0.json b/schema/version-2.0.json new file mode 100644 index 0000000000..5bb4a56f96 --- /dev/null +++ b/schema/version-2.0.json @@ -0,0 +1,92 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-2.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:version-2.0"], + "example": "pype:version-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families", "author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + "description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of 
this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + }, + "families": { + "type": "array", + "items": {"type": "string"}, + "description": "One or more families associated with this version" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "families" : [ + "avalon.model" + ], + "time" : "20170510T090203Z" + } + } + } +} diff --git a/schema/version-3.0.json b/schema/version-3.0.json new file mode 100644 index 0000000000..808650da0d --- /dev/null +++ b/schema/version-3.0.json @@ -0,0 +1,84 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-3.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:version-3.0", "pype:version-3.0"], + "example": "pype:version-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + "description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. 
'{root}/assets/Bruce/publish/lookdevDefault/v001'", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "time" : "20170510T090203Z" + } + } + } +} diff --git a/setup/blender/init.py b/setup/blender/init.py new file mode 100644 index 0000000000..05c15eaeb2 --- /dev/null +++ b/setup/blender/init.py @@ -0,0 +1,3 @@ +from pype import blender + +blender.install() diff --git a/setup/nuke/nuke_path/KnobScripter/__init__.py b/setup/nuke/nuke_path/KnobScripter/__init__.py new file mode 100644 index 0000000000..8fe91d63f5 --- /dev/null +++ b/setup/nuke/nuke_path/KnobScripter/__init__.py @@ -0,0 +1 @@ +import knob_scripter \ No newline at end of file diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_clearConsole.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_clearConsole.png new file mode 100644 index 0000000000..75ac04ef84 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_clearConsole.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_download.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_download.png new file mode 100644 index 0000000000..1e3e9b7631 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_download.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_exitnode.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_exitnode.png new file mode 100644 index 0000000000..7714cd2b92 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_exitnode.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_pick.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_pick.png new file mode 100644 index 0000000000..2395537550 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_pick.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs.png new file mode 100644 index 0000000000..efef5ffc92 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs2.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs2.png new file mode 100644 index 0000000000..5c3c941d59 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs2.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_refresh.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_refresh.png new file mode 100644 index 0000000000..559bfd74ab Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_refresh.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_run.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_run.png new file mode 100644 index 0000000000..6b2e4ddc23 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_run.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_save.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_save.png new file mode 100644 index 0000000000..e29c667f34 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_save.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_search.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_search.png new file mode 100644 index 0000000000..d4ed2e1a2b Binary files /dev/null and 
b/setup/nuke/nuke_path/KnobScripter/icons/icon_search.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_snippets.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_snippets.png new file mode 100644 index 0000000000..479c44f19e Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_snippets.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/knob_scripter.py b/setup/nuke/nuke_path/KnobScripter/knob_scripter.py new file mode 100644 index 0000000000..f03067aa4b --- /dev/null +++ b/setup/nuke/nuke_path/KnobScripter/knob_scripter.py @@ -0,0 +1,4196 @@ +# ------------------------------------------------- +# KnobScripter by Adrian Pueyo +# Complete Python script editor for Nuke +# adrianpueyo.com, 2016-2019 +import string +import traceback +from webbrowser import open as openUrl +from threading import Event, Thread +import platform +import subprocess +from functools import partial +import re +import sys +from nukescripts import panels +import json +import os +import nuke +version = "2.3 wip" +date = "Aug 12 2019" +# ------------------------------------------------- + + +# Symlinks on windows... +if os.name == "nt": + def symlink_ms(source, link_name): + import ctypes + csl = ctypes.windll.kernel32.CreateSymbolicLinkW + csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32) + csl.restype = ctypes.c_ubyte + flags = 1 if os.path.isdir(source) else 0 + try: + if csl(link_name, source.replace('/', '\\'), flags) == 0: + raise ctypes.WinError() + except: + pass + os.symlink = symlink_ms + +try: + if nuke.NUKE_VERSION_MAJOR < 11: + from PySide import QtCore, QtGui, QtGui as QtWidgets + from PySide.QtCore import Qt + else: + from PySide2 import QtWidgets, QtGui, QtCore + from PySide2.QtCore import Qt +except ImportError: + from Qt import QtCore, QtGui, QtWidgets + +KS_DIR = os.path.dirname(__file__) +icons_path = KS_DIR + "/icons/" +DebugMode = False +AllKnobScripters = [] # All open instances at a given time + +PrefsPanel = "" +SnippetEditPanel = "" + +nuke.tprint('KnobScripter v{}, built {}.\nCopyright (c) 2016-2019 Adrian Pueyo. 
All Rights Reserved.'.format(version, date)) + + +class KnobScripter(QtWidgets.QWidget): + + def __init__(self, node="", knob="knobChanged"): + super(KnobScripter, self).__init__() + + # Autosave the other knobscripters and add this one + for ks in AllKnobScripters: + try: + ks.autosave() + except: + pass + if self not in AllKnobScripters: + AllKnobScripters.append(self) + + self.nodeMode = (node != "") + if node == "": + self.node = nuke.toNode("root") + else: + self.node = node + + self.isPane = False + self.knob = knob + # For the option to also display the knob labels on the knob dropdown + self.show_labels = False + self.unsavedKnobs = {} + self.modifiedKnobs = set() + self.scrollPos = {} + self.cursorPos = {} + self.fontSize = 10 + self.font = "Monospace" + self.tabSpaces = 4 + self.windowDefaultSize = [500, 300] + self.color_scheme = "sublime" # Can be nuke or sublime + self.pinned = 1 + self.toLoadKnob = True + self.frw_open = False # Find replace widget closed by default + self.icon_size = 17 + self.btn_size = 24 + self.qt_icon_size = QtCore.QSize(self.icon_size, self.icon_size) + self.qt_btn_size = QtCore.QSize(self.btn_size, self.btn_size) + self.origConsoleText = "" + self.nukeSE = self.findSE() + self.nukeSEOutput = self.findSEOutput(self.nukeSE) + self.nukeSEInput = self.findSEInput(self.nukeSE) + self.nukeSERunBtn = self.findSERunBtn(self.nukeSE) + + self.scripts_dir = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_Scripts")) + self.current_folder = "scripts" + self.folder_index = 0 + self.current_script = "Untitled.py" + self.current_script_modified = False + self.script_index = 0 + self.toAutosave = False + + # Load prefs + self.prefs_txt = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_Prefs.txt")) + self.loadedPrefs = self.loadPrefs() + if self.loadedPrefs != []: + try: + if "font_size" in self.loadedPrefs: + self.fontSize = self.loadedPrefs['font_size'] + self.windowDefaultSize = [ + self.loadedPrefs['window_default_w'], self.loadedPrefs['window_default_h']] + self.tabSpaces = self.loadedPrefs['tab_spaces'] + self.pinned = self.loadedPrefs['pin_default'] + if "font" in self.loadedPrefs: + self.font = self.loadedPrefs['font'] + if "color_scheme" in self.loadedPrefs: + self.color_scheme = self.loadedPrefs['color_scheme'] + if "show_labels" in self.loadedPrefs: + self.show_labels = self.loadedPrefs['show_labels'] + except TypeError: + log("KnobScripter: Failed to load preferences.") + + # Load snippets + self.snippets_txt_path = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_Snippets.txt")) + self.snippets = self.loadSnippets(maxDepth=5) + + # Current state of script (loaded when exiting node mode) + self.state_txt_path = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_State.txt")) + + # Init UI + self.initUI() + + # Talk to Nuke's Script Editor + self.setSEOutputEvent() # Make the output windowS listen! + self.clearConsole() + + def initUI(self): + ''' Initializes the tool UI''' + # ------------------- + # 1. MAIN WINDOW + # ------------------- + self.resize(self.windowDefaultSize[0], self.windowDefaultSize[1]) + self.setWindowTitle("KnobScripter - %s %s" % + (self.node.fullName(), self.knob)) + self.setObjectName("com.adrianpueyo.knobscripter") + self.move(QtGui.QCursor().pos() - QtCore.QPoint(32, 74)) + + # --------------------- + # 2. TOP BAR + # --------------------- + # --- + # 2.1. 
Left buttons + self.change_btn = QtWidgets.QToolButton() + # self.exit_node_btn.setIcon(QtGui.QIcon(KS_DIR+"/KnobScripter/icons/icons8-delete-26.png")) + self.change_btn.setIcon(QtGui.QIcon(icons_path + "icon_pick.png")) + self.change_btn.setIconSize(self.qt_icon_size) + self.change_btn.setFixedSize(self.qt_btn_size) + self.change_btn.setToolTip( + "Change to node if selected. Otherwise, change to Script Mode.") + self.change_btn.clicked.connect(self.changeClicked) + + # --- + # 2.2.A. Node mode UI + self.exit_node_btn = QtWidgets.QToolButton() + self.exit_node_btn.setIcon(QtGui.QIcon( + icons_path + "icon_exitnode.png")) + self.exit_node_btn.setIconSize(self.qt_icon_size) + self.exit_node_btn.setFixedSize(self.qt_btn_size) + self.exit_node_btn.setToolTip( + "Exit the node, and change to Script Mode.") + self.exit_node_btn.clicked.connect(self.exitNodeMode) + self.current_node_label_node = QtWidgets.QLabel(" Node:") + self.current_node_label_name = QtWidgets.QLabel(self.node.fullName()) + self.current_node_label_name.setStyleSheet("font-weight:bold;") + self.current_knob_label = QtWidgets.QLabel("Knob: ") + self.current_knob_dropdown = QtWidgets.QComboBox() + self.current_knob_dropdown.setSizeAdjustPolicy( + QtWidgets.QComboBox.AdjustToContents) + self.updateKnobDropdown() + self.current_knob_dropdown.currentIndexChanged.connect( + lambda: self.loadKnobValue(False, updateDict=True)) + + # Layout + self.node_mode_bar_layout = QtWidgets.QHBoxLayout() + self.node_mode_bar_layout.addWidget(self.exit_node_btn) + self.node_mode_bar_layout.addSpacing(2) + self.node_mode_bar_layout.addWidget(self.current_node_label_node) + self.node_mode_bar_layout.addWidget(self.current_node_label_name) + self.node_mode_bar_layout.addSpacing(2) + self.node_mode_bar_layout.addWidget(self.current_knob_dropdown) + self.node_mode_bar = QtWidgets.QWidget() + self.node_mode_bar.setLayout(self.node_mode_bar_layout) + + self.node_mode_bar_layout.setContentsMargins(0, 0, 0, 0) + + # --- + # 2.2.B. Script mode UI + self.script_label = QtWidgets.QLabel("Script: ") + + self.current_folder_dropdown = QtWidgets.QComboBox() + self.current_folder_dropdown.setSizeAdjustPolicy( + QtWidgets.QComboBox.AdjustToContents) + self.current_folder_dropdown.currentIndexChanged.connect( + self.folderDropdownChanged) + # self.current_folder_dropdown.setEditable(True) + # self.current_folder_dropdown.lineEdit().setReadOnly(True) + # self.current_folder_dropdown.lineEdit().setAlignment(Qt.AlignRight) + + self.current_script_dropdown = QtWidgets.QComboBox() + self.current_script_dropdown.setSizeAdjustPolicy( + QtWidgets.QComboBox.AdjustToContents) + self.updateFoldersDropdown() + self.updateScriptsDropdown() + self.current_script_dropdown.currentIndexChanged.connect( + self.scriptDropdownChanged) + + # Layout + self.script_mode_bar_layout = QtWidgets.QHBoxLayout() + self.script_mode_bar_layout.addWidget(self.script_label) + self.script_mode_bar_layout.addSpacing(2) + self.script_mode_bar_layout.addWidget(self.current_folder_dropdown) + self.script_mode_bar_layout.addWidget(self.current_script_dropdown) + self.script_mode_bar = QtWidgets.QWidget() + self.script_mode_bar.setLayout(self.script_mode_bar_layout) + + self.script_mode_bar_layout.setContentsMargins(0, 0, 0, 0) + + # --- + # 2.3. 
File-system buttons + # Refresh dropdowns + self.refresh_btn = QtWidgets.QToolButton() + self.refresh_btn.setIcon(QtGui.QIcon(icons_path + "icon_refresh.png")) + self.refresh_btn.setIconSize(QtCore.QSize(50, 50)) + self.refresh_btn.setIconSize(self.qt_icon_size) + self.refresh_btn.setFixedSize(self.qt_btn_size) + self.refresh_btn.setToolTip("Refresh the dropdowns.\nShortcut: F5") + self.refresh_btn.setShortcut('F5') + self.refresh_btn.clicked.connect(self.refreshClicked) + + # Reload script + self.reload_btn = QtWidgets.QToolButton() + self.reload_btn.setIcon(QtGui.QIcon(icons_path + "icon_download.png")) + self.reload_btn.setIconSize(QtCore.QSize(50, 50)) + self.reload_btn.setIconSize(self.qt_icon_size) + self.reload_btn.setFixedSize(self.qt_btn_size) + self.reload_btn.setToolTip( + "Reload the current script. Will overwrite any changes made to it.\nShortcut: Ctrl+R") + self.reload_btn.setShortcut('Ctrl+R') + self.reload_btn.clicked.connect(self.reloadClicked) + + # Save script + self.save_btn = QtWidgets.QToolButton() + self.save_btn.setIcon(QtGui.QIcon(icons_path + "icon_save.png")) + self.save_btn.setIconSize(QtCore.QSize(50, 50)) + self.save_btn.setIconSize(self.qt_icon_size) + self.save_btn.setFixedSize(self.qt_btn_size) + self.save_btn.setToolTip( + "Save the script into the selected knob or python file.\nShortcut: Ctrl+S") + self.save_btn.setShortcut('Ctrl+S') + self.save_btn.clicked.connect(self.saveClicked) + + # Layout + self.top_file_bar_layout = QtWidgets.QHBoxLayout() + self.top_file_bar_layout.addWidget(self.refresh_btn) + self.top_file_bar_layout.addWidget(self.reload_btn) + self.top_file_bar_layout.addWidget(self.save_btn) + + # --- + # 2.4. Right Side buttons + + # Run script + self.run_script_button = QtWidgets.QToolButton() + self.run_script_button.setIcon( + QtGui.QIcon(icons_path + "icon_run.png")) + self.run_script_button.setIconSize(self.qt_icon_size) + # self.run_script_button.setIconSize(self.qt_icon_size) + self.run_script_button.setFixedSize(self.qt_btn_size) + self.run_script_button.setToolTip( + "Execute the current selection on the KnobScripter, or the whole script if no selection.\nShortcut: Ctrl+Enter") + self.run_script_button.clicked.connect(self.runScript) + + # Clear console + self.clear_console_button = QtWidgets.QToolButton() + self.clear_console_button.setIcon( + QtGui.QIcon(icons_path + "icon_clearConsole.png")) + self.clear_console_button.setIconSize(QtCore.QSize(50, 50)) + self.clear_console_button.setIconSize(self.qt_icon_size) + self.clear_console_button.setFixedSize(self.qt_btn_size) + self.clear_console_button.setToolTip( + "Clear the text in the console window.\nShortcut: Click Backspace on the console.") + self.clear_console_button.clicked.connect(self.clearConsole) + + # FindReplace button + self.find_button = QtWidgets.QToolButton() + self.find_button.setIcon(QtGui.QIcon(icons_path + "icon_search.png")) + self.find_button.setIconSize(self.qt_icon_size) + self.find_button.setFixedSize(self.qt_btn_size) + self.find_button.setToolTip( + "Toggle the Find / Replace bar.\nShortcut: Ctrl+F") + self.find_button.setShortcut('Ctrl+F') + #self.find_button.setMaximumWidth(self.find_button.fontMetrics().boundingRect("Find").width() + 20) + self.find_button.setCheckable(True) + self.find_button.setFocusPolicy(QtCore.Qt.NoFocus) + self.find_button.clicked[bool].connect(self.toggleFRW) + if self.frw_open: + self.find_button.toggle() + + # Snippets + self.snippets_button = QtWidgets.QToolButton() + self.snippets_button.setIcon( 
+ QtGui.QIcon(icons_path + "icon_snippets.png")) + self.snippets_button.setIconSize(QtCore.QSize(50, 50)) + self.snippets_button.setIconSize(self.qt_icon_size) + self.snippets_button.setFixedSize(self.qt_btn_size) + self.snippets_button.setToolTip( + "Call the snippets by writing the shortcut and pressing Tab.") + self.snippets_button.clicked.connect(self.openSnippets) + + # PIN + ''' + self.pin_button = QtWidgets.QPushButton("P") + self.pin_button.setCheckable(True) + if self.pinned: + self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint) + self.pin_button.toggle() + self.pin_button.setToolTip("Toggle 'Always On Top'. Keeps the KnobScripter on top of all other windows.") + self.pin_button.setFocusPolicy(QtCore.Qt.NoFocus) + self.pin_button.setFixedSize(self.qt_btn_size) + self.pin_button.clicked[bool].connect(self.pin) + ''' + + # Prefs + self.createPrefsMenu() + self.prefs_button = QtWidgets.QPushButton() + self.prefs_button.setIcon(QtGui.QIcon(icons_path + "icon_prefs.png")) + self.prefs_button.setIconSize(self.qt_icon_size) + self.prefs_button.setFixedSize( + QtCore.QSize(self.btn_size + 10, self.btn_size)) + # self.prefs_button.clicked.connect(self.openPrefs) + self.prefs_button.setMenu(self.prefsMenu) + self.prefs_button.setStyleSheet("text-align:left;padding-left:2px;") + #self.prefs_button.setMaximumWidth(self.prefs_button.fontMetrics().boundingRect("Prefs").width() + 12) + + # Layout + self.top_right_bar_layout = QtWidgets.QHBoxLayout() + self.top_right_bar_layout.addWidget(self.run_script_button) + self.top_right_bar_layout.addWidget(self.clear_console_button) + self.top_right_bar_layout.addWidget(self.find_button) + # self.top_right_bar_layout.addWidget(self.snippets_button) + # self.top_right_bar_layout.addWidget(self.pin_button) + # self.top_right_bar_layout.addSpacing(10) + self.top_right_bar_layout.addWidget(self.prefs_button) + + # --- + # Layout + self.top_layout = QtWidgets.QHBoxLayout() + self.top_layout.setContentsMargins(0, 0, 0, 0) + # self.top_layout.setSpacing(10) + self.top_layout.addWidget(self.change_btn) + self.top_layout.addWidget(self.node_mode_bar) + self.top_layout.addWidget(self.script_mode_bar) + self.node_mode_bar.setVisible(False) + # self.top_layout.addSpacing(10) + self.top_layout.addLayout(self.top_file_bar_layout) + self.top_layout.addStretch() + self.top_layout.addLayout(self.top_right_bar_layout) + + # ---------------------- + # 3. 
SCRIPTING SECTION + # ---------------------- + # Splitter + self.splitter = QtWidgets.QSplitter(Qt.Vertical) + + # Output widget + self.script_output = ScriptOutputWidget(parent=self) + self.script_output.setReadOnly(1) + self.script_output.setAcceptRichText(0) + self.script_output.setTabStopWidth( + self.script_output.tabStopWidth() / 4) + self.script_output.setFocusPolicy(Qt.ClickFocus) + self.script_output.setAutoFillBackground(0) + self.script_output.installEventFilter(self) + + # Script Editor + self.script_editor = KnobScripterTextEditMain(self, self.script_output) + self.script_editor.setMinimumHeight(30) + self.script_editor.setStyleSheet( + 'background:#282828;color:#EEE;') # Main Colors + self.script_editor.textChanged.connect(self.setModified) + self.highlighter = KSScriptEditorHighlighter( + self.script_editor.document(), self) + self.script_editor.cursorPositionChanged.connect(self.setTextSelection) + self.script_editor_font = QtGui.QFont() + self.script_editor_font.setFamily(self.font) + self.script_editor_font.setStyleHint(QtGui.QFont.Monospace) + self.script_editor_font.setFixedPitch(True) + self.script_editor_font.setPointSize(self.fontSize) + self.script_editor.setFont(self.script_editor_font) + self.script_editor.setTabStopWidth( + self.tabSpaces * QtGui.QFontMetrics(self.script_editor_font).width(' ')) + + # Add input and output to splitter + self.splitter.addWidget(self.script_output) + self.splitter.addWidget(self.script_editor) + self.splitter.setStretchFactor(0, 0) + + # FindReplace widget + self.frw = FindReplaceWidget(self) + self.frw.setVisible(self.frw_open) + + # --- + # Layout + self.scripting_layout = QtWidgets.QVBoxLayout() + self.scripting_layout.setContentsMargins(0, 0, 0, 0) + self.scripting_layout.setSpacing(0) + self.scripting_layout.addWidget(self.splitter) + self.scripting_layout.addWidget(self.frw) + + # --------------- + # MASTER LAYOUT + # --------------- + self.master_layout = QtWidgets.QVBoxLayout() + self.master_layout.setSpacing(5) + self.master_layout.setContentsMargins(8, 8, 8, 8) + self.master_layout.addLayout(self.top_layout) + self.master_layout.addLayout(self.scripting_layout) + # self.master_layout.addLayout(self.bottom_layout) + self.setLayout(self.master_layout) + + # ---------------- + # MAIN WINDOW UI + # ---------------- + size_policy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) + self.setSizePolicy(size_policy) + self.setMinimumWidth(160) + + if self.pinned: + self.setWindowFlags(self.windowFlags() | + QtCore.Qt.WindowStaysOnTopHint) + + # Set default values based on mode + if self.nodeMode: + self.current_knob_dropdown.blockSignals(True) + self.node_mode_bar.setVisible(True) + self.script_mode_bar.setVisible(False) + self.setCurrentKnob(self.knob) + self.loadKnobValue(check=False) + self.setKnobModified(False) + self.current_knob_dropdown.blockSignals(False) + self.splitter.setSizes([0, 1]) + else: + self.exitNodeMode() + self.script_editor.setFocus() + + # Preferences submenus + def createPrefsMenu(self): + + # Actions + self.echoAct = QtWidgets.QAction("Echo python commands", self, checkable=True, + statusTip="Toggle nuke's 'Echo all python commands to ScriptEditor'", triggered=self.toggleEcho) + if nuke.toNode("preferences").knob("echoAllCommands").value(): + self.echoAct.toggle() + self.pinAct = QtWidgets.QAction("Always on top", self, checkable=True, + statusTip="Keeps the KnobScripter window always on top or not.", triggered=self.togglePin) + if self.pinned: + 
self.setWindowFlags(self.windowFlags() | + QtCore.Qt.WindowStaysOnTopHint) + self.pinAct.toggle() + self.helpAct = QtWidgets.QAction( + "&Help", self, statusTip="Open the KnobScripter help in your browser.", shortcut="F1", triggered=self.showHelp) + self.nukepediaAct = QtWidgets.QAction( + "Show in Nukepedia", self, statusTip="Open the KnobScripter download page on Nukepedia.", triggered=self.showInNukepedia) + self.githubAct = QtWidgets.QAction( + "Show in GitHub", self, statusTip="Open the KnobScripter repo on GitHub.", triggered=self.showInGithub) + self.snippetsAct = QtWidgets.QAction( + "Snippets", self, statusTip="Open the Snippets editor.", triggered=self.openSnippets) + self.snippetsAct.setIcon(QtGui.QIcon(icons_path + "icon_snippets.png")) + # self.snippetsAct = QtWidgets.QAction("Keywords", self, statusTip="Add custom keywords.", triggered=self.openSnippets) #TODO THIS + self.prefsAct = QtWidgets.QAction( + "Preferences", self, statusTip="Open the Preferences panel.", triggered=self.openPrefs) + self.prefsAct.setIcon(QtGui.QIcon(icons_path + "icon_prefs.png")) + + # Menus + self.prefsMenu = QtWidgets.QMenu("Preferences") + self.prefsMenu.addAction(self.echoAct) + self.prefsMenu.addAction(self.pinAct) + self.prefsMenu.addSeparator() + self.prefsMenu.addAction(self.nukepediaAct) + self.prefsMenu.addAction(self.githubAct) + self.prefsMenu.addSeparator() + self.prefsMenu.addAction(self.helpAct) + self.prefsMenu.addSeparator() + self.prefsMenu.addAction(self.snippetsAct) + self.prefsMenu.addAction(self.prefsAct) + + def initEcho(self): + ''' Initializes the echo checkable QAction based on Nuke's state ''' + echo_knob = nuke.toNode("preferences").knob("echoAllCommands") + self.echoAct.setChecked(echo_knob.value()) + + def toggleEcho(self): + ''' Toggle the "Echo python commands" from Nuke ''' + echo_knob = nuke.toNode("preferences").knob("echoAllCommands") + echo_knob.setValue(self.echoAct.isChecked()) + + def togglePin(self): + ''' Toggle "always on top" based on the submenu button ''' + self.pin(self.pinAct.isChecked()) + + def showInNukepedia(self): + openUrl("http://www.nukepedia.com/python/ui/knobscripter") + + def showInGithub(self): + openUrl("https://github.com/adrianpueyo/KnobScripter") + + def showHelp(self): + openUrl("https://vimeo.com/adrianpueyo/knobscripter2") + + # Node Mode + + def updateKnobDropdown(self): + ''' Populate knob dropdown list ''' + self.current_knob_dropdown.clear() # First remove all items + defaultKnobs = ["knobChanged", "onCreate", "onScriptLoad", "onScriptSave", "onScriptClose", "onDestroy", + "updateUI", "autolabel", "beforeRender", "beforeFrameRender", "afterFrameRender", "afterRender"] + permittedKnobClasses = ["PyScript_Knob", "PythonCustomKnob"] + counter = 0 + for i in self.node.knobs(): + if i not in defaultKnobs and self.node.knob(i).Class() in permittedKnobClasses: + if self.show_labels: + i_full = "{} ({})".format(self.node.knob(i).label(), i) + else: + i_full = i + + if i in self.unsavedKnobs.keys(): + self.current_knob_dropdown.addItem(i_full + "(*)", i) + else: + self.current_knob_dropdown.addItem(i_full, i) + + counter += 1 + if counter > 0: + self.current_knob_dropdown.insertSeparator(counter) + counter += 1 + self.current_knob_dropdown.insertSeparator(counter) + counter += 1 + for i in self.node.knobs(): + if i in defaultKnobs: + if i in self.unsavedKnobs.keys(): + self.current_knob_dropdown.addItem(i + "(*)", i) + else: + self.current_knob_dropdown.addItem(i, i) + counter += 1 + return + + def loadKnobValue(self, check=True, 
updateDict=False): + ''' Get the content of the knob value and populate the editor ''' + if self.toLoadKnob == False: + return + dropdown_value = self.current_knob_dropdown.itemData( + self.current_knob_dropdown.currentIndex()) # knobChanged... + try: + obtained_knobValue = str(self.node[dropdown_value].value()) + obtained_scrollValue = 0 + edited_knobValue = self.script_editor.toPlainText() + except: + error_message = QtWidgets.QMessageBox.information( + None, "", "Unable to find %s.%s" % (self.node.name(), dropdown_value)) + error_message.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + error_message.exec_() + return + # If there were changes to the previous knob, update the dictionary + if updateDict == True: + self.unsavedKnobs[self.knob] = edited_knobValue + self.scrollPos[self.knob] = self.script_editor.verticalScrollBar( + ).value() + prev_knob = self.knob # knobChanged... + + self.knob = self.current_knob_dropdown.itemData( + self.current_knob_dropdown.currentIndex()) # knobChanged... + + if check and obtained_knobValue != edited_knobValue: + msgBox = QtWidgets.QMessageBox() + msgBox.setText("The Script Editor has been modified.") + msgBox.setInformativeText( + "Do you want to overwrite the current code on this editor?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + self.setCurrentKnob(prev_knob) + return + # If order comes from a dropdown update, update value from dictionary if possible, otherwise update normally + self.setWindowTitle("KnobScripter - %s %s" % + (self.node.name(), self.knob)) + if updateDict: + if self.knob in self.unsavedKnobs: + if self.unsavedKnobs[self.knob] == obtained_knobValue: + self.script_editor.setPlainText(obtained_knobValue) + self.setKnobModified(False) + else: + obtained_knobValue = self.unsavedKnobs[self.knob] + self.script_editor.setPlainText(obtained_knobValue) + self.setKnobModified(True) + else: + self.script_editor.setPlainText(obtained_knobValue) + self.setKnobModified(False) + + if self.knob in self.scrollPos: + obtained_scrollValue = self.scrollPos[self.knob] + else: + self.script_editor.setPlainText(obtained_knobValue) + + cursor = self.script_editor.textCursor() + self.script_editor.setTextCursor(cursor) + self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) + return + + def loadAllKnobValues(self): + ''' Load all knobs button's function ''' + if len(self.unsavedKnobs) >= 1: + msgBox = QtWidgets.QMessageBox() + msgBox.setText( + "Do you want to reload all python and callback knobs?") + msgBox.setInformativeText( + "Unsaved changes on this editor will be lost.") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + self.unsavedKnobs = {} + return + + def saveKnobValue(self, check=True): + ''' Save the text from the editor to the node's knobChanged knob ''' + dropdown_value = self.current_knob_dropdown.itemData( + self.current_knob_dropdown.currentIndex()) + try: + obtained_knobValue = str(self.node[dropdown_value].value()) + self.knob = dropdown_value + except: + error_message = 
QtWidgets.QMessageBox.information( + None, "", "Unable to find %s.%s" % (self.node.name(), dropdown_value)) + error_message.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + error_message.exec_() + return + edited_knobValue = self.script_editor.toPlainText() + if check and obtained_knobValue != edited_knobValue: + msgBox = QtWidgets.QMessageBox() + msgBox.setText("Do you want to overwrite %s.%s?" % + (self.node.name(), dropdown_value)) + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + self.node[dropdown_value].setValue(edited_knobValue) + self.setKnobModified( + modified=False, knob=dropdown_value, changeTitle=True) + nuke.tcl("modified 1") + if self.knob in self.unsavedKnobs: + del self.unsavedKnobs[self.knob] + return + + def saveAllKnobValues(self, check=True): + ''' Save all knobs button's function ''' + if self.updateUnsavedKnobs() > 0 and check: + msgBox = QtWidgets.QMessageBox() + msgBox.setText( + "Do you want to save all modified python and callback knobs?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + saveErrors = 0 + savedCount = 0 + for k in self.unsavedKnobs.copy(): + try: + self.node.knob(k).setValue(self.unsavedKnobs[k]) + del self.unsavedKnobs[k] + savedCount += 1 + nuke.tcl("modified 1") + except: + saveErrors += 1 + if saveErrors > 0: + errorBox = QtWidgets.QMessageBox() + errorBox.setText("Error saving %s knob%s." % + (str(saveErrors), int(saveErrors > 1) * "s")) + errorBox.setIcon(QtWidgets.QMessageBox.Warning) + errorBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + errorBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = errorBox.exec_() + else: + log("KnobScripter: %s knobs saved" % str(savedCount)) + return + + def setCurrentKnob(self, knobToSet): + ''' Set current knob ''' + KnobDropdownItems = [] + for i in range(self.current_knob_dropdown.count()): + if self.current_knob_dropdown.itemData(i) is not None: + KnobDropdownItems.append( + self.current_knob_dropdown.itemData(i)) + else: + KnobDropdownItems.append("---") + if knobToSet in KnobDropdownItems: + index = KnobDropdownItems.index(knobToSet) + self.current_knob_dropdown.setCurrentIndex(index) + return + + def updateUnsavedKnobs(self, first_time=False): + ''' Clear unchanged knobs from the dict and return the number of unsaved knobs ''' + if not self.node: + # Node has been deleted, so simply return 0. Who cares. + return 0 + edited_knobValue = self.script_editor.toPlainText() + self.unsavedKnobs[self.knob] = edited_knobValue + if len(self.unsavedKnobs) > 0: + for k in self.unsavedKnobs.copy(): + if self.node.knob(k): + if str(self.node.knob(k).value()) == str(self.unsavedKnobs[k]): + del self.unsavedKnobs[k] + else: + del self.unsavedKnobs[k] + # Set appropriate knobs modified... 
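        # The knob dropdown keeps the raw knob name as itemData() and the display
        # text (optionally the label, plus a "(*)" modified marker) as itemText().
        # A minimal sketch of that mapping (illustrative only, guarded so it
        # never executes as part of the file):
        if False:
            for idx in range(self.current_knob_dropdown.count()):
                raw = self.current_knob_dropdown.itemData(idx)    # e.g. "knobChanged"
                shown = self.current_knob_dropdown.itemText(idx)  # e.g. "knobChanged(*)"
                log("{0} -> {1}".format(raw, shown))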
+ knobs_dropdown = self.current_knob_dropdown + all_knobs = [knobs_dropdown.itemData(i) + for i in range(knobs_dropdown.count())] + for key in all_knobs: + if key in self.unsavedKnobs.keys(): + self.setKnobModified( + modified=True, knob=key, changeTitle=False) + else: + self.setKnobModified( + modified=False, knob=key, changeTitle=False) + + return len(self.unsavedKnobs) + + def setKnobModified(self, modified=True, knob="", changeTitle=True): + ''' Sets the current knob modified, title and whatever else we need ''' + if knob == "": + knob = self.knob + if modified: + self.modifiedKnobs.add(knob) + else: + self.modifiedKnobs.discard(knob) + + if changeTitle: + title_modified_string = " [modified]" + windowTitle = self.windowTitle().split(title_modified_string)[0] + if modified == True: + windowTitle += title_modified_string + self.setWindowTitle(windowTitle) + + try: + knobs_dropdown = self.current_knob_dropdown + kd_index = knobs_dropdown.currentIndex() + kd_data = knobs_dropdown.itemData(kd_index) + # Show the label only for scripted knobs, mirroring updateKnobDropdown + knob_obj = self.node.knob(kd_data) + if self.show_labels and knob_obj is not None and knob_obj.Class() in ["PyScript_Knob", "PythonCustomKnob"]: + kd_data = "{} ({})".format(knob_obj.label(), kd_data) + if modified == False: + knobs_dropdown.setItemText(kd_index, kd_data) + else: + knobs_dropdown.setItemText(kd_index, kd_data + "(*)") + except: + pass + + # Script Mode + def updateFoldersDropdown(self): + ''' Populate folders dropdown list ''' + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.clear() # First remove all items + defaultFolders = ["scripts"] + scriptFolders = [] + counter = 0 + for f in defaultFolders: + self.makeScriptFolder(f) + self.current_folder_dropdown.addItem(f + "/", f) + counter += 1 + + try: + scriptFolders = sorted([f for f in os.listdir(self.scripts_dir) if os.path.isdir( + os.path.join(self.scripts_dir, f))]) # Accepts symlinks!!! 
+ except: + log("Couldn't read any script folders.") + + for f in scriptFolders: + fname = f.split("/")[-1] + if fname in defaultFolders: + continue + self.current_folder_dropdown.addItem(fname + "/", fname) + counter += 1 + + # print scriptFolders + if counter > 0: + self.current_folder_dropdown.insertSeparator(counter) + counter += 1 + # self.current_folder_dropdown.insertSeparator(counter) + #counter += 1 + self.current_folder_dropdown.addItem("New", "create new") + self.current_folder_dropdown.addItem("Open...", "open in browser") + self.current_folder_dropdown.addItem("Add custom", "add custom path") + self.folder_index = self.current_folder_dropdown.currentIndex() + self.current_folder = self.current_folder_dropdown.itemData( + self.folder_index) + self.current_folder_dropdown.blockSignals(False) + return + + def updateScriptsDropdown(self): + ''' Populate py scripts dropdown list ''' + self.current_script_dropdown.blockSignals(True) + self.current_script_dropdown.clear() # First remove all items + QtWidgets.QApplication.processEvents() + log("# Updating scripts dropdown...") + log("scripts dir:" + self.scripts_dir) + log("current folder:" + self.current_folder) + log("previous current script:" + self.current_script) + #current_folder = self.current_folder_dropdown.itemData(self.current_folder_dropdown.currentIndex()) + current_folder_path = os.path.join( + self.scripts_dir, self.current_folder) + defaultScripts = ["Untitled.py"] + found_scripts = [] + counter = 0 + # All files and folders inside of the folder + dir_list = os.listdir(current_folder_path) + try: + found_scripts = sorted([f for f in dir_list if f.endswith(".py")]) + found_temp_scripts = [ + f for f in dir_list if f.endswith(".py.autosave")] + except: + log("Couldn't find any scripts in the selected folder.") + if not len(found_scripts): + for s in defaultScripts: + if s + ".autosave" in found_temp_scripts: + self.current_script_dropdown.addItem(s + "(*)", s) + else: + self.current_script_dropdown.addItem(s, s) + counter += 1 + else: + for s in defaultScripts: + if s + ".autosave" in found_temp_scripts: + self.current_script_dropdown.addItem(s + "(*)", s) + elif s in found_scripts: + self.current_script_dropdown.addItem(s, s) + for s in found_scripts: + if s in defaultScripts: + continue + sname = s.split("/")[-1] + if s + ".autosave" in found_temp_scripts: + self.current_script_dropdown.addItem(sname + "(*)", sname) + else: + self.current_script_dropdown.addItem(sname, sname) + counter += 1 + # else: #Add the found scripts to the dropdown + if counter > 0: + counter += 1 + self.current_script_dropdown.insertSeparator(counter) + counter += 1 + self.current_script_dropdown.insertSeparator(counter) + self.current_script_dropdown.addItem("New", "create new") + self.current_script_dropdown.addItem("Duplicate", "create duplicate") + self.current_script_dropdown.addItem("Delete", "delete script") + self.current_script_dropdown.addItem("Open", "open in browser") + #self.script_index = self.current_script_dropdown.currentIndex() + self.script_index = 0 + self.current_script = self.current_script_dropdown.itemData( + self.script_index) + log("Finished updating scripts dropdown.") + log("current_script:" + self.current_script) + self.current_script_dropdown.blockSignals(False) + return + + def makeScriptFolder(self, name="scripts"): + folder_path = os.path.join(self.scripts_dir, name) + if not os.path.exists(folder_path): + try: + os.makedirs(folder_path) + return True + except: + print "Couldn't create the scripting 
folders.\nPlease check your OS write permissions." + return False + + def makeScriptFile(self, name="Untitled.py", folder="scripts", empty=True): + script_path = os.path.join(self.scripts_dir, self.current_folder, name) + if not os.path.isfile(script_path): + try: + self.current_script_file = open(script_path, 'w') + return True + except: + print "Couldn't create the script file.\nPlease check your OS write permissions." + return False + + def setCurrentFolder(self, folderName): + ''' Set current folder ON THE DROPDOWN ONLY''' + folderList = [self.current_folder_dropdown.itemData( + i) for i in range(self.current_folder_dropdown.count())] + if folderName in folderList: + index = folderList.index(folderName) + self.current_folder_dropdown.setCurrentIndex(index) + self.current_folder = folderName + self.folder_index = self.current_folder_dropdown.currentIndex() + self.current_folder = self.current_folder_dropdown.itemData( + self.folder_index) + return + + def setCurrentScript(self, scriptName): + ''' Set current script ON THE DROPDOWN ONLY ''' + scriptList = [self.current_script_dropdown.itemData( + i) for i in range(self.current_script_dropdown.count())] + if scriptName in scriptList: + index = scriptList.index(scriptName) + self.current_script_dropdown.setCurrentIndex(index) + self.current_script = scriptName + self.script_index = self.current_script_dropdown.currentIndex() + self.current_script = self.current_script_dropdown.itemData( + self.script_index) + return + + def loadScriptContents(self, check=False, pyOnly=False, folder=""): + ''' Get the contents of the selected script and populate the editor ''' + log("# About to load script contents now.") + obtained_scrollValue = 0 + obtained_cursorPosValue = [0, 0] # Position, anchor + if folder == "": + folder = self.current_folder + script_path = os.path.join( + self.scripts_dir, folder, self.current_script) + script_path_temp = script_path + ".autosave" + if (self.current_folder + "/" + self.current_script) in self.scrollPos: + obtained_scrollValue = self.scrollPos[self.current_folder + + "/" + self.current_script] + if (self.current_folder + "/" + self.current_script) in self.cursorPos: + obtained_cursorPosValue = self.cursorPos[self.current_folder + + "/" + self.current_script] + + # 1: If autosave exists and pyOnly is false, load it + if os.path.isfile(script_path_temp) and not pyOnly: + log("Loading .py.autosave file\n---") + with open(script_path_temp, 'r') as script: + content = script.read() + self.script_editor.setPlainText(content) + self.setScriptModified(True) + self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) + + # 2: Try to load the .py as first priority, if it exists + elif os.path.isfile(script_path): + log("Loading .py file\n---") + with open(script_path, 'r') as script: + content = script.read() + current_text = self.script_editor.toPlainText().encode("utf8") + if check and current_text != content and current_text.strip() != "": + msgBox = QtWidgets.QMessageBox() + msgBox.setText("The script has been modified.") + msgBox.setInformativeText( + "Do you want to overwrite the current code on this editor?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + # Clear trash + if os.path.isfile(script_path_temp): 
os.remove(script_path_temp) + log("Removed " + script_path_temp) + self.setScriptModified(False) + self.script_editor.setPlainText(content) + self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) + self.setScriptModified(False) + self.loadScriptState() + self.setScriptState() + + # 3: If .py doesn't exist... only then stick to the autosave + elif os.path.isfile(script_path_temp): + with open(script_path_temp, 'r') as script: + content = script.read() + + msgBox = QtWidgets.QMessageBox() + msgBox.setText("The .py file hasn't been found.") + msgBox.setInformativeText( + "Do you want to clear the current code on this editor?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + + # Clear trash + os.remove(script_path_temp) + log("Removed " + script_path_temp) + self.script_editor.setPlainText("") + self.updateScriptsDropdown() + self.loadScriptContents(check=False) + self.loadScriptState() + self.setScriptState() + + else: + content = "" + self.script_editor.setPlainText(content) + self.setScriptModified(False) + if self.current_folder + "/" + self.current_script in self.scrollPos: + del self.scrollPos[self.current_folder + + "/" + self.current_script] + if self.current_folder + "/" + self.current_script in self.cursorPos: + del self.cursorPos[self.current_folder + + "/" + self.current_script] + + self.setWindowTitle("KnobScripter - %s/%s" % + (self.current_folder, self.current_script)) + return + + def saveScriptContents(self, temp=True): + ''' Save the current contents of the editor into the python file. If temp == True, saves a .py.autosave file ''' + log("\n# About to save script contents now.") + log("Temp mode is: " + str(temp)) + log("self.current_folder: " + self.current_folder) + log("self.current_script: " + self.current_script) + script_path = os.path.join( + self.scripts_dir, self.current_folder, self.current_script) + script_path_temp = script_path + ".autosave" + orig_content = "" + content = self.script_editor.toPlainText().encode('utf8') + + if temp == True: + if os.path.isfile(script_path): + with open(script_path, 'r') as script: + orig_content = script.read() + # If script path doesn't exist and autosave does but the script is empty... 
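            # In other words, the user emptied the editor: drop the stale
            # .py.autosave rather than autosaving an empty buffer.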
+ elif content == "" and os.path.isfile(script_path_temp): + os.remove(script_path_temp) + return + if content != orig_content: + with open(script_path_temp, 'w') as script: + script.write(content) + else: + if os.path.isfile(script_path_temp): + os.remove(script_path_temp) + log("Nothing to save") + return + else: + with open(script_path, 'w') as script: + script.write(self.script_editor.toPlainText().encode('utf8')) + # Clear trash + if os.path.isfile(script_path_temp): + os.remove(script_path_temp) + log("Removed " + script_path_temp) + self.setScriptModified(False) + self.saveScrollValue() + self.saveCursorPosValue() + log("Saved " + script_path + "\n---") + return + + def deleteScript(self, check=True, folder=""): + ''' Get the contents of the selected script and populate the editor ''' + log("# About to delete the .py and/or autosave script now.") + if folder == "": + folder = self.current_folder + script_path = os.path.join( + self.scripts_dir, folder, self.current_script) + script_path_temp = script_path + ".autosave" + if check: + msgBox = QtWidgets.QMessageBox() + msgBox.setText("You're about to delete this script.") + msgBox.setInformativeText( + "Are you sure you want to delete {}?".format(self.current_script)) + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.No) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return False + + if os.path.isfile(script_path_temp): + os.remove(script_path_temp) + log("Removed " + script_path_temp) + + if os.path.isfile(script_path): + os.remove(script_path) + log("Removed " + script_path) + + return True + + def folderDropdownChanged(self): + '''Executed when the current folder dropdown is changed''' + self.saveScriptState() + log("# folder dropdown changed") + folders_dropdown = self.current_folder_dropdown + fd_value = folders_dropdown.currentText() + fd_index = folders_dropdown.currentIndex() + fd_data = folders_dropdown.itemData(fd_index) + if fd_data == "create new": + panel = FileNameDialog(self, mode="folder") + # panel.setWidth(260) + # panel.addSingleLineInput("Name:","") + if panel.exec_(): + # Accepted + folder_name = panel.text + if os.path.isdir(os.path.join(self.scripts_dir, folder_name)): + self.messageBox("Folder already exists.") + self.setCurrentFolder(self.current_folder) + if self.makeScriptFolder(name=folder_name): + self.saveScriptContents(temp=True) + # Success creating the folder + self.current_folder = folder_name + self.updateFoldersDropdown() + self.setCurrentFolder(folder_name) + self.updateScriptsDropdown() + self.loadScriptContents(check=False) + else: + self.messageBox("There was a problem creating the folder.") + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex( + self.folder_index) + self.current_folder_dropdown.blockSignals(False) + else: + # Canceled/rejected + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex(self.folder_index) + self.current_folder_dropdown.blockSignals(False) + return + + elif fd_data == "open in browser": + current_folder_path = os.path.join( + self.scripts_dir, self.current_folder) + self.openInFileBrowser(current_folder_path) + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex(self.folder_index) + self.current_folder_dropdown.blockSignals(False) + 
return + + elif fd_data == "add custom path": + folder_path = nuke.getFilename('Select custom folder.') + if folder_path is not None: + if folder_path.endswith("/"): + aliasName = folder_path.split("/")[-2] + else: + aliasName = folder_path.split("/")[-1] + if not os.path.isdir(folder_path): + self.messageBox( + "Folder not found. Please try again with the full path to a folder.") + elif not len(aliasName): + self.messageBox( + "Couldn't get a folder name from that path. Please try again.") + elif os.path.exists(os.path.join(self.scripts_dir, aliasName)): + self.messageBox( + "Folder with the same name already exists. Please delete or rename it first.") + else: + # All good + os.symlink(folder_path, os.path.join( + self.scripts_dir, aliasName)) + self.saveScriptContents(temp=True) + self.current_folder = aliasName + self.updateFoldersDropdown() + self.setCurrentFolder(aliasName) + self.updateScriptsDropdown() + self.loadScriptContents(check=False) + self.script_editor.setFocus() + return + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex(self.folder_index) + self.current_folder_dropdown.blockSignals(False) + else: + # 1: Save current script as temp if needed + self.saveScriptContents(temp=True) + # 2: Set the new folder in the variables + self.current_folder = fd_data + self.folder_index = fd_index + # 3: Update the scripts dropdown + self.updateScriptsDropdown() + # 4: Load the current script! + self.loadScriptContents() + self.script_editor.setFocus() + + self.loadScriptState() + self.setScriptState() + + return + + def scriptDropdownChanged(self): + '''Executed when the current script dropdown is changed. Should only be triggered by a manual dropdown change, not by other functions.''' + self.saveScriptState() + scripts_dropdown = self.current_script_dropdown + sd_value = scripts_dropdown.currentText() + sd_index = scripts_dropdown.currentIndex() + sd_data = scripts_dropdown.itemData(sd_index) + if sd_data == "create new": + self.current_script_dropdown.blockSignals(True) + panel = FileNameDialog(self, mode="script") + if panel.exec_(): + # Accepted + script_name = panel.text + ".py" + script_path = os.path.join( + self.scripts_dir, self.current_folder, script_name) + log(script_name) + log(script_path) + if os.path.isfile(script_path): + self.messageBox("Script already exists.") + self.current_script_dropdown.setCurrentIndex( + self.script_index) + if self.makeScriptFile(name=script_name, folder=self.current_folder): + # Success creating the script file + self.saveScriptContents(temp=True) + self.updateScriptsDropdown() + if self.current_script != "Untitled.py": + self.script_editor.setPlainText("") + self.current_script = script_name + self.setCurrentScript(script_name) + self.saveScriptContents(temp=False) + # self.loadScriptContents() + else: + self.messageBox("There was a problem creating the script.") + self.current_script_dropdown.setCurrentIndex( + self.script_index) + else: + # Canceled/rejected + self.current_script_dropdown.setCurrentIndex(self.script_index) + return + self.current_script_dropdown.blockSignals(False) + + elif sd_data == "create duplicate": + self.current_script_dropdown.blockSignals(True) + current_script_path = os.path.join( + self.scripts_dir, self.current_folder, self.current_script) + + current_name = self.current_script + if self.current_script.endswith(".py"): + current_name = current_name[:-3] + + test_name = current_name + while True: + test_name += "_copy" + new_script_path = os.path.join( + self.scripts_dir, self.current_folder, test_name + ".py") + if not 
os.path.isfile(new_script_path): + break + + script_name = test_name + ".py" + + if self.makeScriptFile(name=script_name, folder=self.current_folder): + # Success duplicating the script + self.saveScriptContents(temp=True) + self.updateScriptsDropdown() + # self.script_editor.setPlainText("") + self.current_script = script_name + self.setCurrentScript(script_name) + self.script_editor.setFocus() + else: + self.messageBox("There was a problem duplicating the script.") + self.current_script_dropdown.setCurrentIndex(self.script_index) + + self.current_script_dropdown.blockSignals(False) + + elif sd_data == "open in browser": + current_script_path = os.path.join( + self.scripts_dir, self.current_folder, self.current_script) + self.openInFileBrowser(current_script_path) + self.current_script_dropdown.blockSignals(True) + self.current_script_dropdown.setCurrentIndex(self.script_index) + self.current_script_dropdown.blockSignals(False) + return + + elif sd_data == "delete script": + if self.deleteScript(): + self.updateScriptsDropdown() + self.loadScriptContents() + else: + self.current_script_dropdown.blockSignals(True) + self.current_script_dropdown.setCurrentIndex(self.script_index) + self.current_script_dropdown.blockSignals(False) + + else: + self.saveScriptContents() + self.current_script = sd_data + self.script_index = sd_index + self.setCurrentScript(self.current_script) + self.loadScriptContents() + self.script_editor.setFocus() + self.loadScriptState() + self.setScriptState() + return + + def setScriptModified(self, modified=True): + ''' Sets self.current_script_modified, title and whatever else we need ''' + self.current_script_modified = modified + title_modified_string = " [modified]" + windowTitle = self.windowTitle().split(title_modified_string)[0] + if modified == True: + windowTitle += title_modified_string + self.setWindowTitle(windowTitle) + try: + scripts_dropdown = self.current_script_dropdown + sd_index = scripts_dropdown.currentIndex() + sd_data = scripts_dropdown.itemData(sd_index) + if modified == False: + scripts_dropdown.setItemText(sd_index, sd_data) + else: + scripts_dropdown.setItemText(sd_index, sd_data + "(*)") + except: + pass + + def openInFileBrowser(self, path=""): + OS = platform.system() + if not os.path.exists(path): + path = KS_DIR + if OS == "Windows": + os.startfile(path) + elif OS == "Darwin": + subprocess.Popen(["open", path]) + else: + subprocess.Popen(["xdg-open", path]) + + def loadScriptState(self): + ''' + Loads the last state of the script from a file inside the SE directory's root. 
+ Populates self.scrollPos and self.cursorPos from the saved state. + ''' + self.state_dict = {} + if not os.path.isfile(self.state_txt_path): + return False + else: + with open(self.state_txt_path, "r") as f: + self.state_dict = json.load(f) + + log("Loading script state into self.state_dict, self.scrollPos, self.cursorPos") + log(self.state_dict) + + if "scroll_pos" in self.state_dict: + self.scrollPos = self.state_dict["scroll_pos"] + if "cursor_pos" in self.state_dict: + self.cursorPos = self.state_dict["cursor_pos"] + + def setScriptState(self): + ''' + Applies the stored script state from self.state_dict to the current script, if available + ''' + script_fullname = self.current_folder + "/" + self.current_script + + if "scroll_pos" in self.state_dict: + if script_fullname in self.state_dict["scroll_pos"]: + self.script_editor.verticalScrollBar().setValue( + int(self.state_dict["scroll_pos"][script_fullname])) + + if "cursor_pos" in self.state_dict: + if script_fullname in self.state_dict["cursor_pos"]: + cursor = self.script_editor.textCursor() + cursor.setPosition(int( + self.state_dict["cursor_pos"][script_fullname][1]), QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(int( + self.state_dict["cursor_pos"][script_fullname][0]), QtGui.QTextCursor.KeepAnchor) + self.script_editor.setTextCursor(cursor) + + if 'splitter_sizes' in self.state_dict: + self.splitter.setSizes(self.state_dict['splitter_sizes']) + + def setLastScript(self): + if 'last_folder' in self.state_dict and 'last_script' in self.state_dict: + self.updateFoldersDropdown() + self.setCurrentFolder(self.state_dict['last_folder']) + self.updateScriptsDropdown() + self.setCurrentScript(self.state_dict['last_script']) + self.loadScriptContents() + self.script_editor.setFocus() + + def saveScriptState(self): + ''' Stores the current state of the script into a file inside the SE directory's root ''' + log("About to save script state...") + ''' + # self.state_dict = {} + if os.path.isfile(self.state_txt_path): + with open(self.state_txt_path, "r") as f: + self.state_dict = json.load(f) + + if "scroll_pos" in self.state_dict: + self.scrollPos = self.state_dict["scroll_pos"] + if "cursor_pos" in self.state_dict: + self.cursorPos = self.state_dict["cursor_pos"] + + ''' + self.loadScriptState() + + # Overwrite current values into the scriptState + self.saveScrollValue() + self.saveCursorPosValue() + + self.state_dict['scroll_pos'] = self.scrollPos + self.state_dict['cursor_pos'] = self.cursorPos + self.state_dict['last_folder'] = self.current_folder + self.state_dict['last_script'] = self.current_script + self.state_dict['splitter_sizes'] = self.splitter.sizes() + + with open(self.state_txt_path, "w") as f: + json.dump(self.state_dict, f, sort_keys=True, indent=4) + return + + # Autosave background loop + def autosave(self): + if self.toAutosave: + # Save the script... 
+ self.saveScriptContents() + self.toAutosave = False + self.saveScriptState() + log("autosaving...") + return + + # Global stuff + def setTextSelection(self): + self.highlighter.selected_text = self.script_editor.textCursor().selection().toPlainText() + return + + def eventFilter(self, object, event): + if event.type() == QtCore.QEvent.KeyPress: + return QtWidgets.QWidget.eventFilter(self, object, event) + else: + return QtWidgets.QWidget.eventFilter(self, object, event) + + def resizeEvent(self, res_event): + w = self.frameGeometry().width() + self.current_node_label_node.setVisible(w > 460) + self.script_label.setVisible(w > 460) + return super(KnobScripter, self).resizeEvent(res_event) + + def changeClicked(self, newNode=""): + ''' Change node ''' + try: + print "Changing from " + self.node.name() + except: + self.node = None + if not len(nuke.selectedNodes()): + self.exitNodeMode() + return + nuke.menu("Nuke").findItem( + "Edit/Node/Update KnobScripter Context").invoke() + selection = knobScripterSelectedNodes + if self.nodeMode: # Only update the number of unsaved knobs if we were already in node mode + if self.node is not None: + updatedCount = self.updateUnsavedKnobs() + else: + updatedCount = 0 + else: + updatedCount = 0 + self.autosave() + if newNode != "" and nuke.exists(newNode): + selection = [newNode] + elif not len(selection): + node_dialog = ChooseNodeDialog(self) + if node_dialog.exec_(): + # Accepted + selection = [nuke.toNode(node_dialog.name)] + else: + return + + # Change to node mode... + self.node_mode_bar.setVisible(True) + self.script_mode_bar.setVisible(False) + if not self.nodeMode: + self.saveScriptContents() + self.toAutosave = False + self.saveScriptState() + self.splitter.setSizes([0, 1]) + self.nodeMode = True + + # If already selected, pass + if self.node is not None and selection[0].fullName() == self.node.fullName(): + self.messageBox("Please select a different node first!") + return + elif updatedCount > 0: + msgBox = QtWidgets.QMessageBox() + msgBox.setText( + "Save changes to %s knob%s before changing the node?" % (str(updatedCount), int(updatedCount > 1) * "s")) + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.Yes: + self.saveAllKnobValues(check=False) + elif reply == QtWidgets.QMessageBox.Cancel: + return + if len(selection) > 1: + self.messageBox( + "More than one node selected.\nChanging knobChanged editor to %s" % selection[0].fullName()) + # Reinitialise everything, wooo! 
+ self.current_knob_dropdown.blockSignals(True) + self.node = selection[0] + + self.script_editor.setPlainText("") + self.unsavedKnobs = {} + self.scrollPos = {} + self.setWindowTitle("KnobScripter - %s %s" % + (self.node.fullName(), self.knob)) + self.current_node_label_name.setText(self.node.fullName()) + + self.toLoadKnob = False + self.updateKnobDropdown() # onee + # self.current_knob_dropdown.repaint() + # self.current_knob_dropdown.setMinimumWidth(self.current_knob_dropdown.minimumSizeHint().width()) + self.toLoadKnob = True + self.setCurrentKnob(self.knob) + self.loadKnobValue(False) + self.script_editor.setFocus() + self.setKnobModified(False) + self.current_knob_dropdown.blockSignals(False) + # self.current_knob_dropdown.setMinimumContentsLength(80) + return + + def exitNodeMode(self): + self.nodeMode = False + self.setWindowTitle("KnobScripter - Script Mode") + self.node_mode_bar.setVisible(False) + self.script_mode_bar.setVisible(True) + self.node = nuke.toNode("root") + # self.updateFoldersDropdown() + # self.updateScriptsDropdown() + self.splitter.setSizes([1, 1]) + self.loadScriptState() + self.setLastScript() + + self.loadScriptContents(check=False) + self.setScriptState() + + def clearConsole(self): + self.origConsoleText = self.nukeSEOutput.document().toPlainText().encode("utf8") + self.script_output.setPlainText("") + + def toggleFRW(self, frw_pressed): + self.frw_open = frw_pressed + self.frw.setVisible(self.frw_open) + if self.frw_open: + self.frw.find_lineEdit.setFocus() + self.frw.find_lineEdit.selectAll() + else: + self.script_editor.setFocus() + return + + def openSnippets(self): + ''' Whenever the 'snippets' button is pressed... open the panel ''' + global SnippetEditPanel + if SnippetEditPanel == "": + SnippetEditPanel = SnippetsPanel(self) + + if not SnippetEditPanel.isVisible(): + SnippetEditPanel.reload() + + if SnippetEditPanel.show(): + self.snippets = self.loadSnippets(maxDepth=5) + SnippetEditPanel = "" + + def loadSnippets(self, path="", maxDepth=5, depth=0): + ''' + Load prefs recursive. When maximum recursion depth, ignores paths. + ''' + max_depth = maxDepth + cur_depth = depth + if path == "": + path = self.snippets_txt_path + if not os.path.isfile(path): + return {} + else: + loaded_snippets = {} + with open(path, "r") as f: + file = json.load(f) + for i, (key, val) in enumerate(file.items()): + if re.match(r"\[custom-path-[0-9]+\]$", key): + if cur_depth < max_depth: + new_dict = self.loadSnippets( + path=val, maxDepth=max_depth, depth=cur_depth + 1) + loaded_snippets.update(new_dict) + else: + loaded_snippets[key] = val + return loaded_snippets + + def messageBox(self, the_text=""): + ''' Just a simple message box ''' + if self.isPane: + msgBox = QtWidgets.QMessageBox() + else: + msgBox = QtWidgets.QMessageBox(self) + msgBox.setText(the_text) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.exec_() + + def openPrefs(self): + ''' Open the preferences panel ''' + global PrefsPanel + if PrefsPanel == "": + PrefsPanel = KnobScripterPrefs(self) + + if PrefsPanel.show(): + PrefsPanel = "" + + def loadPrefs(self): + ''' Load prefs ''' + if not os.path.isfile(self.prefs_txt): + return [] + else: + with open(self.prefs_txt, "r") as f: + prefs = json.load(f) + return prefs + + def runScript(self): + ''' Run the current script... 
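`loadSnippets()` above merges snippet dictionaries recursively: any key shaped like `[custom-path-N]` holds a path to a further snippet file, followed up to `maxDepth` levels so that circular includes cannot recurse forever. A standalone equivalent of that merge:

```python
import json
import os
import re

def load_snippets(path, max_depth=5, depth=0):
    """Merge snippet dicts; '[custom-path-N]' values point at more snippet files."""
    if not os.path.isfile(path):
        return {}
    with open(path, "r") as f:
        data = json.load(f)
    snippets = {}
    for key, val in data.items():
        if re.match(r"\[custom-path-[0-9]+\]$", key):
            if depth < max_depth:  # the depth cap stops include cycles
                snippets.update(load_snippets(val, max_depth, depth + 1))
        else:
            snippets[key] = val
    return snippets

# merged = load_snippets("snippets.txt")  # hypothetical snippet file
```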
''' + self.script_editor.runScript() + + def saveScrollValue(self): + ''' Save scroll values ''' + if self.nodeMode: + self.scrollPos[self.knob] = self.script_editor.verticalScrollBar( + ).value() + else: + self.scrollPos[self.current_folder + "/" + + self.current_script] = self.script_editor.verticalScrollBar().value() + + def saveCursorPosValue(self): + ''' Save cursor pos and anchor values ''' + self.cursorPos[self.current_folder + "/" + self.current_script] = [ + self.script_editor.textCursor().position(), self.script_editor.textCursor().anchor()] + + def closeEvent(self, close_event): + if self.nodeMode: + updatedCount = self.updateUnsavedKnobs() + if updatedCount > 0: + msgBox = QtWidgets.QMessageBox() + msgBox.setText("Save changes to %s knob%s before closing?" % ( + str(updatedCount), int(updatedCount > 1) * "s")) + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.Yes: + self.saveAllKnobValues(check=False) + close_event.accept() + return + elif reply == QtWidgets.QMessageBox.Cancel: + close_event.ignore() + return + else: + close_event.accept() + else: + self.autosave() + if self in AllKnobScripters: + AllKnobScripters.remove(self) + close_event.accept() + + # Landing functions + + def refreshClicked(self): + ''' Function to refresh the dropdowns ''' + if self.nodeMode: + knob = self.current_knob_dropdown.itemData( + self.current_knob_dropdown.currentIndex()).encode('UTF8') + self.current_knob_dropdown.blockSignals(True) + self.current_knob_dropdown.clear() # First remove all items + self.updateKnobDropdown() + availableKnobs = [] + for i in range(self.current_knob_dropdown.count()): + if self.current_knob_dropdown.itemData(i) is not None: + availableKnobs.append( + self.current_knob_dropdown.itemData(i).encode('UTF8')) + if knob in availableKnobs: + self.setCurrentKnob(knob) + self.current_knob_dropdown.blockSignals(False) + else: + folder = self.current_folder + script = self.current_script + self.autosave() + self.updateFoldersDropdown() + self.setCurrentFolder(folder) + self.updateScriptsDropdown() + self.setCurrentScript(script) + self.script_editor.setFocus() + + def reloadClicked(self): + if self.nodeMode: + self.loadKnobValue() + else: + log("Node mode is off") + self.loadScriptContents(check=True, pyOnly=True) + + def saveClicked(self): + if self.nodeMode: + self.saveKnobValue(False) + else: + self.saveScriptContents(temp=False) + + def setModified(self): + if self.nodeMode: + self.setKnobModified(True) + elif not self.current_script_modified: + self.setScriptModified(True) + if not self.nodeMode: + self.toAutosave = True + + def pin(self, pressed): + if pressed: + self.setWindowFlags(self.windowFlags() | + QtCore.Qt.WindowStaysOnTopHint) + self.pinned = True + self.show() + else: + self.setWindowFlags(self.windowFlags() & ~ + QtCore.Qt.WindowStaysOnTopHint) + self.pinned = False + self.show() + + def findSE(self): + for widget in QtWidgets.QApplication.allWidgets(): + if "Script Editor" in widget.windowTitle(): + return widget + + # Functions for Nuke's Script Editor + def findScriptEditors(self): + script_editors = [] + for widget in QtWidgets.QApplication.allWidgets(): + if "Script Editor" in widget.windowTitle() and len(widget.children()) > 5: + script_editors.append(widget) + return script_editors + + def findSEInput(self,
se): + return se.children()[-1].children()[0] + + def findSEOutput(self, se): + return se.children()[-1].children()[1] + + def findSERunBtn(self, se): + for btn in se.children(): + try: + if "Run the current script" in btn.toolTip(): + return btn + except: + pass + return False + + def setSEOutputEvent(self): + nukeScriptEditors = self.findScriptEditors() + # Take the console from the first script editor found... + self.origConsoleText = self.nukeSEOutput.document().toPlainText().encode("utf8") + for se in nukeScriptEditors: + se_output = self.findSEOutput(se) + se_output.textChanged.connect( + partial(consoleChanged, se_output, self)) + consoleChanged(se_output, self) # Initialise. + + +class KnobScripterPane(KnobScripter): + def __init__(self, node="", knob="knobChanged"): + super(KnobScripterPane, self).__init__() + self.isPane = True + + def showEvent(self, the_event): + try: + killPaneMargins(self) + except: + pass + return KnobScripter.showEvent(self, the_event) + + def hideEvent(self, the_event): + self.autosave() + return KnobScripter.hideEvent(self, the_event) + + +def consoleChanged(self, ks): + ''' This will be called every time the ScriptEditor Output text is changed ''' + try: + if ks: # KS exists + ksOutput = ks.script_output # The console TextEdit widget + ksText = self.document().toPlainText().encode("utf8") + # The text from the console that will be omitted + origConsoleText = ks.origConsoleText + if ksText.startswith(origConsoleText): + ksText = ksText[len(origConsoleText):] + else: + ks.origConsoleText = "" + ksOutput.setPlainText(ksText) + ksOutput.verticalScrollBar().setValue(ksOutput.verticalScrollBar().maximum()) + except: + pass + + +def killPaneMargins(widget_object): + if widget_object: + target_widgets = set() + target_widgets.add(widget_object.parentWidget().parentWidget()) + target_widgets.add(widget_object.parentWidget( + ).parentWidget().parentWidget().parentWidget()) + + for widget_layout in target_widgets: + try: + widget_layout.layout().setContentsMargins(0, 0, 0, 0) + except: + pass + + +def debug(lev=0): + ''' Convenience function to set the KnobScripter on debug mode''' + # levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL] + # for handler in logging.root.handlers[:]: + # logging.root.removeHandler(handler) + # logging.basicConfig(level=levels[lev]) + # Changed to a shitty way for now + global DebugMode + DebugMode = True + + +def log(text): + ''' Display a debug info message. Yes, in a stupid way. I know.''' + global DebugMode + if DebugMode: + print(text) + + +# --------------------------------------------------------------------- +# Dialogs +# --------------------------------------------------------------------- +class FileNameDialog(QtWidgets.QDialog): + ''' + Dialog for creating new... (mode = "folder", "script" or "knob"). 
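`consoleChanged()` mirrors only the new Nuke console output into the KnobScripter output pane by remembering a baseline (`origConsoleText`) and stripping it off the front; if the console no longer starts with the baseline (e.g. it was cleared), everything is mirrored and the baseline reset. The diffing step on its own:

```python
def new_console_text(full_text, baseline):
    """Return only the output produced after `baseline` was captured."""
    if full_text.startswith(baseline):
        return full_text[len(baseline):]
    return full_text  # console was cleared or replaced: mirror everything

baseline = "Nuke 12.0\n"                    # captured when the editor opens
console = "Nuke 12.0\n>>> 1+1\n2\n"         # console contents some time later
print(new_console_text(console, baseline))  # '>>> 1+1\n2\n'
```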
+ ''' + + def __init__(self, parent=None, mode="folder", text=""): + if parent.isPane: + super(FileNameDialog, self).__init__() + else: + super(FileNameDialog, self).__init__(parent) + #self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint) + self.mode = mode + self.text = text + + title = "Create new {}.".format(self.mode) + self.setWindowTitle(title) + + self.initUI() + + def initUI(self): + # Widgets + self.name_label = QtWidgets.QLabel("Name: ") + self.name_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + self.name_lineEdit = QtWidgets.QLineEdit() + self.name_lineEdit.setText(self.text) + self.name_lineEdit.textChanged.connect(self.nameChanged) + + # Buttons + self.button_box = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) + self.button_box.button( + QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "") + self.button_box.accepted.connect(self.clickedOk) + self.button_box.rejected.connect(self.clickedCancel) + + # Layout + self.master_layout = QtWidgets.QVBoxLayout() + self.name_layout = QtWidgets.QHBoxLayout() + self.name_layout.addWidget(self.name_label) + self.name_layout.addWidget(self.name_lineEdit) + self.master_layout.addLayout(self.name_layout) + self.master_layout.addWidget(self.button_box) + self.setLayout(self.master_layout) + + self.name_lineEdit.setFocus() + self.setMinimumWidth(250) + + def nameChanged(self): + txt = self.name_lineEdit.text() + m = r"[\w]*$" + if self.mode == "knob": # Knobs can't start with a number... + m = r"[a-zA-Z_]+" + m + + if re.match(m, txt) or txt == "": + self.text = txt + else: + self.name_lineEdit.setText(self.text) + + self.button_box.button( + QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "") + return + + def clickedOk(self): + self.accept() + return + + def clickedCancel(self): + self.reject() + return + + +class TextInputDialog(QtWidgets.QDialog): + ''' + Simple dialog for a text input. 
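`FileNameDialog.nameChanged()` gates the Ok button with a regex: `[\w]*$` for folder and script names, plus an extra `[a-zA-Z_]+` prefix for knobs so their names cannot start with a digit. The same check in isolation:

```python
import re

def valid_name(text, mode="script"):
    pattern = r"[\w]*$"                # letters, digits and underscores only
    if mode == "knob":                 # knob names cannot start with a digit
        pattern = r"[a-zA-Z_]+" + pattern
    return bool(re.match(pattern, text)) or text == ""

assert valid_name("my_script")
assert valid_name("2pac")              # fine for a script...
assert not valid_name("2pac", "knob")  # ...but not for a knob
assert not valid_name("bad name")      # spaces are rejected
```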
+ ''' + + def __init__(self, parent=None, name="", text="", title=""): + if parent.isPane: + super(TextInputDialog, self).__init__() + else: + super(TextInputDialog, self).__init__(parent) + #self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint) + + self.name = name # title of textinput + self.text = text # default content of textinput + + self.setWindowTitle(title) + + self.initUI() + + def initUI(self): + # Widgets + self.name_label = QtWidgets.QLabel(self.name + ": ") + self.name_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + self.name_lineEdit = QtWidgets.QLineEdit() + self.name_lineEdit.setText(self.text) + self.name_lineEdit.textChanged.connect(self.nameChanged) + + # Buttons + self.button_box = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) + #self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "") + self.button_box.accepted.connect(self.clickedOk) + self.button_box.rejected.connect(self.clickedCancel) + + # Layout + self.master_layout = QtWidgets.QVBoxLayout() + self.name_layout = QtWidgets.QHBoxLayout() + self.name_layout.addWidget(self.name_label) + self.name_layout.addWidget(self.name_lineEdit) + self.master_layout.addLayout(self.name_layout) + self.master_layout.addWidget(self.button_box) + self.setLayout(self.master_layout) + + self.name_lineEdit.setFocus() + self.setMinimumWidth(250) + + def nameChanged(self): + self.text = self.name_lineEdit.text() + + def clickedOk(self): + self.accept() + return + + def clickedCancel(self): + self.reject() + return + + +class ChooseNodeDialog(QtWidgets.QDialog): + ''' + Dialog for selecting a node by its name. Only admits nodes that exist (including root, preferences...) + ''' + + def __init__(self, parent=None, name=""): + if parent.isPane: + super(ChooseNodeDialog, self).__init__() + else: + super(ChooseNodeDialog, self).__init__(parent) + + self.name = name # Name of node (will be "" by default) + self.allNodes = [] + + self.setWindowTitle("Enter the node's name...") + + self.initUI() + + def initUI(self): + # Widgets + self.name_label = QtWidgets.QLabel("Name: ") + self.name_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + self.name_lineEdit = QtWidgets.QLineEdit() + self.name_lineEdit.setText(self.name) + self.name_lineEdit.textChanged.connect(self.nameChanged) + + self.allNodes = self.getAllNodes() + completer = QtWidgets.QCompleter(self.allNodes, self) + completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive) + self.name_lineEdit.setCompleter(completer) + + # Buttons + self.button_box = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) + self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled( + nuke.exists(self.name)) + self.button_box.accepted.connect(self.clickedOk) + self.button_box.rejected.connect(self.clickedCancel) + + # Layout + self.master_layout = QtWidgets.QVBoxLayout() + self.name_layout = QtWidgets.QHBoxLayout() + self.name_layout.addWidget(self.name_label) + self.name_layout.addWidget(self.name_lineEdit) + self.master_layout.addLayout(self.name_layout) + self.master_layout.addWidget(self.button_box) + self.setLayout(self.master_layout) + + self.name_lineEdit.setFocus() + self.setMinimumWidth(250) + + def getAllNodes(self): + self.allNodes = [n.fullName() for n in nuke.allNodes( + recurseGroups=True)] # if parent is in current context?? 
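ChooseNodeDialog's autocompletion is plain Qt: a `QCompleter` fed the node names, made case-insensitive, and attached to the line edit. A minimal sketch, assuming PySide2 is importable outside of Nuke:

```python
from PySide2 import QtCore, QtWidgets

app = QtWidgets.QApplication([])
line_edit = QtWidgets.QLineEdit()
names = ["Blur1", "Grade1", "Grade2", "root", "preferences"]  # stand-ins for nuke.allNodes()
completer = QtWidgets.QCompleter(names, line_edit)
completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)  # typing 'gra' offers Grade1/Grade2
line_edit.setCompleter(completer)
line_edit.show()
app.exec_()
```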
+ self.allNodes.extend(["root", "preferences"]) + return self.allNodes + + def nameChanged(self): + self.name = self.name_lineEdit.text() + self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled( + self.name in self.allNodes) + + def clickedOk(self): + self.accept() + return + + def clickedCancel(self): + self.reject() + return + + +# ------------------------------------------------------------------------------------------------------ +# Script Editor Widget +# Wouter Gilsing built an incredibly useful python script editor for his Hotbox Manager, so I had it +# really easy for this part! +# Starting from his script editor, I changed the style and added the sublime-like functionality. +# I think this bit of code has the potential to get used in many nuke tools. +# Credit to him: http://www.woutergilsing.com/ +# Originally used on W_Hotbox v1.5: http://www.nukepedia.com/python/ui/w_hotbox +# ------------------------------------------------------------------------------------------------------ +class KnobScripterTextEdit(QtWidgets.QPlainTextEdit): + # Signal that will be emitted when the user has changed the text + userChangedEvent = QtCore.Signal() + + def __init__(self, knobScripter=""): + super(KnobScripterTextEdit, self).__init__() + + self.knobScripter = knobScripter + self.selected_text = "" + + # Setup line numbers + if self.knobScripter != "": + self.tabSpaces = self.knobScripter.tabSpaces + else: + self.tabSpaces = 4 + self.lineNumberArea = KSLineNumberArea(self) + self.blockCountChanged.connect(self.updateLineNumberAreaWidth) + self.updateRequest.connect(self.updateLineNumberArea) + self.updateLineNumberAreaWidth() + + # Highlight line + self.cursorPositionChanged.connect(self.highlightCurrentLine) + + # -------------------------------------------------------------------------------------------------- + # This is adapted from an original version by Wouter Gilsing. + # Extract from his original comments: + # While researching the implementation of line number, I had a look at Nuke's Blinkscript node. [..] + # thefoundry.co.uk/products/nuke/developers/100/pythonreference/nukescripts.blinkscripteditor-pysrc.html + # I stripped and modified the useful bits of the line number related parts of the code [..] + # Credits to theFoundry for writing the blinkscripteditor, best example code I could wish for. 
+ # -------------------------------------------------------------------------------------------------- + + def lineNumberAreaWidth(self): + digits = 1 + maxNum = max(1, self.blockCount()) + while (maxNum >= 10): + maxNum /= 10 + digits += 1 + + space = 7 + self.fontMetrics().width('9') * digits + return space + + def updateLineNumberAreaWidth(self): + self.setViewportMargins(self.lineNumberAreaWidth(), 0, 0, 0) + + def updateLineNumberArea(self, rect, dy): + + if (dy): + self.lineNumberArea.scroll(0, dy) + else: + self.lineNumberArea.update( + 0, rect.y(), self.lineNumberArea.width(), rect.height()) + + if (rect.contains(self.viewport().rect())): + self.updateLineNumberAreaWidth() + + def resizeEvent(self, event): + QtWidgets.QPlainTextEdit.resizeEvent(self, event) + + cr = self.contentsRect() + self.lineNumberArea.setGeometry(QtCore.QRect( + cr.left(), cr.top(), self.lineNumberAreaWidth(), cr.height())) + + def lineNumberAreaPaintEvent(self, event): + + if self.isReadOnly(): + return + + painter = QtGui.QPainter(self.lineNumberArea) + painter.fillRect(event.rect(), QtGui.QColor(36, 36, 36)) # Number bg + + block = self.firstVisibleBlock() + blockNumber = block.blockNumber() + top = int(self.blockBoundingGeometry( + block).translated(self.contentOffset()).top()) + bottom = top + int(self.blockBoundingRect(block).height()) + currentLine = self.document().findBlock( + self.textCursor().position()).blockNumber() + + painter.setPen(self.palette().color(QtGui.QPalette.Text)) + + painterFont = QtGui.QFont() + painterFont.setFamily("Courier") + painterFont.setStyleHint(QtGui.QFont.Monospace) + painterFont.setFixedPitch(True) + if self.knobScripter != "": + painterFont.setPointSize(self.knobScripter.fontSize) + painter.setFont(self.knobScripter.script_editor_font) + + while (block.isValid() and top <= event.rect().bottom()): + + textColor = QtGui.QColor(110, 110, 110) # Numbers + + if blockNumber == currentLine and self.hasFocus(): + textColor = QtGui.QColor(255, 170, 0) # Number highlighted + + painter.setPen(textColor) + + number = "%s" % str(blockNumber + 1) + painter.drawText(-3, top, self.lineNumberArea.width(), + self.fontMetrics().height(), QtCore.Qt.AlignRight, number) + + # Move to the next block + block = block.next() + top = bottom + bottom = top + int(self.blockBoundingRect(block).height()) + blockNumber += 1 + + def keyPressEvent(self, event): + ''' + Custom actions for specific keystrokes + ''' + key = event.key() + ctrl = bool(event.modifiers() & Qt.ControlModifier) + alt = bool(event.modifiers() & Qt.AltModifier) + shift = bool(event.modifiers() & Qt.ShiftModifier) + pre_scroll = self.verticalScrollBar().value() + #modifiers = QtWidgets.QApplication.keyboardModifiers() + #ctrl = (modifiers == Qt.ControlModifier) + #shift = (modifiers == Qt.ShiftModifier) + + up_arrow = 16777235 + down_arrow = 16777237 + + # if Tab convert to Space + if key == 16777217: + self.indentation('indent') + + # if Shift+Tab remove indent + elif key == 16777218: + self.indentation('unindent') + + # if BackSpace try to snap to previous indent level + elif key == 16777219: + if not self.unindentBackspace(): + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + else: + # COOL BEHAVIORS SIMILAR TO SUBLIME GO NEXT! 
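`lineNumberAreaWidth()` sizes the gutter from the digit count of the highest line number. The digit loop in isolation (`//` keeps the integer semantics that Python 2's `/` gives the original):

```python
def gutter_digits(line_count):
    """Digits needed to print the highest line number."""
    digits, n = 1, max(1, line_count)
    while n >= 10:
        n //= 10
        digits += 1
    return digits

assert [gutter_digits(n) for n in (1, 9, 10, 99, 100)] == [1, 1, 2, 2, 3]
# gutter width in px is then: 7 + char_width * gutter_digits(count)
```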
+ cursor = self.textCursor() + cpos = cursor.position() + apos = cursor.anchor() + text_before_cursor = self.toPlainText()[:min(cpos, apos)] + text_after_cursor = self.toPlainText()[max(cpos, apos):] + text_all = self.toPlainText() + to_line_start = text_before_cursor[::-1].find("\n") + if to_line_start == -1: + # Position of the start of the line that includes the cursor selection start + linestart_pos = 0 + else: + linestart_pos = len(text_before_cursor) - to_line_start + + to_line_end = text_after_cursor.find("\n") + if to_line_end == -1: + # Position of the end of the line that includes the cursor selection end + lineend_pos = len(text_all) + else: + lineend_pos = max(cpos, apos) + to_line_end + + text_before_lines = text_all[:linestart_pos] + text_after_lines = text_all[lineend_pos:] + if len(text_after_lines) and text_after_lines.startswith("\n"): + text_after_lines = text_after_lines[1:] + text_lines = text_all[linestart_pos:lineend_pos] + + if cursor.hasSelection(): + selection = cursor.selection().toPlainText() + else: + selection = "" + if key == Qt.Key_ParenLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # ( + cursor.insertText("(" + selection + ")") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + # ) + elif key == Qt.Key_ParenRight and text_after_cursor.startswith(")"): + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + self.setTextCursor(cursor) + elif key == Qt.Key_BracketLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # [ + cursor.insertText("[" + selection + "]") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + # ] + elif key in [Qt.Key_BracketRight, 43] and text_after_cursor.startswith("]"): + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + self.setTextCursor(cursor) + elif key == Qt.Key_BraceLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # { + cursor.insertText("{" + selection + "}") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + # } + elif key in [199, Qt.Key_BraceRight] and text_after_cursor.startswith("}"): + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + self.setTextCursor(cursor) + elif key == 34: # " + if len(selection) > 0: + cursor.insertText('"' + selection + '"') + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + # and not re.search(r"(?:[\s)\]]+|$)",text_before_cursor): + elif text_after_cursor.startswith('"') and '"' in text_before_cursor.split("\n")[-1]: + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + # If chars after cursor, act normal + elif not re.match(r"(?:[\s)\]]+|$)", text_after_cursor): + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + # If chars before cursor, act normal + elif not re.search(r"[\s.({\[,]$", text_before_cursor) and text_before_cursor != "": + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + else: + cursor.insertText('"' + selection + '"') + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + elif key == 39: # ' + if len(selection) > 0: + 
cursor.insertText("'" + selection + "'") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + # and not re.search(r"(?:[\s)\]]+|$)",text_before_cursor): + elif text_after_cursor.startswith("'") and "'" in text_before_cursor.split("\n")[-1]: + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + # If chars after cursor, act normal + elif not re.match(r"(?:[\s)\]]+|$)", text_after_cursor): + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + # If chars before cursor, act normal + elif not re.search(r"[\s.({\[,]$", text_before_cursor) and text_before_cursor != "": + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + else: + cursor.insertText("'" + selection + "'") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + elif key == 35 and len(selection): # (yes, a hash) + # If there's a selection, insert a hash at the start of each line.. how the fuck? + if selection != "": + selection_split = selection.split("\n") + if all(i.startswith("#") for i in selection_split): + selection_commented = "\n".join( + [s[1:] for s in selection_split]) # Uncommented + else: + selection_commented = "#" + "\n#".join(selection_split) + cursor.insertText(selection_commented) + if apos > cpos: + cursor.setPosition( + apos + len(selection_commented) - len(selection), QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos, QtGui.QTextCursor.KeepAnchor) + else: + cursor.setPosition(apos, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition( + cpos + len(selection_commented) - len(selection), QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + elif key == 68 and ctrl and shift: # Ctrl+Shift+D, to duplicate text or line/s + + if not len(selection): + self.setPlainText( + text_before_lines + text_lines + "\n" + text_lines + "\n" + text_after_lines) + cursor.setPosition( + apos + len(text_lines) + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition( + cpos + len(text_lines) + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + self.verticalScrollBar().setValue(pre_scroll) + self.scrollToCursor() + else: + if text_before_cursor.endswith("\n") and not selection.startswith("\n"): + cursor.insertText(selection + "\n" + selection) + cursor.setPosition( + apos + len(selection) + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition( + cpos + len(selection) + 1, QtGui.QTextCursor.KeepAnchor) + else: + cursor.insertText(selection + selection) + cursor.setPosition( + apos + len(selection), QtGui.QTextCursor.MoveAnchor) + cursor.setPosition( + cpos + len(selection), QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + # Ctrl+Shift+Up, to move the selected line/s up + elif key == up_arrow and ctrl and shift and len(text_before_lines): + prev_line_start_distance = text_before_lines[:-1][::-1].find( + "\n") + if prev_line_start_distance == -1: + prev_line_start_pos = 0 # Position of the start of the previous line + else: + prev_line_start_pos = len( + text_before_lines) - 1 - prev_line_start_distance + prev_line = text_before_lines[prev_line_start_pos:] + + text_before_prev_line = text_before_lines[:prev_line_start_pos] + + if prev_line.endswith("\n"): + prev_line = prev_line[:-1] + + if len(text_after_lines): + text_after_lines = "\n" + text_after_lines + + self.setPlainText( + text_before_prev_line + text_lines + "\n" + prev_line + text_after_lines) + cursor.setPosition(apos - len(prev_line) - 1, + 
QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos - len(prev_line) - 1, + QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + self.verticalScrollBar().setValue(pre_scroll) + self.scrollToCursor() + return + + elif key == down_arrow and ctrl and shift: # Ctrl+Shift+Up, to move the selected line/s up + if not len(text_after_lines): + text_after_lines = "" + next_line_end_distance = text_after_lines.find("\n") + if next_line_end_distance == -1: + next_line_end_pos = len(text_all) + else: + next_line_end_pos = next_line_end_distance + next_line = text_after_lines[:next_line_end_pos] + text_after_next_line = text_after_lines[next_line_end_pos:] + + self.setPlainText(text_before_lines + next_line + + "\n" + text_lines + text_after_next_line) + cursor.setPosition(apos + len(next_line) + 1, + QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + len(next_line) + 1, + QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + self.verticalScrollBar().setValue(pre_scroll) + self.scrollToCursor() + return + + # If up key and nothing happens, go to start + elif key == up_arrow and not len(text_before_lines): + if not shift: + cursor.setPosition(0, QtGui.QTextCursor.MoveAnchor) + self.setTextCursor(cursor) + else: + cursor.setPosition(0, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + # If up key and nothing happens, go to start + elif key == down_arrow and not len(text_after_lines): + if not shift: + cursor.setPosition( + len(text_all), QtGui.QTextCursor.MoveAnchor) + self.setTextCursor(cursor) + else: + cursor.setPosition( + len(text_all), QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + # if enter or return, match indent level + elif key in [16777220, 16777221]: + self.indentNewLine() + else: + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + + self.scrollToCursor() + + def scrollToCursor(self): + self.cursor = self.textCursor() + # Does nothing, but makes the scroll go to the right place... 
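The hash-key handler above toggles comments on the selection: if every selected line already starts with `#` it uncomments them all, otherwise it prefixes each line. The string transformation on its own:

```python
def toggle_comment(selection):
    lines = selection.split("\n")
    if all(line.startswith("#") for line in lines):
        return "\n".join(line[1:] for line in lines)  # uncomment every line
    return "#" + "\n#".join(lines)                    # comment every line

code = "a = 1\nb = 2"
commented = toggle_comment(code)
assert commented == "#a = 1\n#b = 2"
assert toggle_comment(commented) == code  # round-trips
```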
+ self.cursor.movePosition(QtGui.QTextCursor.NoMove) + self.setTextCursor(self.cursor) + + def getCursorInfo(self): + + self.cursor = self.textCursor() + + self.firstChar = self.cursor.selectionStart() + self.lastChar = self.cursor.selectionEnd() + + self.noSelection = False + if self.firstChar == self.lastChar: + self.noSelection = True + + self.originalPosition = self.cursor.position() + self.cursorBlockPos = self.cursor.positionInBlock() + + def unindentBackspace(self): + ''' + #snap to previous indent level + ''' + self.getCursorInfo() + + if not self.noSelection or self.cursorBlockPos == 0: + return False + + # check text in front of cursor + textInFront = self.document().findBlock( + self.firstChar).text()[:self.cursorBlockPos] + + # check whether solely spaces + if textInFront != ' ' * self.cursorBlockPos: + return False + + # snap to previous indent level + spaces = len(textInFront) + for space in range(spaces - ((spaces - 1) / self.tabSpaces) * self.tabSpaces - 1): + self.cursor.deletePreviousChar() + + def indentNewLine(self): + + # in case selection covers multiple line, make it one line first + self.insertPlainText('') + + self.getCursorInfo() + + # check how many spaces after cursor + text = self.document().findBlock(self.firstChar).text() + + textInFront = text[:self.cursorBlockPos] + + if len(textInFront) == 0: + self.insertPlainText('\n') + return + + indentLevel = 0 + for i in textInFront: + if i == ' ': + indentLevel += 1 + else: + break + + indentLevel /= self.tabSpaces + + # find out whether textInFront's last character was a ':' + # if that's the case add another indent. + # ignore any spaces at the end, however also + # make sure textInFront is not just an indent + if textInFront.count(' ') != len(textInFront): + while textInFront[-1] == ' ': + textInFront = textInFront[:-1] + + if textInFront[-1] == ':': + indentLevel += 1 + + # new line + self.insertPlainText('\n') + # match indent + self.insertPlainText(' ' * (self.tabSpaces * indentLevel)) + + def indentation(self, mode): + + pre_scroll = self.verticalScrollBar().value() + self.getCursorInfo() + + # if nothing is selected and mode is set to indent, simply insert as many + # space as needed to reach the next indentation level. 
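`unindentBackspace()` snaps backspace to the previous indent stop when only spaces precede the cursor: the loop deletes all but one of the required characters, and the default backspace that follows (the method falls through returning `None`, which is falsy in the caller's check) removes the last one. The arithmetic, checked standalone:

```python
def chars_removed_by_backspace(spaces_before_cursor, tab_spaces=4):
    """Total characters one backspace removes when snapping to an indent stop."""
    n = spaces_before_cursor
    loop = n - ((n - 1) // tab_spaces) * tab_spaces - 1  # deletePreviousChar() calls
    return loop + 1                                      # plus the default backspace

assert chars_removed_by_backspace(4) == 4  # column 4 -> column 0
assert chars_removed_by_backspace(6) == 2  # column 6 -> column 4
assert chars_removed_by_backspace(1) == 1  # plain single-space delete
```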
+ if self.noSelection and mode == 'indent': + + remainingSpaces = self.tabSpaces - \ + (self.cursorBlockPos % self.tabSpaces) + self.insertPlainText(' ' * remainingSpaces) + return + + selectedBlocks = self.findBlocks(self.firstChar, self.lastChar) + beforeBlocks = self.findBlocks( + last=self.firstChar - 1, exclude=selectedBlocks) + afterBlocks = self.findBlocks( + first=self.lastChar + 1, exclude=selectedBlocks) + + beforeBlocksText = self.blocks2list(beforeBlocks) + selectedBlocksText = self.blocks2list(selectedBlocks, mode) + afterBlocksText = self.blocks2list(afterBlocks) + + combinedText = '\n'.join( + beforeBlocksText + selectedBlocksText + afterBlocksText) + + # make sure the line count stays the same + originalBlockCount = len(self.toPlainText().split('\n')) + combinedText = '\n'.join(combinedText.split('\n')[:originalBlockCount]) + + self.clear() + self.setPlainText(combinedText) + + if self.noSelection: + self.cursor.setPosition(self.lastChar) + + # check whether the the orignal selection was from top to bottom or vice versa + else: + if self.originalPosition == self.firstChar: + first = self.lastChar + last = self.firstChar + firstBlockSnap = QtGui.QTextCursor.EndOfBlock + lastBlockSnap = QtGui.QTextCursor.StartOfBlock + else: + first = self.firstChar + last = self.lastChar + firstBlockSnap = QtGui.QTextCursor.StartOfBlock + lastBlockSnap = QtGui.QTextCursor.EndOfBlock + + self.cursor.setPosition(first) + self.cursor.movePosition( + firstBlockSnap, QtGui.QTextCursor.MoveAnchor) + self.cursor.setPosition(last, QtGui.QTextCursor.KeepAnchor) + self.cursor.movePosition( + lastBlockSnap, QtGui.QTextCursor.KeepAnchor) + + self.setTextCursor(self.cursor) + self.verticalScrollBar().setValue(pre_scroll) + + def findBlocks(self, first=0, last=None, exclude=[]): + blocks = [] + if last == None: + last = self.document().characterCount() + for pos in range(first, last + 1): + block = self.document().findBlock(pos) + if block not in blocks and block not in exclude: + blocks.append(block) + return blocks + + def blocks2list(self, blocks, mode=None): + text = [] + for block in blocks: + blockText = block.text() + if mode == 'unindent': + if blockText.startswith(' ' * self.tabSpaces): + blockText = blockText[self.tabSpaces:] + self.lastChar -= self.tabSpaces + elif blockText.startswith('\t'): + blockText = blockText[1:] + self.lastChar -= 1 + + elif mode == 'indent': + blockText = ' ' * self.tabSpaces + blockText + self.lastChar += self.tabSpaces + + text.append(blockText) + + return text + + def highlightCurrentLine(self): + ''' + Highlight currently selected line + ''' + extraSelections = [] + + selection = QtWidgets.QTextEdit.ExtraSelection() + + lineColor = QtGui.QColor(62, 62, 62, 255) + + selection.format.setBackground(lineColor) + selection.format.setProperty( + QtGui.QTextFormat.FullWidthSelection, True) + selection.cursor = self.textCursor() + selection.cursor.clearSelection() + + extraSelections.append(selection) + + self.setExtraSelections(extraSelections) + self.scrollToCursor() + + def format(self, rgb, style=''): + ''' + Return a QtWidgets.QTextCharFormat with the given attributes. 
+ ''' + color = QtGui.QColor(*rgb) + textFormat = QtGui.QTextCharFormat() + textFormat.setForeground(color) + + if 'bold' in style: + textFormat.setFontWeight(QtGui.QFont.Bold) + if 'italic' in style: + textFormat.setFontItalic(True) + if 'underline' in style: + textFormat.setUnderlineStyle(QtGui.QTextCharFormat.SingleUnderline) + + return textFormat + + +class KSLineNumberArea(QtWidgets.QWidget): + def __init__(self, scriptEditor): + super(KSLineNumberArea, self).__init__(scriptEditor) + + self.scriptEditor = scriptEditor + self.setStyleSheet("text-align: center;") + + def paintEvent(self, event): + self.scriptEditor.lineNumberAreaPaintEvent(event) + return + + +class KSScriptEditorHighlighter(QtGui.QSyntaxHighlighter): + ''' + This is also adapted from an original version by Wouter Gilsing. His comments: + + Modified, simplified version of some code found I found when researching: + wiki.python.org/moin/PyQt/Python%20syntax%20highlighting + They did an awesome job, so credits to them. I only needed to make some + modifications to make it fit my needs. + ''' + + def __init__(self, document, parent=None): + + super(KSScriptEditorHighlighter, self).__init__(document) + self.knobScripter = parent + self.script_editor = self.knobScripter.script_editor + self.selected_text = "" + self.selected_text_prev = "" + self.rules_sublime = "" + + self.styles = { + 'keyword': self.format([238, 117, 181], 'bold'), + 'string': self.format([242, 136, 135]), + 'comment': self.format([143, 221, 144]), + 'numbers': self.format([174, 129, 255]), + 'custom': self.format([255, 170, 0], 'italic'), + 'selected': self.format([255, 255, 255], 'bold underline'), + 'underline': self.format([240, 240, 240], 'underline'), + } + + self.keywords = [ + 'and', 'assert', 'break', 'class', 'continue', 'def', + 'del', 'elif', 'else', 'except', 'exec', 'finally', + 'for', 'from', 'global', 'if', 'import', 'in', + 'is', 'lambda', 'not', 'or', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'with', 'as' + ] + + self.operatorKeywords = [ + '=', '==', '!=', '<', '<=', '>', '>=', + '\+', '-', '\*', '/', '//', '\%', '\*\*', + '\+=', '-=', '\*=', '/=', '\%=', + '\^', '\|', '\&', '\~', '>>', '<<' + ] + + self.variableKeywords = ['int', 'str', + 'float', 'bool', 'list', 'dict', 'set'] + + self.numbers = ['True', 'False', 'None'] + self.loadAltStyles() + + self.tri_single = (QtCore.QRegExp("'''"), 1, self.styles['comment']) + self.tri_double = (QtCore.QRegExp('"""'), 2, self.styles['comment']) + + # rules + rules = [] + + rules += [(r'\b%s\b' % i, 0, self.styles['keyword']) + for i in self.keywords] + rules += [(i, 0, self.styles['keyword']) + for i in self.operatorKeywords] + rules += [(r'\b%s\b' % i, 0, self.styles['numbers']) + for i in self.numbers] + + rules += [ + + # integers + (r'\b[0-9]+\b', 0, self.styles['numbers']), + # Double-quoted string, possibly containing escape sequences + (r'"[^"\\]*(\\.[^"\\]*)*"', 0, self.styles['string']), + # Single-quoted string, possibly containing escape sequences + (r"'[^'\\]*(\\.[^'\\]*)*'", 0, self.styles['string']), + # From '#' until a newline + (r'#[^\n]*', 0, self.styles['comment']), + ] + + # Build a QRegExp for each pattern + self.rules_nuke = [(QtCore.QRegExp(pat), index, fmt) + for (pat, index, fmt) in rules] + self.rules = self.rules_nuke + + def loadAltStyles(self): + ''' Loads other color styles apart from Nuke's default. 
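The highlighter's rules are `(QRegExp, capture group, format)` triples, and `highlightBlock()` slides each expression across the line, formatting the nth group of every match. The same mechanics with plain `re` and a few hypothetical rules:

```python
import re

rules = [
    (r"\b(def)\b", 1, "keyword"),         # style the keyword itself
    (r"def\s+([\w\.]+)", 1, "function"),  # style only the captured name
    (r"#[^\n]*", 0, "comment"),           # style the whole match
]

def spans(line):
    """Yield (start, length, style) for every rule match in a line."""
    for pattern, nth, style in rules:
        for m in re.finditer(pattern, line):
            yield m.start(nth), len(m.group(nth)), style

print(list(spans("def foo():  # hello")))
# [(0, 3, 'keyword'), (4, 3, 'function'), (12, 7, 'comment')]
```

Since later rules overwrite earlier formats per character, rule order matters; that is why the sublime rule list first paints broad spans and then "restores" the base style over unwanted stretches.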
''' + self.styles_sublime = { + 'base': self.format([255, 255, 255]), + 'keyword': self.format([237, 36, 110]), + 'string': self.format([237, 229, 122]), + 'comment': self.format([125, 125, 125]), + 'numbers': self.format([165, 120, 255]), + 'functions': self.format([184, 237, 54]), + 'blue': self.format([130, 226, 255], 'italic'), + 'arguments': self.format([255, 170, 10], 'italic'), + 'custom': self.format([200, 200, 200], 'italic'), + 'underline': self.format([240, 240, 240], 'underline'), + 'selected': self.format([255, 255, 255], 'bold underline'), + } + + self.keywords_sublime = [ + 'and', 'assert', 'break', 'continue', + 'del', 'elif', 'else', 'except', 'exec', 'finally', + 'for', 'from', 'global', 'if', 'import', 'in', + 'is', 'lambda', 'not', 'or', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'with', 'as' + ] + self.operatorKeywords_sublime = [ + '=', '==', '!=', '<', '<=', '>', '>=', + '\+', '-', '\*', '/', '//', '\%', '\*\*', + '\+=', '-=', '\*=', '/=', '\%=', + '\^', '\|', '\&', '\~', '>>', '<<' + ] + + self.baseKeywords_sublime = [ + ',', + ] + + self.customKeywords_sublime = [ + 'nuke', + ] + + self.blueKeywords_sublime = [ + 'def', 'class', 'int', 'str', 'float', 'bool', 'list', 'dict', 'set' + ] + + self.argKeywords_sublime = [ + 'self', + ] + + self.tri_single_sublime = (QtCore.QRegExp( + "'''"), 1, self.styles_sublime['comment']) + self.tri_double_sublime = (QtCore.QRegExp( + '"""'), 2, self.styles_sublime['comment']) + self.numbers_sublime = ['True', 'False', 'None'] + + # rules + + rules = [] + # First turn everything inside parentheses orange + rules += [(r"def [\w]+[\s]*\((.*)\)", 1, + self.styles_sublime['arguments'])] + # Now restore unwanted stuff... + rules += [(i, 0, self.styles_sublime['base']) + for i in self.baseKeywords_sublime] + rules += [(r"[^\(\w),.][\s]*[\w]+", 0, self.styles_sublime['base'])] + + # Everything else + rules += [(r'\b%s\b' % i, 0, self.styles_sublime['keyword']) + for i in self.keywords_sublime] + rules += [(i, 0, self.styles_sublime['keyword']) + for i in self.operatorKeywords_sublime] + rules += [(i, 0, self.styles_sublime['custom']) + for i in self.customKeywords_sublime] + rules += [(r'\b%s\b' % i, 0, self.styles_sublime['blue']) + for i in self.blueKeywords_sublime] + rules += [(i, 0, self.styles_sublime['arguments']) + for i in self.argKeywords_sublime] + rules += [(r'\b%s\b' % i, 0, self.styles_sublime['numbers']) + for i in self.numbers_sublime] + + rules += [ + + # integers + (r'\b[0-9]+\b', 0, self.styles_sublime['numbers']), + # Double-quoted string, possibly containing escape sequences + (r'"[^"\\]*(\\.[^"\\]*)*"', 0, self.styles_sublime['string']), + # Single-quoted string, possibly containing escape sequences + (r"'[^'\\]*(\\.[^'\\]*)*'", 0, self.styles_sublime['string']), + # From '#' until a newline + (r'#[^\n]*', 0, self.styles_sublime['comment']), + # Function definitions + (r"def[\s]+([\w\.]+)", 1, self.styles_sublime['functions']), + # Class definitions + (r"class[\s]+([\w\.]+)", 1, self.styles_sublime['functions']), + # Class argument (which is also a class so must be green) + (r"class[\s]+[\w\.]+[\s]*\((.*)\)", + 1, self.styles_sublime['functions']), + # Function arguments also pick their style... 
+ (r"def[\s]+[\w]+[\s]*\(([\w]+)", 1, + self.styles_sublime['arguments']), + ] + + # Build a QRegExp for each pattern + self.rules_sublime = [(QtCore.QRegExp(pat), index, fmt) + for (pat, index, fmt) in rules] + + def format(self, rgb, style=''): + ''' + Return a QtWidgets.QTextCharFormat with the given attributes. + ''' + + color = QtGui.QColor(*rgb) + textFormat = QtGui.QTextCharFormat() + textFormat.setForeground(color) + + if 'bold' in style: + textFormat.setFontWeight(QtGui.QFont.Bold) + if 'italic' in style: + textFormat.setFontItalic(True) + if 'underline' in style: + textFormat.setUnderlineStyle(QtGui.QTextCharFormat.SingleUnderline) + + return textFormat + + def highlightBlock(self, text): + ''' + Apply syntax highlighting to the given block of text. + ''' + # Do other syntax formatting + + if self.knobScripter.color_scheme: + self.color_scheme = self.knobScripter.color_scheme + else: + self.color_scheme = "nuke" + + if self.color_scheme == "nuke": + self.rules = self.rules_nuke + elif self.color_scheme == "sublime": + self.rules = self.rules_sublime + + for expression, nth, format in self.rules: + index = expression.indexIn(text, 0) + + while index >= 0: + # We actually want the index of the nth match + index = expression.pos(nth) + length = len(expression.cap(nth)) + self.setFormat(index, length, format) + index = expression.indexIn(text, index + length) + + self.setCurrentBlockState(0) + + # Multi-line strings etc. based on selected scheme + if self.color_scheme == "nuke": + in_multiline = self.match_multiline(text, *self.tri_single) + if not in_multiline: + in_multiline = self.match_multiline(text, *self.tri_double) + elif self.color_scheme == "sublime": + in_multiline = self.match_multiline(text, *self.tri_single_sublime) + if not in_multiline: + in_multiline = self.match_multiline( + text, *self.tri_double_sublime) + + # TODO if there's a selection, highlight same occurrences in the full document. If no selection but something highlighted, unhighlight full document. (do it thru regex or sth) + + def match_multiline(self, text, delimiter, in_state, style): + ''' + Check whether highlighting requires multiple lines. + ''' + # If inside triple-single quotes, start at 0 + if self.previousBlockState() == in_state: + start = 0 + add = 0 + # Otherwise, look for the delimiter on this line + else: + start = delimiter.indexIn(text) + # Move past this match + add = delimiter.matchedLength() + + # As long as there's a delimiter match on this line... + while start >= 0: + # Look for the ending delimiter + end = delimiter.indexIn(text, start + add) + # Ending delimiter on this line? 
+ if end >= add: + length = end - start + add + delimiter.matchedLength() + self.setCurrentBlockState(0) + # No; multi-line string + else: + self.setCurrentBlockState(in_state) + length = len(text) - start + add + # Apply formatting + self.setFormat(start, length, style) + # Look for the next match + start = delimiter.indexIn(text, start + length) + + # Return True if still inside a multi-line string, False otherwise + if self.currentBlockState() == in_state: + return True + else: + return False + +# -------------------------------------------------------------------------------------- +# Script Output Widget +# The output logger works the same way as Nuke's python script editor output window +# -------------------------------------------------------------------------------------- + + +class ScriptOutputWidget(QtWidgets.QTextEdit): + def __init__(self, parent=None): + super(ScriptOutputWidget, self).__init__(parent) + self.knobScripter = parent + self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding) + self.setMinimumHeight(20) + + def keyPressEvent(self, event): + ctrl = ((event.modifiers() and (Qt.ControlModifier)) != 0) + alt = ((event.modifiers() and (Qt.AltModifier)) != 0) + shift = ((event.modifiers() and (Qt.ShiftModifier)) != 0) + key = event.key() + if type(event) == QtGui.QKeyEvent: + # print event.key() + if key in [32]: # Space + return KnobScripter.keyPressEvent(self.knobScripter, event) + elif key in [Qt.Key_Backspace, Qt.Key_Delete]: + self.knobScripter.clearConsole() + return QtWidgets.QTextEdit.keyPressEvent(self, event) + + # def mousePressEvent(self, QMouseEvent): + # if QMouseEvent.button() == Qt.RightButton: + # self.knobScripter.clearConsole() + # QtWidgets.QTextEdit.mousePressEvent(self, QMouseEvent) + +# --------------------------------------------------------------------- +# Modified KnobScripterTextEdit to include snippets etc. +# --------------------------------------------------------------------- + + +class KnobScripterTextEditMain(KnobScripterTextEdit): + def __init__(self, knobScripter, output=None, parent=None): + super(KnobScripterTextEditMain, self).__init__(knobScripter) + self.knobScripter = knobScripter + self.script_output = output + self.nukeCompleter = None + self.currentNukeCompletion = None + + ######## + # FROM NUKE's SCRIPT EDITOR START + ######## + self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding) + + # Setup completer + self.nukeCompleter = QtWidgets.QCompleter(self) + self.nukeCompleter.setWidget(self) + self.nukeCompleter.setCompletionMode( + QtWidgets.QCompleter.UnfilteredPopupCompletion) + self.nukeCompleter.setCaseSensitivity(Qt.CaseSensitive) + try: + self.nukeCompleter.setModel(QtGui.QStringListModel()) + except: + self.nukeCompleter.setModel(QtCore.QStringListModel()) + + self.nukeCompleter.activated.connect(self.insertNukeCompletion) + self.nukeCompleter.highlighted.connect(self.completerHighlightChanged) + ######## + # FROM NUKE's SCRIPT EDITOR END + ######## + + def findLongestEndingMatch(self, text, dic): + ''' + If the text ends with a key in the dictionary, it returns the key and value. + If there are several matches, returns the longest one. + False if no matches. 
+ longest = 0 # len of longest match + match_key = None + match_snippet = "" + for key, val in dic.items(): + #match = re.search(r"[\s\.({\[,;=+-]"+key+r"(?:[\s)\]\"]+|$)",text) + match = re.search(r"[\s\.({\[,;=+-]" + key + r"$", text) + if match or text == key: + if len(key) > longest: + longest = len(key) + match_key = key + match_snippet = val + if match_key is None: + return False + return match_key, match_snippet + + def placeholderToEnd(self, text, placeholder): + '''Returns distance (int) from the first occurrence of the placeholder, to the end of the string with placeholders removed''' + search = re.search(placeholder, text) + if not search: + return -1 + from_start = search.start() + total = len(re.sub(placeholder, "", text)) + to_end = total - from_start + return to_end + + def addSnippetText(self, snippet_text): + ''' Adds the selected text as a snippet (taking care of $$, $name$ etc) to the script editor ''' + cursor_placeholder_find = r"(?<!\\)(\$\$)" # Matches $$ + variables_placeholder_find = r"(?:^|[^\\])(\$[\w]*[^\t\n\r\f\v\$\\]+\$)" # Matches $name$ + text = snippet_text + + # Ask the user to fill in each $name$ placeholder first + while True: + placeholder_variable = re.search(variables_placeholder_find, text) + if not placeholder_variable: + break + word = placeholder_variable.groups()[0] + word_bare = word[1:-1] + panel = TextInputDialog(self.knobScripter, name=word_bare, text="", title="Set text for " + word_bare) + if panel.exec_(): + # Accepted + text = text.replace(word, panel.text) + else: + text = text.replace(word, "") + + placeholder_to_end = self.placeholderToEnd(text, cursor_placeholder_find) + + cursor_len = 0 + positions = [m.start() for m in re.finditer(cursor_placeholder_find, text)] + if len(positions) > 1: + cursor_len = positions[1] - positions[0] - 2 + + text = re.sub(cursor_placeholder_find, "", text) + self.cursor.insertText(text) + if placeholder_to_end >= 0: + for i in range(placeholder_to_end): + self.cursor.movePosition(QtGui.QTextCursor.PreviousCharacter) + for i in range(cursor_len): + self.cursor.movePosition( + QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(self.cursor) + + def keyPressEvent(self, event): + + ctrl = bool(event.modifiers() & Qt.ControlModifier) + alt = bool(event.modifiers() & Qt.AltModifier) + shift = bool(event.modifiers() & Qt.ShiftModifier) + key = event.key() + + # ADAPTED FROM NUKE's SCRIPT EDITOR: + # Get completer state + self.nukeCompleterShowing = self.nukeCompleter.popup().isVisible() + + # BEFORE ANYTHING ELSE, IF SPECIAL MODIFIERS SIMPLY IGNORE THE REST + if not self.nukeCompleterShowing and (ctrl or shift or alt): + # Bypassed!
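`placeholderToEnd()` decides where the caret lands after a snippet expands: it measures how far the first `$$` placeholder sits from the end of the text once every placeholder is stripped, and `addSnippetText()` then walks the cursor back that many characters. The arithmetic checked standalone:

```python
import re

CURSOR = r"(?<!\\)(\$\$)"  # the '$$' cursor placeholder

def placeholder_to_end(text, placeholder=CURSOR):
    """Distance from the first placeholder to the end, placeholders removed."""
    match = re.search(placeholder, text)
    if not match:
        return -1
    return len(re.sub(placeholder, "", text)) - match.start()

snippet = "print($$)"
assert re.sub(CURSOR, "", snippet) == "print()"
assert placeholder_to_end(snippet) == 1  # walk back 1 char: caret lands inside ()
```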
+ if key not in [Qt.Key_Return, Qt.Key_Enter, Qt.Key_Tab]: + KnobScripterTextEdit.keyPressEvent(self, event) + return + + # If the completer is showing + if self.nukeCompleterShowing: + tc = self.textCursor() + # If we're hitting enter, do completion + if key in [Qt.Key_Return, Qt.Key_Enter, Qt.Key_Tab]: + if not self.currentNukeCompletion: + self.nukeCompleter.setCurrentRow(0) + self.currentNukeCompletion = self.nukeCompleter.currentCompletion() + # print str(self.nukeCompleter.completionModel[0]) + self.insertNukeCompletion(self.currentNukeCompletion) + self.nukeCompleter.popup().hide() + self.nukeCompleterShowing = False + # If you're hitting right or escape, hide the popup + elif key == Qt.Key_Right or key == Qt.Key_Escape: + self.nukeCompleter.popup().hide() + self.nukeCompleterShowing = False + # If you hit tab, escape or ctrl-space, hide the completer + elif key == Qt.Key_Tab or key == Qt.Key_Escape or (ctrl and key == Qt.Key_Space): + self.currentNukeCompletion = "" + self.nukeCompleter.popup().hide() + self.nukeCompleterShowing = False + # If none of the above, update the completion model + else: + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + # Edit completion model + colNum = tc.columnNumber() + posNum = tc.position() + inputText = self.toPlainText() + inputTextSplit = inputText.splitlines() + runningLength = 0 + currentLine = None + for line in inputTextSplit: + length = len(line) + runningLength += length + if runningLength >= posNum: + currentLine = line + break + runningLength += 1 + if currentLine: + completionPart = currentLine.split(" ")[-1] + if "(" in completionPart: + completionPart = completionPart.split("(")[-1] + self.completeNukePartUnderCursor(completionPart) + return + + if type(event) == QtGui.QKeyEvent: + if key == Qt.Key_Escape: # Close the knobscripter... + self.knobScripter.close() + elif not ctrl and not alt and not shift and event.key() == Qt.Key_Tab: + self.placeholder = "$$" + # 1. Set the cursor + self.cursor = self.textCursor() + + # 2. Save text before and after + cpos = self.cursor.position() + text_before_cursor = self.toPlainText()[:cpos] + line_before_cursor = text_before_cursor.split('\n')[-1] + text_after_cursor = self.toPlainText()[cpos:] + + # 3. Check coincidences in snippets dicts + try: # Meaning snippet found + match_key, match_snippet = self.findLongestEndingMatch( + line_before_cursor, self.knobScripter.snippets) + for i in range(len(match_key)): + self.cursor.deletePreviousChar() + # This function takes care of adding the appropriate snippet and moving the cursor... + self.addSnippetText(match_snippet) + except: # Meaning snippet not found... 
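Tab-expansion picks the snippet whose trigger ends the current line, preferring the longest trigger, as `findLongestEndingMatch()` does. A standalone version; `re.escape()` is added here so triggers containing regex metacharacters cannot break the search, where the original interpolates the key raw:

```python
import re

def longest_ending_match(text, snippets):
    """Return (key, snippet) for the longest key ending `text`, else None."""
    best = None
    for key, val in snippets.items():
        # The key must close the line right after a separator character,
        # or be the entire text.
        if re.search(r"[\s\.({\[,;=+-]" + re.escape(key) + r"$", text) or text == key:
            if best is None or len(key) > len(best[0]):
                best = (key, val)
    return best

snippets = {"fr": "for i in range($$):", "f": "something shorter"}
print(longest_ending_match("x = fr", snippets))  # ('fr', 'for i in range($$):')
```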
+ # ADAPTED FROM NUKE's SCRIPT EDITOR: + tc = self.textCursor() + allCode = self.toPlainText() + colNum = tc.columnNumber() + posNum = tc.position() + + # ...and if there's text in the editor + if len(allCode.split()) > 0: + # There is text in the editor + currentLine = tc.block().text() + + # If you're not at the end of the line just add a tab + if colNum < len(currentLine): + # If there isn't a ')' directly to the right of the cursor add a tab + if currentLine[colNum:colNum + 1] != ')': + KnobScripterTextEdit.keyPressEvent(self, event) + return + # Else show the completer + else: + completionPart = currentLine[:colNum].split( + " ")[-1] + if "(" in completionPart: + completionPart = completionPart.split( + "(")[-1] + + self.completeNukePartUnderCursor( + completionPart) + + return + + # If you are at the end of the line, + else: + # If there's nothing to the right of you add a tab + if currentLine[colNum - 1:] == "" or currentLine.endswith(" "): + KnobScripterTextEdit.keyPressEvent(self, event) + return + # Else update completionPart and show the completer + completionPart = currentLine.split(" ")[-1] + if "(" in completionPart: + completionPart = completionPart.split("(")[-1] + + self.completeNukePartUnderCursor(completionPart) + return + + KnobScripterTextEdit.keyPressEvent(self, event) + elif event.key() in [Qt.Key_Enter, Qt.Key_Return]: + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers == QtCore.Qt.ControlModifier: + self.runScript() + else: + KnobScripterTextEdit.keyPressEvent(self, event) + else: + KnobScripterTextEdit.keyPressEvent(self, event) + + def getPyObjects(self, text): + ''' Returns a list containing all the functions, classes and variables found within the selected python text (code) ''' + matches = [] + # 1: Remove text inside triple quotes (leaving the quotes) + text_clean = '""'.join(text.split('"""')[::2]) + text_clean = '""'.join(text_clean.split("'''")[::2]) + + # 2: Remove text inside of quotes (leaving the quotes) except if \" + lines = text_clean.split("\n") + text_clean = "" + for line in lines: + line_clean = '""'.join(line.split('"')[::2]) + line_clean = '""'.join(line_clean.split("'")[::2]) + line_clean = line_clean.split("#")[0] + text_clean += line_clean + "\n" + + # 3. Split into segments (lines plus ";") + segments = re.findall(r"[^\n;]+", text_clean) + + # 4. Go case by case. 
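`getPyObjects()` first blanks out string literals so that names and `#` characters inside quotes do not pollute the scan. The even/odd split trick it relies on, in isolation (escaped quotes are not handled here, a simplification the original's step-2 comment also concedes):

```python
def strip_quoted(text):
    """Replace each double-quoted span with an empty string literal.

    Splitting on '"' leaves the even-indexed pieces outside the quotes;
    rejoining them with '""' keeps the code shape intact.
    """
    return '""'.join(text.split('"')[::2])

line = 'name = "some # tricky text"  # real comment'
cleaned = strip_quoted(line).split("#")[0]  # now the real comment can be cut safely
print(cleaned)  # name = ""
```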
+ for s in segments: + # Declared vars + matches += re.findall(r"([\w\.]+)(?=[,\s\w]*=[^=]+$)", s) + # Def functions and arguments + function = re.findall(r"[\s]*def[\s]+([\w\.]+)[\s]*\([\s]*", s) + if len(function): + matches += function + args = re.split(r"[\s]*def[\s]+([\w\.]+)[\s]*\([\s]*", s) + if len(args) > 1: + args = args[-1] + matches += re.findall( + r"(?adrianpueyo.com, 2016-2019') + kspSignature.setOpenExternalLinks(True) + kspSignature.setStyleSheet('''color:#555;font-size:9px;''') + kspSignature.setAlignment(QtCore.Qt.AlignRight) + + fontLabel = QtWidgets.QLabel("Font:") + self.fontBox = QtWidgets.QFontComboBox() + self.fontBox.setCurrentFont(QtGui.QFont(self.font)) + self.fontBox.currentFontChanged.connect(self.fontChanged) + + fontSizeLabel = QtWidgets.QLabel("Font size:") + self.fontSizeBox = QtWidgets.QSpinBox() + self.fontSizeBox.setValue(self.oldFontSize) + self.fontSizeBox.setMinimum(6) + self.fontSizeBox.setMaximum(100) + self.fontSizeBox.valueChanged.connect(self.fontSizeChanged) + + windowWLabel = QtWidgets.QLabel("Width (px):") + windowWLabel.setToolTip("Default window width in pixels") + self.windowWBox = QtWidgets.QSpinBox() + self.windowWBox.setValue(self.knobScripter.windowDefaultSize[0]) + self.windowWBox.setMinimum(200) + self.windowWBox.setMaximum(4000) + self.windowWBox.setToolTip("Default window width in pixels") + + windowHLabel = QtWidgets.QLabel("Height (px):") + windowHLabel.setToolTip("Default window height in pixels") + self.windowHBox = QtWidgets.QSpinBox() + self.windowHBox.setValue(self.knobScripter.windowDefaultSize[1]) + self.windowHBox.setMinimum(100) + self.windowHBox.setMaximum(2000) + self.windowHBox.setToolTip("Default window height in pixels") + + # TODO: "Grab current dimensions" button + + tabSpaceLabel = QtWidgets.QLabel("Tab spaces:") + tabSpaceLabel.setToolTip("Number of spaces to add with the tab key.") + self.tabSpace2 = QtWidgets.QRadioButton("2") + self.tabSpace4 = QtWidgets.QRadioButton("4") + tabSpaceButtonGroup = QtWidgets.QButtonGroup(self) + tabSpaceButtonGroup.addButton(self.tabSpace2) + tabSpaceButtonGroup.addButton(self.tabSpace4) + self.tabSpace2.setChecked(self.knobScripter.tabSpaces == 2) + self.tabSpace4.setChecked(self.knobScripter.tabSpaces == 4) + + pinDefaultLabel = QtWidgets.QLabel("Always on top:") + pinDefaultLabel.setToolTip("Default mode of the PIN toggle.") + self.pinDefaultOn = QtWidgets.QRadioButton("On") + self.pinDefaultOff = QtWidgets.QRadioButton("Off") + pinDefaultButtonGroup = QtWidgets.QButtonGroup(self) + pinDefaultButtonGroup.addButton(self.pinDefaultOn) + pinDefaultButtonGroup.addButton(self.pinDefaultOff) + self.pinDefaultOn.setChecked(self.knobScripter.pinned == True) + self.pinDefaultOff.setChecked(self.knobScripter.pinned == False) + self.pinDefaultOn.clicked.connect(lambda: self.knobScripter.pin(True)) + self.pinDefaultOff.clicked.connect( + lambda: self.knobScripter.pin(False)) + + colorSchemeLabel = QtWidgets.QLabel("Color scheme:") + colorSchemeLabel.setToolTip("Syntax highlighting text style.") + self.colorSchemeSublime = QtWidgets.QRadioButton("subl") + self.colorSchemeNuke = QtWidgets.QRadioButton("nuke") + colorSchemeButtonGroup = QtWidgets.QButtonGroup(self) + colorSchemeButtonGroup.addButton(self.colorSchemeSublime) + colorSchemeButtonGroup.addButton(self.colorSchemeNuke) + colorSchemeButtonGroup.buttonClicked.connect(self.colorSchemeChanged) + self.colorSchemeSublime.setChecked( + self.knobScripter.color_scheme == "sublime") + self.colorSchemeNuke.setChecked( + 
self.knobScripter.color_scheme == "nuke")
+
+ showLabelsLabel = QtWidgets.QLabel("Show labels:")
+ showLabelsLabel.setToolTip(
+ "Display knob labels on the knob dropdown\nOtherwise, shows the internal name only.")
+ self.showLabelsOn = QtWidgets.QRadioButton("On")
+ self.showLabelsOff = QtWidgets.QRadioButton("Off")
+ showLabelsButtonGroup = QtWidgets.QButtonGroup(self)
+ showLabelsButtonGroup.addButton(self.showLabelsOn)
+ showLabelsButtonGroup.addButton(self.showLabelsOff)
+ # Reflect the current "show labels" preference (not the pin state).
+ self.showLabelsOn.setChecked(self.knobScripter.show_labels)
+ self.showLabelsOff.setChecked(not self.knobScripter.show_labels)
+
+ self.buttonBox = QtWidgets.QDialogButtonBox(
+ QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
+ self.buttonBox.accepted.connect(self.savePrefs)
+ self.buttonBox.rejected.connect(self.cancelPrefs)
+
+ # Load custom values from the prefs file, if any
+ self.ksPrefs = self.knobScripter.loadPrefs()
+ if self.ksPrefs:
+ try:
+ self.fontSizeBox.setValue(self.ksPrefs['font_size'])
+ self.windowWBox.setValue(self.ksPrefs['window_default_w'])
+ self.windowHBox.setValue(self.ksPrefs['window_default_h'])
+ self.tabSpace2.setChecked(self.ksPrefs['tab_spaces'] == 2)
+ self.tabSpace4.setChecked(self.ksPrefs['tab_spaces'] == 4)
+ self.pinDefaultOn.setChecked(self.ksPrefs['pin_default'] == 1)
+ self.pinDefaultOff.setChecked(self.ksPrefs['pin_default'] == 0)
+ self.showLabelsOn.setChecked(self.ksPrefs['show_labels'] == 1)
+ self.showLabelsOff.setChecked(self.ksPrefs['show_labels'] == 0)
+ self.colorSchemeSublime.setChecked(
+ self.ksPrefs['color_scheme'] == "sublime")
+ self.colorSchemeNuke.setChecked(
+ self.ksPrefs['color_scheme'] == "nuke")
+ except (KeyError, TypeError): # Missing or outdated prefs keys: keep defaults
+ pass
+
+ # Layouts
+ font_layout = QtWidgets.QHBoxLayout()
+ font_layout.addWidget(fontLabel)
+ font_layout.addWidget(self.fontBox)
+
+ fontSize_layout = QtWidgets.QHBoxLayout()
+ fontSize_layout.addWidget(fontSizeLabel)
+ fontSize_layout.addWidget(self.fontSizeBox)
+
+ windowW_layout = QtWidgets.QHBoxLayout()
+ windowW_layout.addWidget(windowWLabel)
+ windowW_layout.addWidget(self.windowWBox)
+
+ windowH_layout = QtWidgets.QHBoxLayout()
+ windowH_layout.addWidget(windowHLabel)
+ windowH_layout.addWidget(self.windowHBox)
+
+ tabSpacesButtons_layout = QtWidgets.QHBoxLayout()
+ tabSpacesButtons_layout.addWidget(self.tabSpace2)
+ tabSpacesButtons_layout.addWidget(self.tabSpace4)
+ tabSpaces_layout = QtWidgets.QHBoxLayout()
+ tabSpaces_layout.addWidget(tabSpaceLabel)
+ tabSpaces_layout.addLayout(tabSpacesButtons_layout)
+
+ pinDefaultButtons_layout = QtWidgets.QHBoxLayout()
+ pinDefaultButtons_layout.addWidget(self.pinDefaultOn)
+ pinDefaultButtons_layout.addWidget(self.pinDefaultOff)
+ pinDefault_layout = QtWidgets.QHBoxLayout()
+ pinDefault_layout.addWidget(pinDefaultLabel)
+ pinDefault_layout.addLayout(pinDefaultButtons_layout)
+
+ showLabelsButtons_layout = QtWidgets.QHBoxLayout()
+ showLabelsButtons_layout.addWidget(self.showLabelsOn)
+ showLabelsButtons_layout.addWidget(self.showLabelsOff)
+ showLabels_layout = QtWidgets.QHBoxLayout()
+ showLabels_layout.addWidget(showLabelsLabel)
+ showLabels_layout.addLayout(showLabelsButtons_layout)
+
+ colorSchemeButtons_layout = QtWidgets.QHBoxLayout()
+ colorSchemeButtons_layout.addWidget(self.colorSchemeSublime)
+ colorSchemeButtons_layout.addWidget(self.colorSchemeNuke)
+ colorScheme_layout = QtWidgets.QHBoxLayout()
+ 
colorScheme_layout.addWidget(colorSchemeLabel)
+ colorScheme_layout.addLayout(colorSchemeButtons_layout)
+
+ self.master_layout = QtWidgets.QVBoxLayout()
+ self.master_layout.addWidget(kspTitle)
+ self.master_layout.addWidget(kspSignature)
+ self.master_layout.addWidget(kspLine)
+ self.master_layout.addLayout(font_layout)
+ self.master_layout.addLayout(fontSize_layout)
+ self.master_layout.addLayout(windowW_layout)
+ self.master_layout.addLayout(windowH_layout)
+ self.master_layout.addLayout(tabSpaces_layout)
+ self.master_layout.addLayout(pinDefault_layout)
+ self.master_layout.addLayout(showLabels_layout)
+ self.master_layout.addLayout(colorScheme_layout)
+ self.master_layout.addWidget(self.buttonBox)
+ self.setLayout(self.master_layout)
+ self.setFixedSize(self.minimumSize())
+
+ def savePrefs(self):
+ self.font = self.fontBox.currentFont().family()
+ ks_prefs = {
+ 'font_size': self.fontSizeBox.value(),
+ 'window_default_w': self.windowWBox.value(),
+ 'window_default_h': self.windowHBox.value(),
+ 'tab_spaces': self.tabSpaceValue(),
+ 'pin_default': self.pinDefaultValue(),
+ 'show_labels': self.showLabelsValue(),
+ 'font': self.font,
+ 'color_scheme': self.colorSchemeValue(),
+ }
+ self.knobScripter.script_editor_font.setFamily(self.font)
+ self.knobScripter.script_editor.setFont(
+ self.knobScripter.script_editor_font)
+ self.knobScripter.font = self.font
+ self.knobScripter.color_scheme = self.colorSchemeValue()
+ self.knobScripter.tabSpaces = self.tabSpaceValue()
+ self.knobScripter.script_editor.tabSpaces = self.tabSpaceValue()
+ with open(self.prefs_txt, "w") as f:
+ json.dump(ks_prefs, f, sort_keys=True, indent=4)
+ self.accept()
+ self.knobScripter.highlighter.rehighlight()
+ self.knobScripter.show_labels = self.showLabelsValue()
+ if self.knobScripter.nodeMode:
+ self.knobScripter.refreshClicked()
+ return ks_prefs # json.dump returns None, so return the saved dict
+
+ def cancelPrefs(self):
+ self.knobScripter.script_editor_font.setPointSize(self.oldFontSize)
+ self.knobScripter.script_editor.setFont(
+ self.knobScripter.script_editor_font)
+ self.knobScripter.color_scheme = self.oldScheme
+ self.knobScripter.highlighter.rehighlight()
+ self.reject()
+
+ def fontSizeChanged(self):
+ self.knobScripter.script_editor_font.setPointSize(
+ self.fontSizeBox.value())
+ self.knobScripter.script_editor.setFont(
+ self.knobScripter.script_editor_font)
+ return
+
+ def fontChanged(self):
+ self.font = self.fontBox.currentFont().family()
+ self.knobScripter.script_editor_font.setFamily(self.font)
+ self.knobScripter.script_editor.setFont(
+ self.knobScripter.script_editor_font)
+ return
+
+ def colorSchemeChanged(self):
+ self.knobScripter.color_scheme = self.colorSchemeValue()
+ self.knobScripter.highlighter.rehighlight()
+ return
+
+ def tabSpaceValue(self):
+ return 2 if self.tabSpace2.isChecked() else 4
+
+ def pinDefaultValue(self):
+ return 1 if self.pinDefaultOn.isChecked() else 0
+
+ def showLabelsValue(self):
+ return 1 if self.showLabelsOn.isChecked() else 0
+
+ def colorSchemeValue(self):
+ return "nuke" if self.colorSchemeNuke.isChecked() else "sublime"
+
+ def closeEvent(self, event):
+ # Closing the window without saving counts as a cancel;
+ # cancelPrefs() already rejects the dialog, so just accept the event.
+ self.cancelPrefs()
+ event.accept()
+
+
+def updateContext():
+ '''
+ Get the current selection of nodes with their appropriate context.
+ Doing this outside the KnobScripter forces a context update inside
+ groups when needed.
+ '''
+ global knobScripterSelectedNodes
+ knobScripterSelectedNodes = nuke.selectedNodes()
+ return
+
+# --------------------------------
+# FindReplace
+# --------------------------------
+
+
+class 
FindReplaceWidget(QtWidgets.QWidget): + ''' SearchReplace Widget for the knobscripter. FindReplaceWidget(editor = QPlainTextEdit) ''' + + def __init__(self, parent): + super(FindReplaceWidget, self).__init__(parent) + + self.editor = parent.script_editor + + self.initUI() + + def initUI(self): + + # -------------- + # Find Row + # -------------- + + # Widgets + self.find_label = QtWidgets.QLabel("Find:") + # self.find_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed) + self.find_label.setFixedWidth(50) + self.find_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + self.find_lineEdit = QtWidgets.QLineEdit() + self.find_next_button = QtWidgets.QPushButton("Next") + self.find_next_button.clicked.connect(self.find) + self.find_prev_button = QtWidgets.QPushButton("Previous") + self.find_prev_button.clicked.connect(self.findBack) + self.find_lineEdit.returnPressed.connect(self.find_next_button.click) + + # Layout + self.find_layout = QtWidgets.QHBoxLayout() + self.find_layout.addWidget(self.find_label) + self.find_layout.addWidget(self.find_lineEdit, stretch=1) + self.find_layout.addWidget(self.find_next_button) + self.find_layout.addWidget(self.find_prev_button) + + # -------------- + # Replace Row + # -------------- + + # Widgets + self.replace_label = QtWidgets.QLabel("Replace:") + # self.replace_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed) + self.replace_label.setFixedWidth(50) + self.replace_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + self.replace_lineEdit = QtWidgets.QLineEdit() + self.replace_button = QtWidgets.QPushButton("Replace") + self.replace_button.clicked.connect(self.replace) + self.replace_all_button = QtWidgets.QPushButton("Replace All") + self.replace_all_button.clicked.connect( + lambda: self.replace(rep_all=True)) + self.replace_lineEdit.returnPressed.connect(self.replace_button.click) + + # Layout + self.replace_layout = QtWidgets.QHBoxLayout() + self.replace_layout.addWidget(self.replace_label) + self.replace_layout.addWidget(self.replace_lineEdit, stretch=1) + self.replace_layout.addWidget(self.replace_button) + self.replace_layout.addWidget(self.replace_all_button) + + # Info text + self.info_text = QtWidgets.QLabel("") + self.info_text.setVisible(False) + self.info_text.mousePressEvent = lambda x: self.info_text.setVisible( + False) + #f = self.info_text.font() + # f.setItalic(True) + # self.info_text.setFont(f) + # self.info_text.clicked.connect(lambda:self.info_text.setVisible(False)) + + # Divider line + line = QtWidgets.QFrame() + line.setFrameShape(QtWidgets.QFrame.HLine) + line.setFrameShadow(QtWidgets.QFrame.Sunken) + line.setLineWidth(0) + line.setMidLineWidth(1) + line.setFrameShadow(QtWidgets.QFrame.Sunken) + + # -------------- + # Main Layout + # -------------- + + self.layout = QtWidgets.QVBoxLayout() + self.layout.addSpacing(4) + self.layout.addWidget(self.info_text) + self.layout.addLayout(self.find_layout) + self.layout.addLayout(self.replace_layout) + self.layout.setSpacing(4) + try: # >n11 + self.layout.setMargin(2) + except: # 0: # If not found but there are matches, start over + cursor.movePosition(QtGui.QTextCursor.Start) + self.editor.setTextCursor(cursor) + self.editor.find(find_str, flags) + else: + cursor.insertText(rep_str) + self.editor.find( + rep_str, flags | QtGui.QTextDocument.FindBackward) + + cursor.endEditBlock() + self.replace_lineEdit.setFocus() + return + + +# -------------------------------- +# Snippets +# 
--------------------------------
+class SnippetsPanel(QtWidgets.QDialog):
+ def __init__(self, parent):
+ super(SnippetsPanel, self).__init__(parent)
+
+ self.knobScripter = parent
+
+ self.setWindowFlags(self.windowFlags() |
+ QtCore.Qt.WindowStaysOnTopHint)
+ self.setWindowTitle("Snippet editor")
+
+ self.snippets_txt_path = self.knobScripter.snippets_txt_path
+ self.snippets_dict = self.loadSnippetsDict(path=self.snippets_txt_path)
+
+ self.initUI()
+ self.resize(500, 300)
+
+ def initUI(self):
+ self.layout = QtWidgets.QVBoxLayout()
+
+ # First Area (Titles)
+ title_layout = QtWidgets.QHBoxLayout()
+ shortcuts_label = QtWidgets.QLabel("Shortcut")
+ code_label = QtWidgets.QLabel("Code snippet")
+ title_layout.addWidget(shortcuts_label, stretch=1)
+ title_layout.addWidget(code_label, stretch=2)
+ self.layout.addLayout(title_layout)
+
+ # Main Scroll area
+ self.scroll_content = QtWidgets.QWidget()
+ self.scroll_layout = QtWidgets.QVBoxLayout()
+
+ self.buildSnippetWidgets()
+
+ self.scroll_content.setLayout(self.scroll_layout)
+
+ # Scroll Area Properties
+ self.scroll = QtWidgets.QScrollArea()
+ self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
+ self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
+ self.scroll.setWidgetResizable(True)
+ self.scroll.setWidget(self.scroll_content)
+
+ self.layout.addWidget(self.scroll)
+
+ # Lower buttons
+ self.bottom_layout = QtWidgets.QHBoxLayout()
+
+ self.add_btn = QtWidgets.QPushButton("Add snippet")
+ self.add_btn.setToolTip("Create empty fields for an extra snippet.")
+ self.add_btn.clicked.connect(self.addSnippet)
+ self.bottom_layout.addWidget(self.add_btn)
+
+ self.addPath_btn = QtWidgets.QPushButton("Add custom path")
+ self.addPath_btn.setToolTip(
+ "Add a custom path to an external snippets .txt file.")
+ self.addPath_btn.clicked.connect(self.addCustomPath)
+ self.bottom_layout.addWidget(self.addPath_btn)
+
+ self.bottom_layout.addStretch()
+
+ self.save_btn = QtWidgets.QPushButton('OK')
+ self.save_btn.setToolTip(
+ "Save the snippets into a json file and close the panel.")
+ self.save_btn.clicked.connect(self.okPressed)
+ self.bottom_layout.addWidget(self.save_btn)
+
+ self.cancel_btn = QtWidgets.QPushButton("Cancel")
+ self.cancel_btn.setToolTip("Cancel any new snippets or modifications.")
+ self.cancel_btn.clicked.connect(self.close)
+ self.bottom_layout.addWidget(self.cancel_btn)
+
+ self.apply_btn = QtWidgets.QPushButton('Apply')
+ self.apply_btn.setToolTip("Save the snippets into a json file.")
+ self.apply_btn.setShortcut('Ctrl+S')
+ self.apply_btn.clicked.connect(self.applySnippets)
+ self.bottom_layout.addWidget(self.apply_btn)
+
+ self.help_btn = QtWidgets.QPushButton('Help')
+ self.help_btn.setShortcut('F1')
+ self.help_btn.clicked.connect(self.showHelp)
+ self.bottom_layout.addWidget(self.help_btn)
+
+ self.layout.addLayout(self.bottom_layout)
+
+ self.setLayout(self.layout)
+
+ def reload(self):
+ '''
+ Clear everything without saving, and rebuild the snippet widgets.
+ Only to be called while the panel is closed (i.e. not shown).
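+ It re-reads the snippets file from disk before rebuilding.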
+ '''
+ for i in reversed(range(self.scroll_layout.count())):
+ self.scroll_layout.itemAt(i).widget().deleteLater()
+
+ self.snippets_dict = self.loadSnippetsDict(path=self.snippets_txt_path)
+
+ self.buildSnippetWidgets()
+
+ def buildSnippetWidgets(self):
+ for i, (key, val) in enumerate(self.snippets_dict.items()):
+ if re.match(r"\[custom-path-[0-9]+\]$", key):
+ file_edit = SnippetFilePath(val)
+ self.scroll_layout.insertWidget(-1, file_edit)
+ else:
+ snippet_edit = SnippetEdit(key, val, parent=self)
+ self.scroll_layout.insertWidget(-1, snippet_edit)
+
+ def loadSnippetsDict(self, path=""):
+ ''' Load the snippets dictionary from the given json file path. '''
+ if path == "":
+ path = self.knobScripter.snippets_txt_path
+ if not os.path.isfile(path):
+ return {}
+ with open(path, "r") as f:
+ self.snippets = json.load(f)
+ return self.snippets
+
+ def getSnippetsAsDict(self):
+ dic = {}
+ num_snippets = self.scroll_layout.count()
+ path_i = 1
+ for s in range(num_snippets):
+ se = self.scroll_layout.itemAt(s).widget()
+ if se.__class__.__name__ == "SnippetEdit":
+ key = se.shortcut_editor.text()
+ val = se.script_editor.toPlainText()
+ if key != "":
+ dic[key] = val
+ else:
+ path = se.filepath_lineEdit.text()
+ if path != "":
+ dic["[custom-path-{}]".format(str(path_i))] = path
+ path_i += 1
+ return dic
+
+ def saveSnippets(self, snippets=""):
+ # Default to the current contents of the panel
+ if snippets == "":
+ snippets = self.getSnippetsAsDict()
+ with open(self.snippets_txt_path, "w") as f:
+ json.dump(snippets, f, sort_keys=True, indent=4)
+ return snippets # json.dump returns None; return the saved dict instead
+
+ def applySnippets(self):
+ self.saveSnippets()
+ self.knobScripter.snippets = self.knobScripter.loadSnippets(maxDepth=5)
+
+ def okPressed(self):
+ self.applySnippets()
+ self.accept()
+
+ def addSnippet(self, key="", val=""):
+ se = SnippetEdit(key, val, parent=self)
+ self.scroll_layout.insertWidget(0, se)
+ self.show()
+ return se
+
+ def addCustomPath(self, path=""):
+ cpe = SnippetFilePath(path)
+ self.scroll_layout.insertWidget(0, cpe)
+ self.show()
+ cpe.browseSnippets()
+ return cpe
+
+ def showHelp(self):
+ ''' Create a new snippet, auto-completed with the help '''
+ help_key = "help"
+ help_val = """Snippets are a convenient way to have code blocks that you can call through a shortcut.\n\n1. Simply write a shortcut on the text input field on the left. You can see this one is set to "help".\n\n2. Then, write any code in this script editor. You can include $$ as the placeholder for where you'll want the text cursor to appear.\n\n3. Finally, click OK or Apply to save the snippets. 
On the main script editor, you'll be able to call any snippet by writing the shortcut (in this example: help) and pressing the Tab key.\n\nIn order to remove a snippet, simply leave the shortcut and contents blank, and save the snippets.""" + help_se = self.addSnippet(help_key, help_val) + help_se.script_editor.resize(160, 160) + + +class SnippetEdit(QtWidgets.QWidget): + ''' Simple widget containing two fields, for the snippet shortcut and content ''' + + def __init__(self, key="", val="", parent=None): + super(SnippetEdit, self).__init__(parent) + + self.knobScripter = parent.knobScripter + self.color_scheme = self.knobScripter.color_scheme + self.layout = QtWidgets.QHBoxLayout() + + self.shortcut_editor = QtWidgets.QLineEdit(self) + f = self.shortcut_editor.font() + f.setWeight(QtGui.QFont.Bold) + self.shortcut_editor.setFont(f) + self.shortcut_editor.setText(str(key)) + #self.script_editor = QtWidgets.QTextEdit(self) + self.script_editor = KnobScripterTextEdit() + self.script_editor.setMinimumHeight(100) + self.script_editor.setStyleSheet( + 'background:#282828;color:#EEE;') # Main Colors + self.highlighter = KSScriptEditorHighlighter( + self.script_editor.document(), self) + self.script_editor_font = self.knobScripter.script_editor_font + self.script_editor.setFont(self.script_editor_font) + self.script_editor.resize(90, 90) + self.script_editor.setPlainText(str(val)) + self.layout.addWidget(self.shortcut_editor, + stretch=1, alignment=Qt.AlignTop) + self.layout.addWidget(self.script_editor, stretch=2) + self.layout.setContentsMargins(0, 0, 0, 0) + + self.setLayout(self.layout) + + +class SnippetFilePath(QtWidgets.QWidget): + ''' Simple widget containing a filepath lineEdit and a button to open the file browser ''' + + def __init__(self, path="", parent=None): + super(SnippetFilePath, self).__init__(parent) + + self.layout = QtWidgets.QHBoxLayout() + + self.custompath_label = QtWidgets.QLabel(self) + self.custompath_label.setText("Custom path: ") + + self.filepath_lineEdit = QtWidgets.QLineEdit(self) + self.filepath_lineEdit.setText(str(path)) + #self.script_editor = QtWidgets.QTextEdit(self) + self.filepath_lineEdit.setStyleSheet( + 'background:#282828;color:#EEE;') # Main Colors + self.script_editor_font = QtGui.QFont() + self.script_editor_font.setFamily("Courier") + self.script_editor_font.setStyleHint(QtGui.QFont.Monospace) + self.script_editor_font.setFixedPitch(True) + self.script_editor_font.setPointSize(11) + self.filepath_lineEdit.setFont(self.script_editor_font) + + self.file_button = QtWidgets.QPushButton(self) + self.file_button.setText("Browse...") + self.file_button.clicked.connect(self.browseSnippets) + + self.layout.addWidget(self.custompath_label) + self.layout.addWidget(self.filepath_lineEdit) + self.layout.addWidget(self.file_button) + self.layout.setContentsMargins(0, 10, 0, 10) + + self.setLayout(self.layout) + + def browseSnippets(self): + ''' Opens file panel for ...snippets.txt ''' + browseLocation = nuke.getFilename('Select snippets file', '*.txt') + + if not browseLocation: + return + + self.filepath_lineEdit.setText(browseLocation) + return + + +# -------------------------------- +# Implementation +# -------------------------------- + +def showKnobScripter(knob="knobChanged"): + selection = nuke.selectedNodes() + if not len(selection): + pan = KnobScripter() + else: + pan = KnobScripter(selection[0], knob) + pan.show() + + +def addKnobScripterPanel(): + global knobScripterPanel + try: + knobScripterPanel = 
panels.registerWidgetAsPanel('nuke.KnobScripterPane', 'Knob Scripter',
+ 'com.adrianpueyo.KnobScripterPane')
+ knobScripterPanel.addToPane(nuke.getPaneFor('Properties.1'))
+
+ except:
+ # Fallback: register the panel without docking it into the Properties pane
+ knobScripterPanel = panels.registerWidgetAsPanel(
+ 'nuke.KnobScripterPane', 'Knob Scripter', 'com.adrianpueyo.KnobScripterPane')
+
+
+nuke.KnobScripterPane = KnobScripterPane
+log("KS LOADED")
+ksShortcut = "alt+z"
+addKnobScripterPanel()
+nuke.menu('Nuke').addCommand(
+ 'Edit/Node/Open Floating Knob Scripter', showKnobScripter, ksShortcut)
+nuke.menu('Nuke').addCommand('Edit/Node/Update KnobScripter Context',
+ updateContext).setVisible(False)
diff --git a/setup/nuke/nuke_path/init.py b/setup/nuke/nuke_path/init.py
new file mode 100644
index 0000000000..0ea5d1ad7d
--- /dev/null
+++ b/setup/nuke/nuke_path/init.py
@@ -0,0 +1,2 @@
+# Default colorspace for mov Writes
+nuke.knobDefault('Write.mov.colorspace', 'sRGB')
diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py
index fd87c98246..7f5de6013d 100644
--- a/setup/nuke/nuke_path/menu.py
+++ b/setup/nuke/nuke_path/menu.py
@@ -1,4 +1,7 @@
+import os
+import sys
 import atom_server
+import KnobScripter
 
 from pype.nuke.lib import (
     writes_version_sync,
@@ -16,6 +19,6 @@ log = Logger().get_logger(__name__, "nuke")
 nuke.addOnScriptSave(onScriptLoad)
 nuke.addOnScriptLoad(checkInventoryVersions)
 nuke.addOnScriptSave(checkInventoryVersions)
-nuke.addOnScriptSave(writes_version_sync)
-log.info('Automatic syncing of write file knob to script version')
+# nuke.addOnScriptSave(writes_version_sync)
+# log.info('Automatic syncing of write file knob to script version')