diff --git a/pype/__init__.py b/pype/__init__.py index 91b72d7de5..89c653bf6f 100644 --- a/pype/__init__.py +++ b/pype/__init__.py @@ -9,7 +9,7 @@ from pypeapp import config import logging log = logging.getLogger(__name__) -__version__ = "2.3.0" +__version__ = "2.5.0" PACKAGE_DIR = os.path.dirname(__file__) PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") diff --git a/pype/blender/__init__.py b/pype/blender/__init__.py new file mode 100644 index 0000000000..8a29917e40 --- /dev/null +++ b/pype/blender/__init__.py @@ -0,0 +1,34 @@ +import logging +from pathlib import Path +import os + +import bpy + +from avalon import api as avalon +from pyblish import api as pyblish + +from .plugin import AssetLoader + +logger = logging.getLogger("pype.blender") + +PARENT_DIR = os.path.dirname(__file__) +PACKAGE_DIR = os.path.dirname(PARENT_DIR) +PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins") + +PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish") +LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load") +CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create") + + +def install(): + """Install Blender configuration for Avalon.""" + pyblish.register_plugin_path(str(PUBLISH_PATH)) + avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH)) + avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH)) + + +def uninstall(): + """Uninstall Blender configuration for Avalon.""" + pyblish.deregister_plugin_path(str(PUBLISH_PATH)) + avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH)) + avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH)) diff --git a/pype/blender/action.py b/pype/blender/action.py new file mode 100644 index 0000000000..4bd7e303fc --- /dev/null +++ b/pype/blender/action.py @@ -0,0 +1,47 @@ +import bpy + +import pyblish.api + +from ..action import get_errored_instances_from_context + + +class SelectInvalidAction(pyblish.api.Action): + """Select invalid objects in Blender when a publish plug-in failed.""" + label = "Select Invalid" + 
on = "failed" + icon = "search" + + def process(self, context, plugin): + errored_instances = get_errored_instances_from_context(context) + instances = pyblish.api.instances_by_plugin(errored_instances, plugin) + + # Get the invalid nodes for the plug-ins + self.log.info("Finding invalid nodes...") + invalid = list() + for instance in instances: + invalid_nodes = plugin.get_invalid(instance) + if invalid_nodes: + if isinstance(invalid_nodes, (list, tuple)): + invalid.extend(invalid_nodes) + else: + self.log.warning( + "Failed plug-in doens't have any selectable objects." + ) + + bpy.ops.object.select_all(action='DESELECT') + + # Make sure every node is only processed once + invalid = list(set(invalid)) + if not invalid: + self.log.info("No invalid nodes found.") + return + + invalid_names = [obj.name for obj in invalid] + self.log.info( + "Selecting invalid objects: %s", ", ".join(invalid_names) + ) + # Select the objects and also make the last one the active object. + for obj in invalid: + obj.select_set(True) + + bpy.context.view_layer.objects.active = invalid[-1] diff --git a/pype/blender/plugin.py b/pype/blender/plugin.py new file mode 100644 index 0000000000..ad5a259785 --- /dev/null +++ b/pype/blender/plugin.py @@ -0,0 +1,135 @@ +"""Shared functionality for pipeline plugins for Blender.""" + +from pathlib import Path +from typing import Dict, List, Optional + +import bpy + +from avalon import api + +VALID_EXTENSIONS = [".blend"] + + +def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str: + """Return a consistent name for a model asset.""" + name = f"{asset}_{subset}" + if namespace: + name = f"{namespace}:{name}" + return name + + +class AssetLoader(api.Loader): + """A basic AssetLoader for Blender + + This will implement the basic logic for linking/appending assets + into another Blender scene. + + The `update` method should be implemented by a sub-class, because + it's different for different types (e.g. 
model, rig, animation, + etc.). + """ + + @staticmethod + def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]: + """Get the 'instance empty' that holds the collection instance.""" + for node in nodes: + if not isinstance(node, bpy.types.Object): + continue + if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION' + and node.instance_collection and node.name == instance_name): + return node + return None + + @staticmethod + def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]: + """Get the 'instance collection' (container) for this asset.""" + for node in nodes: + if not isinstance(node, bpy.types.Collection): + continue + if node.name == instance_name: + return node + return None + + @staticmethod + def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library: + """Find the library file from the container. + + It traverses the objects from this collection, checks if there is only + 1 library from which the objects come from and returns the library. + + Warning: + No nested collections are supported at the moment! + """ + assert not container.children, "Nested collections are not supported." + assert container.objects, "The collection doesn't contain any objects." + libraries = set() + for obj in container.objects: + assert obj.library, f"'{obj.name}' is not linked." + libraries.add(obj.library) + + assert len(libraries) == 1, "'{container.name}' contains objects from more then 1 library." 
+ + return list(libraries)[0] + + def process_asset(self, + context: dict, + name: str, + namespace: Optional[str] = None, + options: Optional[Dict] = None): + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") + + def load(self, + context: dict, + name: Optional[str] = None, + namespace: Optional[str] = None, + options: Optional[Dict] = None) -> Optional[bpy.types.Collection]: + """Load asset via database + + Arguments: + context: Full parenthood of representation to load + name: Use pre-defined name + namespace: Use pre-defined namespace + options: Additional settings dictionary + """ + # TODO (jasper): make it possible to add the asset several times by + # just re-using the collection + assert Path(self.fname).exists(), f"{self.fname} doesn't exist." + + self.process_asset( + context=context, + name=name, + namespace=namespace, + options=options, + ) + + # Only containerise if anything was loaded by the Loader. + nodes = self[:] + if not nodes: + return None + + # Only containerise if it's not already a collection from a .blend file. 
+ representation = context["representation"]["name"] + if representation != "blend": + from avalon.blender.pipeline import containerise + return containerise( + name=name, + namespace=namespace, + nodes=nodes, + context=context, + loader=self.__class__.__name__, + ) + + asset = context["asset"]["name"] + subset = context["subset"]["name"] + instance_name = model_name(asset, subset, namespace) + + return self._get_instance_collection(instance_name, nodes) + + def update(self, container: Dict, representation: Dict): + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") + + def remove(self, container: Dict) -> bool: + """Must be implemented by a sub-class""" + raise NotImplementedError("Must be implemented by a sub-class") diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py index 7eb9126fca..fc9e66e4f8 100644 --- a/pype/ftrack/actions/action_delete_asset.py +++ b/pype/ftrack/actions/action_delete_asset.py @@ -99,6 +99,7 @@ class DeleteAssetSubset(BaseAction): # Filter event even more (skip task entities) # - task entities are not relevant for avalon + entity_mapping = {} for entity in entities: ftrack_id = entity["id"] if ftrack_id not in ftrack_ids: @@ -107,6 +108,8 @@ class DeleteAssetSubset(BaseAction): if entity.entity_type.lower() == "task": ftrack_ids.remove(ftrack_id) + entity_mapping[ftrack_id] = entity + if not ftrack_ids: # It is bug if this happens! 
return { @@ -122,11 +125,41 @@ class DeleteAssetSubset(BaseAction): project_name = project["full_name"] self.dbcon.Session["AVALON_PROJECT"] = project_name - selected_av_entities = self.dbcon.find({ + selected_av_entities = list(self.dbcon.find({ "type": "asset", "data.ftrackId": {"$in": ftrack_ids} - }) - selected_av_entities = [ent for ent in selected_av_entities] + })) + found_without_ftrack_id = {} + if len(selected_av_entities) != len(ftrack_ids): + found_ftrack_ids = [ + ent["data"]["ftrackId"] for ent in selected_av_entities + ] + for ftrack_id, entity in entity_mapping.items(): + if ftrack_id in found_ftrack_ids: + continue + + av_ents_by_name = list(self.dbcon.find({ + "type": "asset", + "name": entity["name"] + })) + if not av_ents_by_name: + continue + + ent_path_items = [ent["name"] for ent in entity["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + # TODO we should say to user that + # few of them are missing in avalon + for av_ent in av_ents_by_name: + if av_ent["data"]["parents"] != parents: + continue + + # TODO we should say to user that found entity + # with same name does not match same ftrack id? 
+ if "ftrackId" not in av_ent["data"]: + selected_av_entities.append(av_ent) + found_without_ftrack_id[str(av_ent["_id"])] = ftrack_id + break + if not selected_av_entities: return { "success": False, @@ -155,7 +188,8 @@ class DeleteAssetSubset(BaseAction): "created_at": datetime.now(), "project_name": project_name, "subset_ids_by_name": {}, - "subset_ids_by_parent": {} + "subset_ids_by_parent": {}, + "without_ftrack_id": found_without_ftrack_id } id_item = { @@ -413,14 +447,21 @@ class DeleteAssetSubset(BaseAction): asset_ids_to_archive = [] ftrack_ids_to_delete = [] if len(assets_to_delete) > 0: + map_av_ftrack_id = spec_data["without_ftrack_id"] # Prepare data when deleting whole avalon asset avalon_assets = self.dbcon.find({"type": "asset"}) avalon_assets_by_parent = collections.defaultdict(list) for asset in avalon_assets: + asset_id = asset["_id"] parent_id = asset["data"]["visualParent"] avalon_assets_by_parent[parent_id].append(asset) - if asset["_id"] in assets_to_delete: - ftrack_id = asset["data"]["ftrackId"] + if asset_id in assets_to_delete: + ftrack_id = map_av_ftrack_id.get(str(asset_id)) + if not ftrack_id: + ftrack_id = asset["data"].get("ftrackId") + + if not ftrack_id: + continue ftrack_ids_to_delete.append(ftrack_id) children_queue = Queue() diff --git a/pype/ftrack/actions/action_delivery.py b/pype/ftrack/actions/action_delivery.py new file mode 100644 index 0000000000..29fdfe39ae --- /dev/null +++ b/pype/ftrack/actions/action_delivery.py @@ -0,0 +1,528 @@ +import os +import copy +import shutil +import collections +import string + +import clique +from bson.objectid import ObjectId + +from avalon import pipeline +from avalon.vendor import filelink +from avalon.tools.libraryloader.io_nonsingleton import DbConnector + +from pypeapp import Anatomy +from pype.ftrack import BaseAction +from pype.ftrack.lib.avalon_sync import CustAttrIdKey + + +class Delivery(BaseAction): + '''Edit meta data action.''' + + #: Action identifier. 
+ identifier = "delivery.action" + #: Action label. + label = "Delivery" + #: Action description. + description = "Deliver data to client" + #: roles that are allowed to register this action + role_list = ["Pypeclub", "Administrator", "Project manager"] + icon = '{}/ftrack/action_icons/Delivery.svg'.format( + os.environ.get('PYPE_STATICS_SERVER', '') + ) + + db_con = DbConnector() + + def discover(self, session, entities, event): + ''' Validation ''' + for entity in entities: + if entity.entity_type.lower() == "assetversion": + return True + + return False + + def interface(self, session, entities, event): + if event["data"].get("values", {}): + return + + title = "Delivery data to Client" + + items = [] + item_splitter = {"type": "label", "value": "---"} + + # Prepare component names for processing + components = None + project = None + for entity in entities: + if project is None: + project_id = None + for ent_info in entity["link"]: + if ent_info["type"].lower() == "project": + project_id = ent_info["id"] + break + + if project_id is None: + project = entity["asset"]["parent"]["project"] + else: + project = session.query(( + "select id, full_name from Project where id is \"{}\"" + ).format(project_id)).one() + + _components = set( + [component["name"] for component in entity["components"]] + ) + if components is None: + components = _components + continue + + components = components.intersection(_components) + if not components: + break + + project_name = project["full_name"] + items.append({ + "type": "hidden", + "name": "__project_name__", + "value": project_name + }) + + # Prpeare anatomy data + anatomy = Anatomy(project_name) + new_anatomies = [] + first = None + for key in (anatomy.templates.get("delivery") or {}): + new_anatomies.append({ + "label": key, + "value": key + }) + if first is None: + first = key + + skipped = False + # Add message if there are any common components + if not components or not new_anatomies: + skipped = True + items.append({ + 
"type": "label", + "value": "

Something went wrong:

" + }) + + items.append({ + "type": "hidden", + "name": "__skipped__", + "value": skipped + }) + + if not components: + if len(entities) == 1: + items.append({ + "type": "label", + "value": ( + "- Selected entity doesn't have components to deliver." + ) + }) + else: + items.append({ + "type": "label", + "value": ( + "- Selected entities don't have common components." + ) + }) + + # Add message if delivery anatomies are not set + if not new_anatomies: + items.append({ + "type": "label", + "value": ( + "- `\"delivery\"` anatomy key is not set in config." + ) + }) + + # Skip if there are any data shortcomings + if skipped: + return { + "items": items, + "title": title + } + + items.append({ + "value": "

Choose Components to deliver

", + "type": "label" + }) + + for component in components: + items.append({ + "type": "boolean", + "value": False, + "label": component, + "name": component + }) + + items.append(item_splitter) + + items.append({ + "value": "

Location for delivery

", + "type": "label" + }) + + items.append({ + "type": "label", + "value": ( + "NOTE: It is possible to replace `root` key in anatomy." + ) + }) + + items.append({ + "type": "text", + "name": "__location_path__", + "empty_text": "Type location path here...(Optional)" + }) + + items.append(item_splitter) + + items.append({ + "value": "

Anatomy of delivery files

", + "type": "label" + }) + + items.append({ + "type": "label", + "value": ( + "

NOTE: These can be set in Anatomy.yaml" + " within `delivery` key.

" + ) + }) + + items.append({ + "type": "enumerator", + "name": "__new_anatomies__", + "data": new_anatomies, + "value": first + }) + + return { + "items": items, + "title": title + } + + def launch(self, session, entities, event): + if "values" not in event["data"]: + return + + self.report_items = collections.defaultdict(list) + + values = event["data"]["values"] + skipped = values.pop("__skipped__") + if skipped: + return None + + component_names = [] + location_path = values.pop("__location_path__") + anatomy_name = values.pop("__new_anatomies__") + project_name = values.pop("__project_name__") + + for key, value in values.items(): + if value is True: + component_names.append(key) + + if not component_names: + return { + "success": True, + "message": "Not selected components to deliver." + } + + location_path = location_path.strip() + if location_path: + location_path = os.path.normpath(location_path) + if not os.path.exists(location_path): + return { + "success": False, + "message": ( + "Entered location path does not exists. 
\"{}\"" + ).format(location_path) + } + + self.db_con.install() + self.db_con.Session["AVALON_PROJECT"] = project_name + + repres_to_deliver = [] + for entity in entities: + asset = entity["asset"] + subset_name = asset["name"] + version = entity["version"] + + parent = asset["parent"] + parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey) + if parent_mongo_id: + parent_mongo_id = ObjectId(parent_mongo_id) + else: + asset_ent = self.db_con.find_one({ + "type": "asset", + "data.ftrackId": parent["id"] + }) + if not asset_ent: + ent_path = "/".join( + [ent["name"] for ent in parent["link"]] + ) + msg = "Not synchronized entities to avalon" + self.report_items[msg].append(ent_path) + self.log.warning("{} <{}>".format(msg, ent_path)) + continue + + parent_mongo_id = asset_ent["_id"] + + subset_ent = self.db_con.find_one({ + "type": "subset", + "parent": parent_mongo_id, + "name": subset_name + }) + + version_ent = self.db_con.find_one({ + "type": "version", + "name": version, + "parent": subset_ent["_id"] + }) + + repre_ents = self.db_con.find({ + "type": "representation", + "parent": version_ent["_id"] + }) + + repres_by_name = {} + for repre in repre_ents: + repre_name = repre["name"] + repres_by_name[repre_name] = repre + + for component in entity["components"]: + comp_name = component["name"] + if comp_name not in component_names: + continue + + repre = repres_by_name.get(comp_name) + repres_to_deliver.append(repre) + + if not location_path: + location_path = os.environ.get("AVALON_PROJECTS") or "" + + print(location_path) + + anatomy = Anatomy(project_name) + for repre in repres_to_deliver: + # Get destination repre path + anatomy_data = copy.deepcopy(repre["context"]) + anatomy_data["root"] = location_path + + anatomy_filled = anatomy.format_all(anatomy_data) + test_path = anatomy_filled["delivery"][anatomy_name] + + if not test_path.solved: + msg = ( + "Missing keys in Representation's context" + " for anatomy template \"{}\"." 
+ ).format(anatomy_name) + + if test_path.missing_keys: + keys = ", ".join(test_path.missing_keys) + sub_msg = ( + "Representation: {}
- Missing keys: \"{}\"
" + ).format(str(repre["_id"]), keys) + + if test_path.invalid_types: + items = [] + for key, value in test_path.invalid_types.items(): + items.append("\"{}\" {}".format(key, str(value))) + + keys = ", ".join(items) + sub_msg = ( + "Representation: {}
" + "- Invalid value DataType: \"{}\"
" + ).format(str(repre["_id"]), keys) + + self.report_items[msg].append(sub_msg) + self.log.warning( + "{} Representation: \"{}\" Filled: <{}>".format( + msg, str(repre["_id"]), str(result) + ) + ) + continue + + # Get source repre path + frame = repre['context'].get('frame') + + if frame: + repre["context"]["frame"] = len(str(frame)) * "#" + + repre_path = self.path_from_represenation(repre) + # TODO add backup solution where root of path from component + # is repalced with AVALON_PROJECTS root + if not frame: + self.process_single_file( + repre_path, anatomy, anatomy_name, anatomy_data + ) + + else: + self.process_sequence( + repre_path, anatomy, anatomy_name, anatomy_data + ) + + self.db_con.uninstall() + + return self.report() + + def process_single_file( + self, repre_path, anatomy, anatomy_name, anatomy_data + ): + anatomy_filled = anatomy.format(anatomy_data) + delivery_path = anatomy_filled["delivery"][anatomy_name] + delivery_folder = os.path.dirname(delivery_path) + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + self.copy_file(repre_path, delivery_path) + + def process_sequence( + self, repre_path, anatomy, anatomy_name, anatomy_data + ): + dir_path, file_name = os.path.split(str(repre_path)) + + base_name, ext = os.path.splitext(file_name) + file_name_items = None + if "#" in base_name: + file_name_items = [part for part in base_name.split("#") if part] + + elif "%" in base_name: + file_name_items = base_name.split("%") + + if not file_name_items: + msg = "Source file was not found" + self.report_items[msg].append(repre_path) + self.log.warning("{} <{}>".format(msg, repre_path)) + return + + src_collections, remainder = clique.assemble(os.listdir(dir_path)) + src_collection = None + for col in src_collections: + if col.tail != ext: + continue + + # skip if collection don't have same basename + if not col.head.startswith(file_name_items[0]): + continue + + src_collection = col + break + + if src_collection is None: + # TODO log 
error! + msg = "Source collection of files was not found" + self.report_items[msg].append(repre_path) + self.log.warning("{} <{}>".format(msg, repre_path)) + return + + frame_indicator = "@####@" + + anatomy_data["frame"] = frame_indicator + anatomy_filled = anatomy.format(anatomy_data) + + delivery_path = anatomy_filled["delivery"][anatomy_name] + print(delivery_path) + delivery_folder = os.path.dirname(delivery_path) + dst_head, dst_tail = delivery_path.split(frame_indicator) + dst_padding = src_collection.padding + dst_collection = clique.Collection( + head=dst_head, + tail=dst_tail, + padding=dst_padding + ) + + if not os.path.exists(delivery_folder): + os.makedirs(delivery_folder) + + src_head = src_collection.head + src_tail = src_collection.tail + for index in src_collection.indexes: + src_padding = src_collection.format("{padding}") % index + src_file_name = "{}{}{}".format(src_head, src_padding, src_tail) + src = os.path.normpath( + os.path.join(dir_path, src_file_name) + ) + + dst_padding = dst_collection.format("{padding}") % index + dst = "{}{}{}".format(dst_head, dst_padding, dst_tail) + + self.copy_file(src, dst) + + def path_from_represenation(self, representation): + try: + template = representation["data"]["template"] + + except KeyError: + return None + + try: + context = representation["context"] + context["root"] = os.environ.get("AVALON_PROJECTS") or "" + path = pipeline.format_template_with_optional_keys( + context, template + ) + + except KeyError: + # Template references unavailable data + return None + + return os.path.normpath(path) + + def copy_file(self, src_path, dst_path): + if os.path.exists(dst_path): + return + try: + filelink.create( + src_path, + dst_path, + filelink.HARDLINK + ) + except OSError: + shutil.copyfile(src_path, dst_path) + + def report(self): + items = [] + title = "Delivery report" + for msg, _items in self.report_items.items(): + if not _items: + continue + + if items: + items.append({"type": "label", "value": 
"---"}) + + items.append({ + "type": "label", + "value": "# {}".format(msg) + }) + if not isinstance(_items, (list, tuple)): + _items = [_items] + __items = [] + for item in _items: + __items.append(str(item)) + + items.append({ + "type": "label", + "value": '

{}

'.format("
".join(__items)) + }) + + if not items: + return { + "success": True, + "message": "Delivery Finished" + } + + return { + "items": items, + "title": title, + "success": False, + "message": "Delivery Finished" + } + +def register(session, plugins_presets={}): + '''Register plugin. Called when used as an plugin.''' + + Delivery(session, plugins_presets).register() diff --git a/pype/ftrack/actions/action_sync_to_avalon.py b/pype/ftrack/actions/action_sync_to_avalon.py index 01d0b866bf..d2fcfb372f 100644 --- a/pype/ftrack/actions/action_sync_to_avalon.py +++ b/pype/ftrack/actions/action_sync_to_avalon.py @@ -70,7 +70,10 @@ class SyncToAvalonLocal(BaseAction): ft_project_name = in_entities[0]["project"]["full_name"] try: - self.entities_factory.launch_setup(ft_project_name) + output = self.entities_factory.launch_setup(ft_project_name) + if output is not None: + return output + time_1 = time.time() self.entities_factory.set_cutom_attributes() diff --git a/pype/ftrack/events/action_sync_to_avalon.py b/pype/ftrack/events/action_sync_to_avalon.py index 9f9deeab95..79ab1b5f7a 100644 --- a/pype/ftrack/events/action_sync_to_avalon.py +++ b/pype/ftrack/events/action_sync_to_avalon.py @@ -105,7 +105,10 @@ class SyncToAvalonServer(BaseAction): ft_project_name = in_entities[0]["project"]["full_name"] try: - self.entities_factory.launch_setup(ft_project_name) + output = self.entities_factory.launch_setup(ft_project_name) + if output is not None: + return output + time_1 = time.time() self.entities_factory.set_cutom_attributes() diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py index 8d75d932f8..eef24a186d 100644 --- a/pype/ftrack/events/event_sync_to_avalon.py +++ b/pype/ftrack/events/event_sync_to_avalon.py @@ -31,7 +31,7 @@ class SyncToAvalonEvent(BaseEvent): "timelog", "auth_userrole", "appointment" ] ignore_ent_types = ["Milestone"] - ignore_keys = ["statusid"] + ignore_keys = ["statusid", "thumbid"] project_query = ( "select 
full_name, name, custom_attributes" @@ -131,7 +131,9 @@ class SyncToAvalonEvent(BaseEvent): ftrack_id = proj["data"]["ftrackId"] self._avalon_ents_by_ftrack_id[ftrack_id] = proj for ent in ents: - ftrack_id = ent["data"]["ftrackId"] + ftrack_id = ent["data"].get("ftrackId") + if ftrack_id is None: + continue self._avalon_ents_by_ftrack_id[ftrack_id] = ent return self._avalon_ents_by_ftrack_id @@ -484,6 +486,14 @@ class SyncToAvalonEvent(BaseEvent): action = ent_info["action"] ftrack_id = ent_info["entityId"] + if isinstance(ftrack_id, list): + self.log.warning(( + "BUG REPORT: Entity info has `entityId` as `list` \"{}\"" + ).format(ent_info)) + if len(ftrack_id) == 0: + continue + ftrack_id = ftrack_id[0] + if action == "move": ent_keys = ent_info["keys"] # Seprate update info from move action @@ -1427,6 +1437,93 @@ class SyncToAvalonEvent(BaseEvent): parent_id = ent_info["parentId"] new_tasks_by_parent[parent_id].append(ent_info) pop_out_ents.append(ftrack_id) + continue + + name = ( + ent_info + .get("changes", {}) + .get("name", {}) + .get("new") + ) + avalon_ent_by_name = self.avalon_ents_by_name.get(name) or {} + avalon_ent_by_name_ftrack_id = ( + avalon_ent_by_name + .get("data", {}) + .get("ftrackId") + ) + if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None: + ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id) + if not ftrack_ent: + ftrack_ent = self.process_session.query( + self.entities_query_by_id.format( + self.cur_project["id"], ftrack_id + ) + ).one() + self.ftrack_ents_by_id[ftrack_id] = ftrack_ent + + ent_path_items = [ent["name"] for ent in ftrack_ent["link"]] + parents = ent_path_items[1:len(ent_path_items)-1:] + + avalon_ent_parents = ( + avalon_ent_by_name.get("data", {}).get("parents") + ) + if parents == avalon_ent_parents: + self.dbcon.update_one({ + "_id": avalon_ent_by_name["_id"] + }, { + "$set": { + "data.ftrackId": ftrack_id, + "data.entityType": entity_type + } + }) + + avalon_ent_by_name["data"]["ftrackId"] = ftrack_id + 
avalon_ent_by_name["data"]["entityType"] = entity_type + + self._avalon_ents_by_ftrack_id[ftrack_id] = ( + avalon_ent_by_name + ) + if self._avalon_ents_by_parent_id: + found = None + for _parent_id_, _entities_ in ( + self._avalon_ents_by_parent_id.items() + ): + for _idx_, entity in enumerate(_entities_): + if entity["_id"] == avalon_ent_by_name["_id"]: + found = (_parent_id_, _idx_) + break + + if found: + break + + if found: + _parent_id_, _idx_ = found + self._avalon_ents_by_parent_id[_parent_id_][ + _idx_] = avalon_ent_by_name + + if self._avalon_ents_by_id: + self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = ( + avalon_ent_by_name + ) + + if self._avalon_ents_by_name: + self._avalon_ents_by_name[name] = avalon_ent_by_name + + if self._avalon_ents: + found = None + project, entities = self._avalon_ents + for _idx_, _ent_ in enumerate(entities): + if _ent_["_id"] != avalon_ent_by_name["_id"]: + continue + found = _idx_ + break + + if found is not None: + entities[found] = avalon_ent_by_name + self._avalon_ents = project, entities + + pop_out_ents.append(ftrack_id) + continue configuration_id = entity_type_conf_ids.get(entity_type) if not configuration_id: @@ -1731,6 +1828,13 @@ class SyncToAvalonEvent(BaseEvent): obj_type_id = ent_info["objectTypeId"] ent_cust_attrs = cust_attrs_by_obj_id.get(obj_type_id) + if ent_cust_attrs is None: + self.log.warning(( + "BUG REPORT: Entity has ent type without" + " custom attributes <{}> \"{}\"" + ).format(entType, ent_info)) + continue + for key, values in ent_info["changes"].items(): if key in hier_attrs_keys: self.hier_cust_attrs_changes[key].append(ftrack_id) diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py index 87994d34b2..eaacfd959a 100644 --- a/pype/ftrack/events/event_user_assigment.py +++ b/pype/ftrack/events/event_user_assigment.py @@ -207,7 +207,9 @@ class UserAssigmentEvent(BaseEvent): # formatting work dir is easiest part as we can use whole path work_dir 
= anatomy.format(data)['avalon']['work'] # we also need publish but not whole - publish = anatomy.format_all(data)['partial']['avalon']['publish'] + filled_all = anatomy.format_all(data) + publish = filled_all['avalon']['publish'] + # now find path to {asset} m = re.search("(^.+?{})".format(data['asset']), publish) diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py index edd3cee09b..fefba580e0 100644 --- a/pype/ftrack/ftrack_server/lib.py +++ b/pype/ftrack/ftrack_server/lib.py @@ -265,6 +265,37 @@ class ProcessEventHub(ftrack_api.event.hub.EventHub): return self._send_packet(self._code_name_mapping["heartbeat"]) return super()._handle_packet(code, packet_identifier, path, data) + + +class UserEventHub(ftrack_api.event.hub.EventHub): + def __init__(self, *args, **kwargs): + self.sock = kwargs.pop("sock") + super(UserEventHub, self).__init__(*args, **kwargs) + + def _handle_packet(self, code, packet_identifier, path, data): + """Override `_handle_packet` which extend heartbeat""" + code_name = self._code_name_mapping[code] + if code_name == "heartbeat": + # Reply with heartbeat. 
+ self.sock.sendall(b"hearbeat") + return self._send_packet(self._code_name_mapping['heartbeat']) + + elif code_name == "connect": + event = ftrack_api.event.base.Event( + topic="pype.storer.started", + data={}, + source={ + "id": self.id, + "user": {"username": self._api_user} + } + ) + self._event_queue.put(event) + + return super(UserEventHub, self)._handle_packet( + code, packet_identifier, path, data + ) + + class SocketSession(ftrack_api.session.Session): '''An isolated session for interaction with an ftrack server.''' def __init__( diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py index 3309f75cd7..8e217870ba 100644 --- a/pype/ftrack/ftrack_server/socket_thread.py +++ b/pype/ftrack/ftrack_server/socket_thread.py @@ -1,4 +1,5 @@ import os +import sys import time import socket import threading @@ -26,6 +27,8 @@ class SocketThread(threading.Thread): self.mongo_error = False + self._temp_data = {} + def stop(self): self._is_running = False @@ -50,8 +53,7 @@ class SocketThread(threading.Thread): ) self.subproc = subprocess.Popen( - ["python", self.filepath, "-port", str(self.port)], - stdout=subprocess.PIPE + [sys.executable, self.filepath, "-port", str(self.port)] ) # Listen for incoming connections @@ -81,8 +83,9 @@ class SocketThread(threading.Thread): try: if not self._is_running: break + data = None try: - data = connection.recv(16) + data = self.get_data_from_con(connection) time_con = time.time() except socket.timeout: @@ -99,10 +102,7 @@ class SocketThread(threading.Thread): self._is_running = False break - if data: - if data == b"MongoError": - self.mongo_error = True - connection.sendall(data) + self._handle_data(connection, data) except Exception as exc: self.log.error( @@ -115,9 +115,15 @@ class SocketThread(threading.Thread): if self.subproc.poll() is None: self.subproc.terminate() - lines = self.subproc.stdout.readlines() - if lines: - print("*** Socked Thread stdout ***") - for line in lines: - 
os.write(1, line) self.finished = True + + def get_data_from_con(self, connection): + return connection.recv(16) + + def _handle_data(self, connection, data): + if not data: + return + + if data == b"MongoError": + self.mongo_error = True + connection.sendall(data) diff --git a/pype/ftrack/ftrack_server/sub_user_server.py b/pype/ftrack/ftrack_server/sub_user_server.py new file mode 100644 index 0000000000..f0d39447a8 --- /dev/null +++ b/pype/ftrack/ftrack_server/sub_user_server.py @@ -0,0 +1,56 @@ +import sys +import signal +import socket + +import traceback + +from ftrack_server import FtrackServer +from pype.ftrack.ftrack_server.lib import SocketSession, UserEventHub + +from pypeapp import Logger + +log = Logger().get_logger("FtrackUserServer") + + +def main(args): + port = int(args[-1]) + + # Create a TCP/IP socket + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + # Connect the socket to the port where the server is listening + server_address = ("localhost", port) + log.debug( + "User Ftrack Server connected to {} port {}".format(*server_address) + ) + sock.connect(server_address) + sock.sendall(b"CreatedUser") + + try: + session = SocketSession( + auto_connect_event_hub=True, sock=sock, Eventhub=UserEventHub + ) + server = FtrackServer("action") + log.debug("Launched User Ftrack Server") + server.run_server(session=session) + except Exception: + traceback.print_exception(*sys.exc_info()) + + finally: + log.debug("Closing socket") + sock.close() + return 1 + + +if __name__ == "__main__": + # Register interupt signal + def signal_handler(sig, frame): + log.info( + "Process was forced to stop. Process ended." 
+ ) + sys.exit(0) + + signal.signal(signal.SIGINT, signal_handler) + signal.signal(signal.SIGTERM, signal_handler) + + sys.exit(main(sys.argv)) diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py index 2240e42d36..f08dc73c19 100644 --- a/pype/ftrack/lib/avalon_sync.py +++ b/pype/ftrack/lib/avalon_sync.py @@ -1722,7 +1722,11 @@ class SyncEntitiesFactory: self.avalon_project_id = new_id self._avalon_ents_by_id[str(new_id)] = project_item + if self._avalon_ents_by_ftrack_id is None: + self._avalon_ents_by_ftrack_id = {} self._avalon_ents_by_ftrack_id[self.ft_project_id] = str(new_id) + if self._avalon_ents_by_name is None: + self._avalon_ents_by_name = {} self._avalon_ents_by_name[project_item["name"]] = str(new_id) self.create_list.append(project_item) diff --git a/pype/ftrack/tray/ftrack_module.py b/pype/ftrack/tray/ftrack_module.py index 8da97da56b..250872f239 100644 --- a/pype/ftrack/tray/ftrack_module.py +++ b/pype/ftrack/tray/ftrack_module.py @@ -1,26 +1,27 @@ import os -import json -import threading import time -from Qt import QtCore, QtGui, QtWidgets +import datetime +import threading +from Qt import QtCore, QtWidgets import ftrack_api -from pypeapp import style -from pype.ftrack import FtrackServer, check_ftrack_url, credentials +from ..ftrack_server.lib import check_ftrack_url +from ..ftrack_server import socket_thread +from ..lib import credentials from . 
import login_dialog -from pype import api as pype +from pypeapp import Logger -log = pype.Logger().get_logger("FtrackModule", "ftrack") +log = Logger().get_logger("FtrackModule", "ftrack") class FtrackModule: def __init__(self, main_parent=None, parent=None): self.parent = parent self.widget_login = login_dialog.Login_Dialog_ui(self) - self.action_server = FtrackServer('action') self.thread_action_server = None + self.thread_socket_server = None self.thread_timer = None self.bool_logged = False @@ -75,14 +76,6 @@ class FtrackModule: # Actions part def start_action_server(self): - self.bool_action_thread_running = True - self.set_menu_visibility() - if ( - self.thread_action_server is not None and - self.bool_action_thread_running is False - ): - self.stop_action_server() - if self.thread_action_server is None: self.thread_action_server = threading.Thread( target=self.set_action_server @@ -90,35 +83,114 @@ class FtrackModule: self.thread_action_server.start() def set_action_server(self): - first_check = True - while self.bool_action_thread_running is True: - if not check_ftrack_url(os.environ['FTRACK_SERVER']): - if first_check: - log.warning( - "Could not connect to Ftrack server" - ) - first_check = False + if self.bool_action_server_running: + return + + self.bool_action_server_running = True + self.bool_action_thread_running = False + + ftrack_url = os.environ['FTRACK_SERVER'] + + parent_file_path = os.path.dirname( + os.path.dirname(os.path.realpath(__file__)) + ) + + min_fail_seconds = 5 + max_fail_count = 3 + wait_time_after_max_fail = 10 + + # Threads data + thread_name = "ActionServerThread" + thread_port = 10021 + subprocess_path = ( + "{}/ftrack_server/sub_user_server.py".format(parent_file_path) + ) + if self.thread_socket_server is not None: + self.thread_socket_server.stop() + self.thread_socket_server.join() + self.thread_socket_server = None + + last_failed = datetime.datetime.now() + failed_count = 0 + + ftrack_accessible = False + 
printed_ftrack_error = False + + # Main loop + while True: + if not self.bool_action_server_running: + log.debug("Action server was pushed to stop.") + break + + # Check if accessible Ftrack and Mongo url + if not ftrack_accessible: + ftrack_accessible = check_ftrack_url(ftrack_url) + + # Run threads only if Ftrack is accessible + if not ftrack_accessible: + if not printed_ftrack_error: + log.warning("Can't access Ftrack {}".format(ftrack_url)) + + if self.thread_socket_server is not None: + self.thread_socket_server.stop() + self.thread_socket_server.join() + self.thread_socket_server = None + self.bool_action_thread_running = False + self.set_menu_visibility() + + printed_ftrack_error = True + time.sleep(1) continue - log.info( - "Connected to Ftrack server. Running actions session" - ) - try: - self.bool_action_server_running = True + + printed_ftrack_error = False + + # Run backup thread which does not requeire mongo to work + if self.thread_socket_server is None: + if failed_count < max_fail_count: + self.thread_socket_server = socket_thread.SocketThread( + thread_name, thread_port, subprocess_path + ) + self.thread_socket_server.start() + self.bool_action_thread_running = True + self.set_menu_visibility() + + elif failed_count == max_fail_count: + log.warning(( + "Action server failed {} times." 
+ " I'll try to run again {}s later" + ).format( + str(max_fail_count), str(wait_time_after_max_fail)) + ) + failed_count += 1 + + elif (( + datetime.datetime.now() - last_failed + ).seconds > wait_time_after_max_fail): + failed_count = 0 + + # If thread failed test Ftrack and Mongo connection + elif not self.thread_socket_server.isAlive(): + self.thread_socket_server.join() + self.thread_socket_server = None + ftrack_accessible = False + + self.bool_action_thread_running = False self.set_menu_visibility() - self.action_server.run_server() - if self.bool_action_thread_running: - log.debug("Ftrack action server has stopped") - except Exception: - log.warning( - "Ftrack Action server crashed. Trying to connect again", - exc_info=True - ) - self.bool_action_server_running = False - self.set_menu_visibility() - first_check = True + + _last_failed = datetime.datetime.now() + delta_time = (_last_failed - last_failed).seconds + if delta_time < min_fail_seconds: + failed_count += 1 + else: + failed_count = 0 + last_failed = _last_failed + + time.sleep(1) self.bool_action_thread_running = False + self.bool_action_server_running = False + self.set_menu_visibility() def reset_action_server(self): self.stop_action_server() @@ -126,16 +198,18 @@ class FtrackModule: def stop_action_server(self): try: - self.bool_action_thread_running = False - self.action_server.stop_session() + self.bool_action_server_running = False + if self.thread_socket_server is not None: + self.thread_socket_server.stop() + self.thread_socket_server.join() + self.thread_socket_server = None + if self.thread_action_server is not None: self.thread_action_server.join() self.thread_action_server = None log.info("Ftrack action server was forced to stop") - self.bool_action_server_running = False - self.set_menu_visibility() except Exception: log.warning( "Error has happened during Killing action server", @@ -201,9 +275,9 @@ class FtrackModule: self.stop_timer_thread() return - self.aRunActionS.setVisible(not 
self.bool_action_thread_running) + self.aRunActionS.setVisible(not self.bool_action_server_running) self.aResetActionS.setVisible(self.bool_action_thread_running) - self.aStopActionS.setVisible(self.bool_action_thread_running) + self.aStopActionS.setVisible(self.bool_action_server_running) if self.bool_timer_event is False: self.start_timer_thread() diff --git a/pype/lib.py b/pype/lib.py index b19491adeb..f26395d930 100644 --- a/pype/lib.py +++ b/pype/lib.py @@ -196,9 +196,13 @@ def any_outdated(): if representation in checked: continue - representation_doc = io.find_one({"_id": io.ObjectId(representation), - "type": "representation"}, - projection={"parent": True}) + representation_doc = io.find_one( + { + "_id": io.ObjectId(representation), + "type": "representation" + }, + projection={"parent": True} + ) if representation_doc and not is_latest(representation_doc): return True elif not representation_doc: @@ -308,27 +312,38 @@ def switch_item(container, representation_name = representation["name"] # Find the new one - asset = io.find_one({"name": asset_name, "type": "asset"}) + asset = io.find_one({ + "name": asset_name, + "type": "asset" + }) assert asset, ("Could not find asset in the database with the name " "'%s'" % asset_name) - subset = io.find_one({"name": subset_name, - "type": "subset", - "parent": asset["_id"]}) + subset = io.find_one({ + "name": subset_name, + "type": "subset", + "parent": asset["_id"] + }) assert subset, ("Could not find subset in the database with the name " "'%s'" % subset_name) - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[('name', -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[('name', -1)] + ) assert version, "Could not find a version for {}.{}".format( asset_name, subset_name ) - representation = io.find_one({"name": representation_name, - "type": "representation", - "parent": version["_id"]}) + representation = io.find_one({ + "name": 
representation_name, + "type": "representation", + "parent": version["_id"]} + ) assert representation, ("Could not find representation in the database with" " the name '%s'" % representation_name) @@ -366,7 +381,10 @@ def get_asset(asset_name=None): if not asset_name: asset_name = avalon.api.Session["AVALON_ASSET"] - asset_document = io.find_one({"name": asset_name, "type": "asset"}) + asset_document = io.find_one({ + "name": asset_name, + "type": "asset" + }) if not asset_document: raise TypeError("Entity \"{}\" was not found in DB".format(asset_name)) @@ -538,8 +556,7 @@ def get_subsets(asset_name, from avalon import io # query asset from db - asset_io = io.find_one({"type": "asset", - "name": asset_name}) + asset_io = io.find_one({"type": "asset", "name": asset_name}) # check if anything returned assert asset_io, "Asset not existing. \ @@ -563,14 +580,20 @@ def get_subsets(asset_name, # Process subsets for subset in subsets: if not version: - version_sel = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version_sel = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) else: assert isinstance(version, int), "version needs to be `int` type" - version_sel = io.find_one({"type": "version", - "parent": subset["_id"], - "name": int(version)}) + version_sel = io.find_one({ + "type": "version", + "parent": subset["_id"], + "name": int(version) + }) find_dict = {"type": "representation", "parent": version_sel["_id"]} diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index b4dbc52bc8..f027893a0e 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -162,6 +162,7 @@ def on_open(_): # Validate FPS after update_task_from_path to # ensure it is using correct FPS for the asset lib.validate_fps() + lib.fix_incompatible_containers() if any_outdated(): log.warning("Scene has outdated content.") diff --git a/pype/maya/lib.py b/pype/maya/lib.py index 0890d3863e..ec39b3556e 100644 
--- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2318,6 +2318,25 @@ def get_attr_in_layer(attr, layer): return cmds.getAttr(attr) +def fix_incompatible_containers(): + """Return whether the current scene has any outdated content""" + + host = avalon.api.registered_host() + for container in host.ls(): + loader = container['loader'] + + print(container['loader']) + + if loader in ["MayaAsciiLoader", + "AbcLoader", + "ModelLoader", + "CameraLoader", + "RigLoader", + "FBXLoader"]: + cmds.setAttr(container["objectName"] + ".loader", + "ReferenceLoader", type="string") + + def _null(*args): pass diff --git a/pype/maya/menu.py b/pype/maya/menu.py index 5254337f03..806944c117 100644 --- a/pype/maya/menu.py +++ b/pype/maya/menu.py @@ -15,12 +15,13 @@ log = logging.getLogger(__name__) def _get_menu(): """Return the menu instance if it currently exists in Maya""" - app = QtWidgets.QApplication.instance() - widgets = dict((w.objectName(), w) for w in app.allWidgets()) + widgets = dict(( + w.objectName(), w) for w in QtWidgets.QApplication.allWidgets()) menu = widgets.get(self._menu) return menu + def deferred(): log.info("Attempting to install scripts menu..") diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py index 141cf4c13d..f1f87e40c8 100644 --- a/pype/nuke/__init__.py +++ b/pype/nuke/__init__.py @@ -33,41 +33,6 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -class NukeHandler(logging.Handler): - ''' - Nuke Handler - emits logs into nuke's script editor. - warning will emit nuke.warning() - critical and fatal would popup msg dialog to alert of the error. 
-    '''
-
-    def __init__(self):
-        logging.Handler.__init__(self)
-        self.set_name("Pype_Nuke_Handler")
-
-    def emit(self, record):
-        # Formated message:
-        msg = self.format(record)
-
-        if record.levelname.lower() in [
-            # "warning",
-            "critical",
-            "fatal",
-            "error"
-        ]:
-            msg = self.format(record)
-            nuke.message(msg)
-
-
-'''Adding Nuke Logging Handler'''
-log.info([handler.get_name() for handler in logging.root.handlers[:]])
-nuke_handler = NukeHandler()
-if nuke_handler.get_name() \
-    not in [handler.get_name()
-            for handler in logging.root.handlers[:]]:
-    logging.getLogger().addHandler(nuke_handler)
-    logging.getLogger().setLevel(logging.INFO)
-log.info([handler.get_name() for handler in logging.root.handlers[:]])
-
 
 def reload_config():
     """Attempt to reload pipeline at run-time.
 
@@ -113,7 +78,7 @@ def install():
     family_states = [
         "write",
         "review",
-        "nukenodes"
+        "nukenodes", "gizmo"
     ]
 
diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index 4faea1da36..a7f1b64eec 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -21,7 +21,6 @@ from .presets import (
 from .presets import (
     get_anatomy
 )
-# TODO: remove get_anatomy and import directly Anatomy() here
 from pypeapp import Logger
 
 log = Logger().get_logger(__name__, "nuke")
@@ -50,8 +49,6 @@ def checkInventoryVersions():
     and check if the node is having actual version. If not then it will color
     it to red.
""" - # TODO: make it for all nodes not just Read (Loader - # get all Loader nodes by avalon attribute metadata for each in nuke.allNodes(): if each.Class() == 'Read': @@ -93,7 +90,6 @@ def checkInventoryVersions(): def writes_version_sync(): ''' Callback synchronizing version of publishable write nodes ''' - # TODO: make it work with new write node group try: rootVersion = pype.get_version_from_path(nuke.root().name()) padding = len(rootVersion) @@ -130,7 +126,8 @@ def writes_version_sync(): os.makedirs(os.path.dirname(node_new_file), 0o766) except Exception as e: log.warning( - "Write node: `{}` has no version in path: {}".format(each.name(), e)) + "Write node: `{}` has no version in path: {}".format( + each.name(), e)) def version_up_script(): @@ -183,9 +180,12 @@ def format_anatomy(data): try: padding = int(anatomy.templates['render']['padding']) except KeyError as e: - log.error("`padding` key is not in `render` " - "Anatomy template. Please, add it there and restart " - "the pipeline (padding: \"4\"): `{}`".format(e)) + msg = ("`padding` key is not in `render` " + "Anatomy template. 
Please, add it there and restart " + "the pipeline (padding: \"4\"): `{}`").format(e) + + log.error(msg) + nuke.message(msg) version = data.get("version", None) if not version: @@ -265,7 +265,9 @@ def create_write_node(name, data, input=None, prenodes=None): anatomy_filled = format_anatomy(data) except Exception as e: - log.error("problem with resolving anatomy tepmlate: {}".format(e)) + msg = "problem with resolving anatomy tepmlate: {}".format(e) + log.error(msg) + nuke.message(msg) # build file path to workfiles fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/") @@ -372,7 +374,7 @@ def create_write_node(name, data, input=None, prenodes=None): now_node.setInput(0, prev_node) # imprinting group node - GN = avalon.nuke.imprint(GN, data["avalon"]) + avalon.nuke.imprint(GN, data["avalon"]) divider = nuke.Text_Knob('') GN.addKnob(divider) @@ -543,8 +545,11 @@ class WorkfileSettings(object): viewer_dict (dict): adjustments from presets ''' - assert isinstance(viewer_dict, dict), log.error( - "set_viewers_colorspace(): argument should be dictionary") + if not isinstance(viewer_dict, dict): + msg = "set_viewers_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) + return filter_knobs = [ "viewerProcess", @@ -592,8 +597,10 @@ class WorkfileSettings(object): root_dict (dict): adjustmensts from presets ''' - assert isinstance(root_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if not isinstance(root_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + log.error(msg) + nuke.message(msg) log.debug(">> root_dict: {}".format(root_dict)) @@ -638,12 +645,105 @@ class WorkfileSettings(object): write_dict (dict): nuke write node as dictionary ''' - # TODO: complete this function so any write node in # scene will have fixed colorspace following presets for the project - assert isinstance(write_dict, dict), log.error( - "set_root_colorspace(): argument should be dictionary") + if 
not isinstance(write_dict, dict): + msg = "set_root_colorspace(): argument should be dictionary" + log.error(msg) + return - log.debug("__ set_writes_colorspace(): {}".format(write_dict)) + from avalon.nuke import get_avalon_knob_data + + for node in nuke.allNodes(): + + if node.Class() in ["Viewer", "Dot"]: + continue + + # get data from avalon knob + avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"]) + + if not avalon_knob_data: + continue + + if avalon_knob_data["id"] != "pyblish.avalon.instance": + continue + + # establish families + families = [avalon_knob_data["family"]] + if avalon_knob_data.get("families"): + families.append(avalon_knob_data.get("families")) + + # except disabled nodes but exclude backdrops in test + for fmly, knob in write_dict.items(): + write = None + if (fmly in families): + # Add all nodes in group instances. + if node.Class() == "Group": + node.begin() + for x in nuke.allNodes(): + if x.Class() == "Write": + write = x + node.end() + elif node.Class() == "Write": + write = node + else: + log.warning("Wrong write node Class") + + write["colorspace"].setValue(str(knob["colorspace"])) + log.info( + "Setting `{0}` to `{1}`".format( + write.name(), + knob["colorspace"])) + + def set_reads_colorspace(self, reads): + """ Setting colorspace to Read nodes + + Looping trought all read nodes and tries to set colorspace based on regex rules in presets + """ + changes = dict() + for n in nuke.allNodes(): + file = nuke.filename(n) + if not n.Class() == "Read": + continue + + # load nuke presets for Read's colorspace + read_clrs_presets = get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + log.debug(preset_clrsp) + if preset_clrsp is not None: + current = n["colorspace"].value() + future = str(preset_clrsp) + if current != future: + changes.update({ + 
n.name(): { + "from": current, + "to": future + } + }) + log.debug(changes) + if changes: + msg = "Read nodes are not set to correct colospace:\n\n" + for nname, knobs in changes.items(): + msg += str(" - node: '{0}' is now '{1}' " + "but should be '{2}'\n").format( + nname, knobs["from"], knobs["to"] + ) + + msg += "\nWould you like to change it?" + + if nuke.ask(msg): + for nname, knobs in changes.items(): + n = nuke.toNode(nname) + n["colorspace"].setValue(knobs["to"]) + log.info( + "Setting `{0}` to `{1}`".format( + nname, + knobs["to"])) def set_colorspace(self): ''' Setting colorpace following presets @@ -653,25 +753,33 @@ class WorkfileSettings(object): try: self.set_root_colorspace(nuke_colorspace["root"]) except AttributeError: - log.error( - "set_colorspace(): missing `root` settings in template") + msg = "set_colorspace(): missing `root` settings in template" + try: self.set_viewers_colorspace(nuke_colorspace["viewer"]) except AttributeError: - log.error( - "set_colorspace(): missing `viewer` settings in template") + msg = "set_colorspace(): missing `viewer` settings in template" + nuke.message(msg) + log.error(msg) + try: self.set_writes_colorspace(nuke_colorspace["write"]) except AttributeError: - log.error( - "set_colorspace(): missing `write` settings in template") + msg = "set_colorspace(): missing `write` settings in template" + nuke.message(msg) + log.error(msg) + + reads = nuke_colorspace.get("read") + if reads: + self.set_reads_colorspace(reads) try: for key in nuke_colorspace: log.debug("Preset's colorspace key: {}".format(key)) except TypeError: - log.error("Nuke is not in templates! \n\n\n" - "contact your supervisor!") + msg = "Nuke is not in templates! Contact your supervisor!" + nuke.message(msg) + log.error(msg) def reset_frame_range_handles(self): """Set frame range to current asset""" @@ -758,13 +866,13 @@ class WorkfileSettings(object): } if any(x for x in data.values() if x is None): - log.error( - "Missing set shot attributes in DB." 
- "\nContact your supervisor!." - "\n\nWidth: `{width}`" - "\nHeight: `{height}`" - "\nPixel Asspect: `{pixel_aspect}`".format(**data) - ) + msg = ("Missing set shot attributes in DB." + "\nContact your supervisor!." + "\n\nWidth: `{width}`" + "\nHeight: `{height}`" + "\nPixel Asspect: `{pixel_aspect}`").format(**data) + log.error(msg) + nuke.message(msg) bbox = self._asset_entity.get('data', {}).get('crop') @@ -781,10 +889,10 @@ class WorkfileSettings(object): ) except Exception as e: bbox = None - log.error( - "{}: {} \nFormat:Crop need to be set with dots, example: " - "0.0.1920.1080, /nSetting to default".format(__name__, e) - ) + msg = ("{}:{} \nFormat:Crop need to be set with dots, example: " + "0.0.1920.1080, /nSetting to default").format(__name__, e) + log.error(msg) + nuke.message(msg) existing_format = None for format in nuke.formats(): @@ -1000,7 +1108,8 @@ class BuildWorkfile(WorkfileSettings): def process(self, regex_filter=None, version=None, - representations=["exr", "dpx", "lutJson", "mov", "preview"]): + representations=["exr", "dpx", "lutJson", "mov", + "preview", "png"]): """ A short description. 
@@ -1041,9 +1150,10 @@ class BuildWorkfile(WorkfileSettings): wn["render"].setValue(True) vn.setInput(0, wn) - bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT", - color='0xcc1102ff', layer=-1, - nodes=[wn]) + # adding backdrop under write + self.create_backdrop(label="Render write \n\n\n\nOUTPUT", + color='0xcc1102ff', layer=-1, + nodes=[wn]) # move position self.position_up(4) @@ -1057,10 +1167,12 @@ class BuildWorkfile(WorkfileSettings): version=version, representations=representations) - log.info("__ subsets: `{}`".format(subsets)) + for name, subset in subsets.items(): + log.debug("___________________") + log.debug(name) + log.debug(subset["version"]) nodes_backdrop = list() - for name, subset in subsets.items(): if "lut" in name: continue @@ -1090,9 +1202,10 @@ class BuildWorkfile(WorkfileSettings): # move position self.position_right() - bdn = self.create_backdrop(label="Loaded Reads", - color='0x2d7702ff', layer=-1, - nodes=nodes_backdrop) + # adding backdrop under all read nodes + self.create_backdrop(label="Loaded Reads", + color='0x2d7702ff', layer=-1, + nodes=nodes_backdrop) def read_loader(self, representation): """ @@ -1235,6 +1348,8 @@ class ExporterReview: # get first and last frame self.first_frame = min(self.collection.indexes) self.last_frame = max(self.collection.indexes) + if "slate" in self.instance.data["families"]: + self.first_frame += 1 else: self.fname = os.path.basename(self.path_in) self.fhead = os.path.splitext(self.fname)[0] + "." @@ -1254,7 +1369,7 @@ class ExporterReview: 'ext': self.ext, 'files': self.file, "stagingDir": self.staging_dir, - "anatomy_template": "publish", + "anatomy_template": "render", "tags": [self.name.replace("_", "-")] + add_tags } @@ -1460,14 +1575,13 @@ class ExporterReviewMov(ExporterReview): self.log.info("Rendered...") def save_file(self): + import shutil with anlib.maintained_selection(): self.log.info("Saving nodes as file... 
") - # select temp nodes - anlib.select_nodes(self._temp_nodes) # create nk path path = os.path.splitext(self.path)[0] + ".nk" # save file to the path - nuke.nodeCopy(path) + shutil.copyfile(self.instance.context.data["currentFile"], path) self.log.info("Nodes exported...") return path @@ -1508,19 +1622,21 @@ class ExporterReviewMov(ExporterReview): # Write node write_node = nuke.createNode("Write") self.log.debug("Path: {}".format(self.path)) - self.instance.data["baked_colorspace_movie"] = self.path write_node["file"].setValue(self.path) write_node["file_type"].setValue(self.ext) + write_node["meta_codec"].setValue("ap4h") + write_node["mov64_codec"].setValue("ap4h") + write_node["mov64_write_timecode"].setValue(1) write_node["raw"].setValue(1) # connect write_node.setInput(0, self.previous_node) self._temp_nodes.append(write_node) self.log.debug("Write... `{}`".format(self._temp_nodes)) - # ---------- end nodes creation # ---------- render or save to nk if farm: + nuke.scriptSave() path_nk = self.save_file() self.data.update({ "bakeScriptPath": path_nk, @@ -1537,9 +1653,9 @@ class ExporterReviewMov(ExporterReview): self.log.debug("Representation... 
`{}`".format(self.data)) - #---------- Clean up + # ---------- Clean up self.clean_nodes() - + nuke.scriptSave() return self.data @@ -1578,3 +1694,70 @@ def get_dependent_nodes(nodes): }) return connections_in, connections_out + + +def find_free_space_to_paste_nodes( + nodes, + group=nuke.root(), + direction="right", + offset=300): + """ + For getting coordinates in DAG (node graph) for placing new nodes + + Arguments: + nodes (list): list of nuke.Node objects + group (nuke.Node) [optional]: object in which context it is + direction (str) [optional]: where we want it to be placed + [left, right, top, bottom] + offset (int) [optional]: what offset it is from rest of nodes + + Returns: + xpos (int): x coordinace in DAG + ypos (int): y coordinace in DAG + """ + if len(nodes) == 0: + return 0, 0 + + group_xpos = list() + group_ypos = list() + + # get local coordinates of all nodes + nodes_xpos = [n.xpos() for n in nodes] + \ + [n.xpos() + n.screenWidth() for n in nodes] + + nodes_ypos = [n.ypos() for n in nodes] + \ + [n.ypos() + n.screenHeight() for n in nodes] + + # get complete screen size of all nodes to be placed in + nodes_screen_width = max(nodes_xpos) - min(nodes_xpos) + nodes_screen_heigth = max(nodes_ypos) - min(nodes_ypos) + + # get screen size (r,l,t,b) of all nodes in `group` + with group: + group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \ + [n.xpos() + n.screenWidth() for n in nuke.allNodes() + if n not in nodes] + group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \ + [n.ypos() + n.screenHeight() for n in nuke.allNodes() + if n not in nodes] + + # calc output left + if direction in "left": + xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset) + ypos = min(group_ypos) + return xpos, ypos + # calc output right + if direction in "right": + xpos = max(group_xpos) + abs(offset) + ypos = min(group_ypos) + return xpos, ypos + # calc output top + if direction in "top": + xpos = min(group_xpos) + ypos = 
min(group_ypos) - abs(nodes_screen_heigth) - abs(offset) + return xpos, ypos + # calc output bottom + if direction in "bottom": + xpos = min(group_xpos) + ypos = max(group_ypos) + abs(offset) + return xpos, ypos diff --git a/pype/nuke/presets.py b/pype/nuke/presets.py index e0c12e2671..a413ccc878 100644 --- a/pype/nuke/presets.py +++ b/pype/nuke/presets.py @@ -1,6 +1,6 @@ from pype import api as pype from pypeapp import Anatomy, config - +import nuke log = pype.Logger().get_logger(__name__, "nuke") @@ -28,7 +28,7 @@ def get_node_dataflow_preset(**kwarg): families = kwarg.get("families", []) preset = kwarg.get("preset", None) # omit < 2.0.0v - assert any([host, cls]), log.error( + assert any([host, cls]), nuke.message( "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) nuke_dataflow = get_dataflow_preset().get(str(host), None) @@ -56,8 +56,10 @@ def get_node_colorspace_preset(**kwarg): families = kwarg.get("families", []) preset = kwarg.get("preset", None) # omit < 2.0.0v - assert any([host, cls]), log.error( - "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__)) + if not any([host, cls]): + msg = "`{}`: Missing mandatory kwargs `host`, `cls`".format(__file__) + log.error(msg) + nuke.message(msg) nuke_colorspace = get_colorspace_preset().get(str(host), None) nuke_colorspace_node = nuke_colorspace.get(str(cls), None) diff --git a/pype/nukestudio/__init__.py b/pype/nukestudio/__init__.py index 097f077e15..75825d188a 100644 --- a/pype/nukestudio/__init__.py +++ b/pype/nukestudio/__init__.py @@ -51,7 +51,7 @@ if os.getenv("PYBLISH_GUI", None): pyblish.register_gui(os.getenv("PYBLISH_GUI", None)) -def install(config): +def install(): """ Installing Nukestudio integration for avalon diff --git a/pype/plugins/blender/create/create_model.py b/pype/plugins/blender/create/create_model.py new file mode 100644 index 0000000000..7301073f05 --- /dev/null +++ b/pype/plugins/blender/create/create_model.py @@ -0,0 +1,32 @@ +"""Create a model asset.""" + 
+import bpy + +from avalon import api +from avalon.blender import Creator, lib + + +class CreateModel(Creator): + """Polygonal static geometry""" + + name = "modelMain" + label = "Model" + family = "model" + icon = "cube" + + def process(self): + import pype.blender + + asset = self.data["asset"] + subset = self.data["subset"] + name = pype.blender.plugin.model_name(asset, subset) + collection = bpy.data.collections.new(name=name) + bpy.context.scene.collection.children.link(collection) + self.data['task'] = api.Session.get('AVALON_TASK') + lib.imprint(collection, self.data) + + if (self.options or {}).get("useSelection"): + for obj in lib.get_selection(): + collection.objects.link(obj) + + return collection diff --git a/pype/plugins/blender/load/load_model.py b/pype/plugins/blender/load/load_model.py new file mode 100644 index 0000000000..bd6db17650 --- /dev/null +++ b/pype/plugins/blender/load/load_model.py @@ -0,0 +1,315 @@ +"""Load a model asset in Blender.""" + +import logging +from pathlib import Path +from pprint import pformat +from typing import Dict, List, Optional + +import avalon.blender.pipeline +import bpy +import pype.blender +from avalon import api + +logger = logging.getLogger("pype").getChild("blender").getChild("load_model") + + +class BlendModelLoader(pype.blender.AssetLoader): + """Load models from a .blend file. + + Because they come from a .blend file we can simply link the collection that + contains the model. There is no further need to 'containerise' it. + + Warning: + Loading the same asset more then once is not properly supported at the + moment. + """ + + families = ["model"] + representations = ["blend"] + + label = "Link Model" + icon = "code-fork" + color = "orange" + + @staticmethod + def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]: + """Find the collection(s) with name, loaded from libpath. + + Note: + It is assumed that only 1 matching collection is found. 
+ """ + for collection in bpy.data.collections: + if collection.name != name: + continue + if collection.library is None: + continue + if not collection.library.filepath: + continue + collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve()) + normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve()) + if collection_lib_path == normalized_libpath: + return collection + return None + + @staticmethod + def _collection_contains_object( + collection: bpy.types.Collection, object: bpy.types.Object + ) -> bool: + """Check if the collection contains the object.""" + for obj in collection.objects: + if obj == object: + return True + return False + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + lib_container = pype.blender.plugin.model_name(asset, subset) + container_name = pype.blender.plugin.model_name( + asset, subset, namespace + ) + relative = bpy.context.preferences.filepaths.use_relative_paths + + with bpy.data.libraries.load( + libpath, link=True, relative=relative + ) as (_, data_to): + data_to.collections = [lib_container] + + scene = bpy.context.scene + instance_empty = bpy.data.objects.new( + container_name, None + ) + if not instance_empty.get("avalon"): + instance_empty["avalon"] = dict() + avalon_info = instance_empty["avalon"] + avalon_info.update({"container_name": container_name}) + scene.collection.objects.link(instance_empty) + instance_empty.instance_type = 'COLLECTION' + container = bpy.data.collections[lib_container] + container.name = container_name + instance_empty.instance_collection = container + container.make_local() + 
avalon.blender.pipeline.containerise_existing( + container, + name, + namespace, + context, + self.__class__.__name__, + ) + + nodes = list(container.objects) + nodes.append(container) + nodes.append(instance_empty) + self[:] = nodes + return nodes + + def update(self, container: Dict, representation: Dict): + """Update the loaded asset. + + This will remove all objects of the current collection, load the new + ones and add them to the collection. + If the objects of the collection are used in another collection they + will not be removed, only unlinked. Normally this should not be the + case though. + + Warning: + No nested collections are supported at the moment! + """ + collection = bpy.data.collections.get( + container["objectName"] + ) + libpath = Path(api.get_representation_path(representation)) + extension = libpath.suffix.lower() + + logger.debug( + "Container: %s\nRepresentation: %s", + pformat(container, indent=2), + pformat(representation, indent=2), + ) + + assert collection, ( + f"The asset is not loaded: {container['objectName']}" + ) + assert not (collection.children), ( + "Nested collections are not supported." 
+ ) + assert libpath, ( + "No existing library file found for {container['objectName']}" + ) + assert libpath.is_file(), ( + f"The file doesn't exist: {libpath}" + ) + assert extension in pype.blender.plugin.VALID_EXTENSIONS, ( + f"Unsupported file: {libpath}" + ) + collection_libpath = ( + self._get_library_from_container(collection).filepath + ) + normalized_collection_libpath = ( + str(Path(bpy.path.abspath(collection_libpath)).resolve()) + ) + normalized_libpath = ( + str(Path(bpy.path.abspath(str(libpath))).resolve()) + ) + logger.debug( + "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s", + normalized_collection_libpath, + normalized_libpath, + ) + if normalized_collection_libpath == normalized_libpath: + logger.info("Library already loaded, not updating...") + return + # Let Blender's garbage collection take care of removing the library + # itself after removing the objects. + objects_to_remove = set() + collection_objects = list() + collection_objects[:] = collection.objects + for obj in collection_objects: + # Unlink every object + collection.objects.unlink(obj) + remove_obj = True + for coll in [ + coll for coll in bpy.data.collections + if coll != collection + ]: + if ( + coll.objects and + self._collection_contains_object(coll, obj) + ): + remove_obj = False + if remove_obj: + objects_to_remove.add(obj) + + for obj in objects_to_remove: + # Only delete objects that are not used elsewhere + bpy.data.objects.remove(obj) + + instance_empties = [ + obj for obj in collection.users_dupli_group + if obj.name in collection.name + ] + if instance_empties: + instance_empty = instance_empties[0] + container_name = instance_empty["avalon"]["container_name"] + + relative = bpy.context.preferences.filepaths.use_relative_paths + with bpy.data.libraries.load( + str(libpath), link=True, relative=relative + ) as (_, data_to): + data_to.collections = [container_name] + + new_collection = self._get_lib_collection(container_name, libpath) + if new_collection 
is None: + raise ValueError( + "A matching collection '{container_name}' " + "should have been found in: {libpath}" + ) + + for obj in new_collection.objects: + collection.objects.link(obj) + bpy.data.collections.remove(new_collection) + # Update the representation on the collection + avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY] + avalon_prop["representation"] = str(representation["_id"]) + + def remove(self, container: Dict) -> bool: + """Remove an existing container from a Blender scene. + + Arguments: + container (avalon-core:container-1.0): Container to remove, + from `host.ls()`. + + Returns: + bool: Whether the container was deleted. + + Warning: + No nested collections are supported at the moment! + """ + collection = bpy.data.collections.get( + container["objectName"] + ) + if not collection: + return False + assert not (collection.children), ( + "Nested collections are not supported." + ) + instance_parents = list(collection.users_dupli_group) + instance_objects = list(collection.objects) + for obj in instance_objects + instance_parents: + bpy.data.objects.remove(obj) + bpy.data.collections.remove(collection) + + return True + + +class CacheModelLoader(pype.blender.AssetLoader): + """Load cache models. + + Stores the imported asset in a collection named after the asset. + + Note: + At least for now it only supports Alembic files. + """ + + families = ["model"] + representations = ["abc"] + + label = "Link Model" + icon = "code-fork" + color = "orange" + + def process_asset( + self, context: dict, name: str, namespace: Optional[str] = None, + options: Optional[Dict] = None + ) -> Optional[List]: + """ + Arguments: + name: Use pre-defined name + namespace: Use pre-defined namespace + context: Full parenthood of representation to load + options: Additional settings dictionary + """ + raise NotImplementedError("Loading of Alembic files is not yet implemented.") + # TODO (jasper): implement Alembic import. 
+ + libpath = self.fname + asset = context["asset"]["name"] + subset = context["subset"]["name"] + # TODO (jasper): evaluate use of namespace which is 'alien' to Blender. + lib_container = container_name = ( + pype.blender.plugin.model_name(asset, subset, namespace) + ) + relative = bpy.context.preferences.filepaths.use_relative_paths + + with bpy.data.libraries.load( + libpath, link=True, relative=relative + ) as (data_from, data_to): + data_to.collections = [lib_container] + + scene = bpy.context.scene + instance_empty = bpy.data.objects.new( + container_name, None + ) + scene.collection.objects.link(instance_empty) + instance_empty.instance_type = 'COLLECTION' + collection = bpy.data.collections[lib_container] + collection.name = container_name + instance_empty.instance_collection = collection + + nodes = list(collection.objects) + nodes.append(collection) + nodes.append(instance_empty) + self[:] = nodes + return nodes diff --git a/pype/plugins/blender/publish/collect_current_file.py b/pype/plugins/blender/publish/collect_current_file.py new file mode 100644 index 0000000000..a097c72047 --- /dev/null +++ b/pype/plugins/blender/publish/collect_current_file.py @@ -0,0 +1,16 @@ +import bpy + +import pyblish.api + + +class CollectBlenderCurrentFile(pyblish.api.ContextPlugin): + """Inject the current working file into context""" + + order = pyblish.api.CollectorOrder - 0.5 + label = "Blender Current File" + hosts = ['blender'] + + def process(self, context): + """Inject the current working file""" + current_file = bpy.data.filepath + context.data['currentFile'] = current_file diff --git a/pype/plugins/blender/publish/collect_model.py b/pype/plugins/blender/publish/collect_model.py new file mode 100644 index 0000000000..ee10eaf7f2 --- /dev/null +++ b/pype/plugins/blender/publish/collect_model.py @@ -0,0 +1,53 @@ +import typing +from typing import Generator + +import bpy + +import avalon.api +import pyblish.api +from avalon.blender.pipeline import AVALON_PROPERTY + + 
+class CollectModel(pyblish.api.ContextPlugin): + """Collect the data of a model.""" + + hosts = ["blender"] + label = "Collect Model" + order = pyblish.api.CollectorOrder + + @staticmethod + def get_model_collections() -> Generator: + """Return all 'model' collections. + + Check if the family is 'model' and if it doesn't have the + representation set. If the representation is set, it is a loaded model + and we don't want to publish it. + """ + for collection in bpy.data.collections: + avalon_prop = collection.get(AVALON_PROPERTY) or dict() + if (avalon_prop.get('family') == 'model' + and not avalon_prop.get('representation')): + yield collection + + def process(self, context): + """Collect the models from the current Blender scene.""" + collections = self.get_model_collections() + for collection in collections: + avalon_prop = collection[AVALON_PROPERTY] + asset = avalon_prop['asset'] + family = avalon_prop['family'] + subset = avalon_prop['subset'] + task = avalon_prop['task'] + name = f"{asset}_{subset}" + instance = context.create_instance( + name=name, + family=family, + families=[family], + subset=subset, + asset=asset, + task=task, + ) + members = list(collection.objects) + members.append(collection) + instance[:] = members + self.log.debug(instance.data) diff --git a/pype/plugins/blender/publish/extract_model.py b/pype/plugins/blender/publish/extract_model.py new file mode 100644 index 0000000000..501c4d9d5c --- /dev/null +++ b/pype/plugins/blender/publish/extract_model.py @@ -0,0 +1,47 @@ +import os +import avalon.blender.workio + +import pype.api + + +class ExtractModel(pype.api.Extractor): + """Extract as model.""" + + label = "Model" + hosts = ["blender"] + families = ["model"] + optional = True + + def process(self, instance): + # Define extract output file path + + stagingdir = self.staging_dir(instance) + filename = f"{instance.name}.blend" + filepath = os.path.join(stagingdir, filename) + + # Perform extraction + self.log.info("Performing 
extraction..") + + # Just save the file to a temporary location. At least for now it's no + # problem to have (possibly) extra stuff in the file. + avalon.blender.workio.save_file(filepath, copy=True) + # + # # Store reference for integration + # if "files" not in instance.data: + # instance.data["files"] = list() + # + # # instance.data["files"].append(filename) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'blend', + 'ext': 'blend', + 'files': filename, + "stagingDir": stagingdir, + } + instance.data["representations"].append(representation) + + + self.log.info("Extracted instance '%s' to: %s", instance.name, representation) diff --git a/pype/plugins/blender/publish/validate_mesh_has_uv.py b/pype/plugins/blender/publish/validate_mesh_has_uv.py new file mode 100644 index 0000000000..b71a40ad8f --- /dev/null +++ b/pype/plugins/blender/publish/validate_mesh_has_uv.py @@ -0,0 +1,49 @@ +from typing import List + +import bpy + +import pyblish.api +import pype.blender.action + + +class ValidateMeshHasUvs(pyblish.api.InstancePlugin): + """Validate that the current mesh has UV's.""" + + order = pyblish.api.ValidatorOrder + hosts = ["blender"] + families = ["model"] + category = "geometry" + label = "Mesh Has UV's" + actions = [pype.blender.action.SelectInvalidAction] + optional = True + + @staticmethod + def has_uvs(obj: bpy.types.Object) -> bool: + """Check if an object has uv's.""" + if not obj.data.uv_layers: + return False + for uv_layer in obj.data.uv_layers: + for polygon in obj.data.polygons: + for loop_index in polygon.loop_indices: + if not uv_layer.data[loop_index].uv: + return False + + return True + + @classmethod + def get_invalid(cls, instance) -> List: + invalid = [] + # TODO (jasper): only check objects in the collection that will be published? + for obj in [ + obj for obj in bpy.data.objects if obj.type == 'MESH' + ]: + # Make sure we are in object mode. 
+ bpy.ops.object.mode_set(mode='OBJECT') + if not cls.has_uvs(obj): + invalid.append(obj) + return invalid + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}") diff --git a/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py b/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py new file mode 100644 index 0000000000..7e3b38dd19 --- /dev/null +++ b/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py @@ -0,0 +1,35 @@ +from typing import List + +import bpy + +import pyblish.api +import pype.blender.action + + +class ValidateMeshNoNegativeScale(pyblish.api.Validator): + """Ensure that meshes don't have a negative scale.""" + + order = pyblish.api.ValidatorOrder + hosts = ["blender"] + families = ["model"] + label = "Mesh No Negative Scale" + actions = [pype.blender.action.SelectInvalidAction] + + @staticmethod + def get_invalid(instance) -> List: + invalid = [] + # TODO (jasper): only check objects in the collection that will be published? 
+ for obj in [ + obj for obj in bpy.data.objects if obj.type == 'MESH' + ]: + if any(v < 0 for v in obj.scale): + invalid.append(obj) + + return invalid + + def process(self, instance): + invalid = self.get_invalid(instance) + if invalid: + raise RuntimeError( + f"Meshes found in instance with negative scale: {invalid}" + ) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py similarity index 92% rename from pype/plugins/ftrack/publish/integrate_ftrack_comments.py rename to pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py index 9d0b7b3ab9..4be9f7fc3a 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_comments.py +++ b/pype/plugins/ftrack/_unused_publish/integrate_ftrack_comments.py @@ -7,8 +7,9 @@ class IntegrateFtrackComments(pyblish.api.InstancePlugin): """Create comments in Ftrack.""" order = pyblish.api.IntegratorOrder - label = "Integrate Comments to Ftrack." + label = "Integrate Comments to Ftrack" families = ["shot"] + enabled = False def process(self, instance): session = instance.context.data["ftrackSession"] diff --git a/pype/plugins/ftrack/publish/collect_ftrack_api.py b/pype/plugins/ftrack/publish/collect_ftrack_api.py index d09baec676..f79d74453b 100644 --- a/pype/plugins/ftrack/publish/collect_ftrack_api.py +++ b/pype/plugins/ftrack/publish/collect_ftrack_api.py @@ -23,25 +23,43 @@ class CollectFtrackApi(pyblish.api.ContextPlugin): # Collect session session = ftrack_api.Session() + self.log.debug("Ftrack user: \"{0}\"".format(session.api_user)) context.data["ftrackSession"] = session # Collect task - project = os.environ.get('AVALON_PROJECT', '') - asset = os.environ.get('AVALON_ASSET', '') - task = os.environ.get('AVALON_TASK', None) - self.log.debug(task) + project_name = os.environ.get('AVALON_PROJECT', '') + asset_name = os.environ.get('AVALON_ASSET', '') + task_name = os.environ.get('AVALON_TASK', None) + + # Find project entity + 
project_query = 'Project where full_name is "{0}"'.format(project_name) + self.log.debug("Project query: < {0} >".format(project_query)) + project_entity = session.query(project_query).one() + self.log.debug("Project found: {0}".format(project_entity)) + + # Find asset entity + entity_query = ( + 'TypedContext where project_id is "{0}"' + ' and name is "{1}"' + ).format(project_entity["id"], asset_name) + self.log.debug("Asset entity query: < {0} >".format(entity_query)) + asset_entity = session.query(entity_query).one() + self.log.debug("Asset found: {0}".format(asset_entity)) + + # Find task entity if task is set + if task_name: + task_query = ( + 'Task where name is "{0}" and parent_id is "{1}"' + ).format(task_name, asset_entity["id"]) + self.log.debug("Task entity query: < {0} >".format(task_query)) + task_entity = session.query(task_query).one() + self.log.debug("Task entity found: {0}".format(task_entity)) - if task: - result = session.query('Task where\ - project.full_name is "{0}" and\ - name is "{1}" and\ - parent.name is "{2}"'.format(project, task, asset)).one() - context.data["ftrackTask"] = result else: - result = session.query('TypedContext where\ - project.full_name is "{0}" and\ - name is "{1}"'.format(project, asset)).one() - context.data["ftrackEntity"] = result + task_entity = None + self.log.warning("Task name is not set.") - self.log.info(result) + context.data["ftrackProject"] = asset_entity + context.data["ftrackEntity"] = asset_entity + context.data["ftrackTask"] = task_entity diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py index c51685f84d..cd94b2a150 100644 --- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py +++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py @@ -77,6 +77,7 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): info_msg = "Created new {entity_type} with data: {data}" info_msg += ", metadata: {metadata}." 
+ used_asset_versions = [] # Iterate over components and publish for data in instance.data.get("ftrackComponentsList", []): @@ -148,6 +149,9 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): assetversion_cust_attrs = _assetversion_data.pop( "custom_attributes", {} ) + asset_version_comment = _assetversion_data.pop( + "comment", None + ) assetversion_data.update(_assetversion_data) assetversion_entity = session.query( @@ -185,6 +189,20 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): existing_assetversion_metadata.update(assetversion_metadata) assetversion_entity["metadata"] = existing_assetversion_metadata + # Add comment + if asset_version_comment: + assetversion_entity["comment"] = asset_version_comment + try: + session.commit() + except Exception: + session.rollback() + self.log.warning(( + "Comment was not possible to set for AssetVersion" + "\"{0}\". Can't set it's value to: \"{1}\"" + ).format( + assetversion_entity["id"], str(asset_version_comment) + )) + # Adding Custom Attributes for attr, val in assetversion_cust_attrs.items(): if attr in assetversion_entity["custom_attributes"]: @@ -369,3 +387,14 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin): tp, value, tb = sys.exc_info() session.rollback() six.reraise(tp, value, tb) + + if assetversion_entity not in used_asset_versions: + used_asset_versions.append(assetversion_entity) + + asset_versions_key = "ftrackIntegratedAssetVersions" + if asset_versions_key not in instance.data: + instance.data[asset_versions_key] = [] + + for asset_version in used_asset_versions: + if asset_version not in instance.data[asset_versions_key]: + instance.data[asset_versions_key].append(asset_version) diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_note.py b/pype/plugins/ftrack/publish/integrate_ftrack_note.py new file mode 100644 index 0000000000..f7fb5addbb --- /dev/null +++ b/pype/plugins/ftrack/publish/integrate_ftrack_note.py @@ -0,0 +1,51 @@ +import sys +import pyblish.api +import six + + 
+class IntegrateFtrackNote(pyblish.api.InstancePlugin): + """Create comments in Ftrack.""" + + # Must be after integrate asset new + order = pyblish.api.IntegratorOrder + 0.4999 + label = "Integrate Ftrack note" + families = ["ftrack"] + optional = True + + def process(self, instance): + comment = (instance.context.data.get("comment") or "").strip() + if not comment: + self.log.info("Comment is not set.") + return + + self.log.debug("Comment is set to {}".format(comment)) + + asset_versions_key = "ftrackIntegratedAssetVersions" + asset_versions = instance.data.get(asset_versions_key) + if not asset_versions: + self.log.info("There are any integrated AssetVersions") + return + + session = instance.context.data["ftrackSession"] + user = session.query( + "User where username is \"{}\"".format(session.api_user) + ).first() + if not user: + self.log.warning( + "Was not able to query current User {}".format( + session.api_user + ) + ) + + for asset_version in asset_versions: + asset_version.create_note(comment, author=user) + + try: + session.commit() + self.log.debug("Note added to AssetVersion \"{}\"".format( + str(asset_version) + )) + except Exception: + tp, value, tb = sys.exc_info() + session.rollback() + six.reraise(tp, value, tb) diff --git a/pype/plugins/ftrack/publish/integrate_remove_components.py b/pype/plugins/ftrack/publish/integrate_remove_components.py index bad50f7200..26cac0f1ae 100644 --- a/pype/plugins/ftrack/publish/integrate_remove_components.py +++ b/pype/plugins/ftrack/publish/integrate_remove_components.py @@ -11,13 +11,13 @@ class IntegrateCleanComponentData(pyblish.api.InstancePlugin): label = 'Clean component data' families = ["ftrack"] optional = True - active = True + active = False def process(self, instance): for comp in instance.data['representations']: self.log.debug('component {}'.format(comp)) - + if "%" in comp['published_path'] or "#" in comp['published_path']: continue diff --git a/pype/plugins/global/publish/collect_comment.py 
b/pype/plugins/global/publish/collect_comment.py index 22970665a1..062142ace9 100644 --- a/pype/plugins/global/publish/collect_comment.py +++ b/pype/plugins/global/publish/collect_comment.py @@ -15,4 +15,5 @@ class CollectComment(pyblish.api.ContextPlugin): order = pyblish.api.CollectorOrder def process(self, context): - context.data["comment"] = "" + comment = (context.data.get("comment") or "").strip() + context.data["comment"] = comment diff --git a/pype/plugins/global/publish/collect_datetime_data.py b/pype/plugins/global/publish/collect_datetime_data.py new file mode 100644 index 0000000000..f04f924e18 --- /dev/null +++ b/pype/plugins/global/publish/collect_datetime_data.py @@ -0,0 +1,18 @@ +"""These data *must* be collected only once during publishing process. + +Provides: + context -> datetimeData +""" + +import pyblish.api +from pypeapp import config + + +class CollectDateTimeData(pyblish.api.ContextPlugin): + order = pyblish.api.CollectorOrder + label = "Collect DateTime data" + + def process(self, context): + key = "datetimeData" + if key not in context.data: + context.data[key] = config.get_datetime_data() diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py index 1214657856..6c06229304 100644 --- a/pype/plugins/global/publish/collect_filesequences.py +++ b/pype/plugins/global/publish/collect_filesequences.py @@ -12,7 +12,6 @@ import os import re import copy import json -from pprint import pformat import pyblish.api from avalon import api @@ -91,13 +90,22 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): """ - order = pyblish.api.CollectorOrder + order = pyblish.api.CollectorOrder - 0.0001 targets = ["filesequence"] label = "RenderedFrames" def process(self, context): pixel_aspect = 1 + resolution_width = 1920 + resolution_height = 1080 lut_path = None + slate_frame = None + families_data = None + baked_mov_path = None + subset = None + version = None + frame_start = 0 + 
frame_end = 0 if os.environ.get("PYPE_PUBLISH_PATHS"): paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) self.log.info("Collecting paths: {}".format(paths)) @@ -123,6 +131,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): cwd = os.path.dirname(path) root_override = data.get("root") + frame_start = int(data.get("frameStart")) + frame_end = int(data.get("frameEnd")) + subset = data.get("subset") + if root_override: if os.path.isabs(root_override): root = root_override @@ -146,13 +158,16 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): os.environ.update(session) instance = metadata.get("instance") if instance: - # here is the place to add ability for nuke noninteractive - # ______________________________________ instance_family = instance.get("family") pixel_aspect = instance.get("pixelAspect", 1) resolution_width = instance.get("resolutionWidth", 1920) resolution_height = instance.get("resolutionHeight", 1080) lut_path = instance.get("lutPath", None) + baked_mov_path = instance.get("bakeRenderPath") + families_data = instance.get("families") + slate_frame = instance.get("slateFrame") + version = instance.get("version") + else: # Search in directory @@ -160,35 +175,40 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): root = path self.log.info("Collecting: {}".format(root)) + regex = data.get("regex") + if baked_mov_path: + regex = "^{}.*$".format(subset) + if regex: self.log.info("Using regex: {}".format(regex)) + if "slate" in families_data: + frame_start -= 1 + collections, remainder = collect( root=root, regex=regex, exclude_regex=data.get("exclude_regex"), - frame_start=data.get("frameStart"), - frame_end=data.get("frameEnd"), + frame_start=frame_start, + frame_end=frame_end, ) self.log.info("Found collections: {}".format(collections)) - - """ - if data.get("subset"): - # If subset is provided for this json then it must be a single - # collection. 
- if len(collections) > 1: - self.log.error("Forced subset can only work with a single " - "found sequence") - raise RuntimeError("Invalid sequence") - """ + self.log.info("Found remainder: {}".format(remainder)) fps = data.get("fps", 25) + # adding publish comment and intent to context + context.data["comment"] = data.get("comment", "") + context.data["intent"] = data.get("intent", "") + if data.get("user"): context.data["user"] = data["user"] + if data.get("version"): + version = data.get("version") + # Get family from the data families = data.get("families", ["render"]) if "render" not in families: @@ -197,6 +217,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): families.append("ftrack") if "write" in instance_family: families.append("write") + if families_data and "slate" in families_data: + families.append("slate") if data.get("attachTo"): # we need to attach found collections to existing @@ -217,11 +239,13 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "asset": data.get( "asset", api.Session["AVALON_ASSET"]), "stagingDir": root, - "frameStart": data.get("frameStart"), - "frameEnd": data.get("frameEnd"), + "frameStart": frame_start, + "frameEnd": frame_end, "fps": fps, "source": data.get("source", ""), - "pixelAspect": pixel_aspect + "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height }) if "representations" not in instance.data: @@ -246,31 +270,47 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): instance.data["representations"].append( representation) - elif data.get("subset"): + elif subset: # if we have subset - add all collections and known # reminder as representations + # take out review family if mov path + # this will make imagesequence none review + + if baked_mov_path: + self.log.info( + "Baked mov is available {}".format( + baked_mov_path)) + families.append("review") + + if session['AVALON_APP'] == "maya": + families.append("review") + self.log.info( "Adding 
representations to subset {}".format( - data.get("subset"))) + subset)) - instance = context.create_instance(data.get("subset")) + instance = context.create_instance(subset) data = copy.deepcopy(data) instance.data.update( { - "name": data.get("subset"), + "name": subset, "family": families[0], "families": list(families), - "subset": data.get("subset"), + "subset": subset, "asset": data.get( "asset", api.Session["AVALON_ASSET"]), "stagingDir": root, - "frameStart": data.get("frameStart"), - "frameEnd": data.get("frameEnd"), + "frameStart": frame_start, + "frameEnd": frame_end, "fps": fps, "source": data.get("source", ""), "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "slateFrame": slate_frame, + "version": version } ) @@ -282,31 +322,53 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): ext = collection.tail.lstrip(".") + if "slate" in instance.data["families"]: + frame_start += 1 + representation = { "name": ext, "ext": "{}".format(ext), "files": list(collection), + "frameStart": frame_start, + "frameEnd": frame_end, "stagingDir": root, "anatomy_template": "render", "fps": fps, - "tags": ["review"], + "tags": ["review"] if not baked_mov_path else [], } instance.data["representations"].append( representation) + # filter out only relevant mov in case baked available + self.log.debug("__ remainder {}".format(remainder)) + if baked_mov_path: + remainder = [r for r in remainder + if r in baked_mov_path] + self.log.debug("__ remainder {}".format(remainder)) + # process reminders for rem in remainder: # add only known types to representation if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']: self.log.info(" . 
{}".format(rem)) + + if "slate" in instance.data["families"]: + frame_start += 1 + + tags = ["review"] + + if baked_mov_path: + tags.append("delete") + representation = { "name": rem.split(".")[-1], "ext": "{}".format(rem.split(".")[-1]), "files": rem, "stagingDir": root, + "frameStart": frame_start, "anatomy_template": "render", "fps": fps, - "tags": ["review"], + "tags": tags } instance.data["representations"].append( representation) @@ -348,6 +410,9 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "fps": fps, "source": data.get("source", ""), "pixelAspect": pixel_aspect, + "resolutionWidth": resolution_width, + "resolutionHeight": resolution_height, + "version": version } ) if lut_path: @@ -363,9 +428,26 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin): "name": ext, "ext": "{}".format(ext), "files": list(collection), + "frameStart": start, + "frameEnd": end, "stagingDir": root, "anatomy_template": "render", "fps": fps, "tags": ["review"], } instance.data["representations"].append(representation) + + # temporary ... 
allow only beauty on ftrack + if session['AVALON_APP'] == "maya": + AOV_filter = ['beauty'] + for aov in AOV_filter: + if aov not in instance.data['subset']: + instance.data['families'].remove('review') + instance.data['families'].remove('ftrack') + representation["tags"].remove('review') + + self.log.debug( + "__ representations {}".format( + instance.data["representations"])) + self.log.debug( + "__ instance.data {}".format(instance.data)) diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py index 48623eec22..3104b5b705 100644 --- a/pype/plugins/global/publish/collect_templates.py +++ b/pype/plugins/global/publish/collect_templates.py @@ -31,32 +31,44 @@ class CollectTemplates(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] project_name = api.Session["AVALON_PROJECT"] - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} + ) template = project["config"]["template"]["publish"] anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, project_name)) silo = asset.get('silo') - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", 
-1)] + ) # if there is a subset there ought to be version if version is not None: @@ -66,6 +78,8 @@ class CollectTemplates(pyblish.api.InstancePlugin): if hierarchy: # hierarchy = os.path.sep.join(hierarchy) hierarchy = os.path.join(*hierarchy) + else: + hierarchy = "" template_data = {"root": api.Session["AVALON_PROJECTS"], "project": {"name": project_name, @@ -78,6 +92,21 @@ class CollectTemplates(pyblish.api.InstancePlugin): "hierarchy": hierarchy.replace("\\", "/"), "representation": "TEMP"} + # Add datetime data to template data + datetime_data = instance.context.data.get("datetimeData") or {} + template_data.update(datetime_data) + + resolution_width = instance.data.get("resolutionWidth") + resolution_height = instance.data.get("resolutionHeight") + fps = instance.data.get("fps") + + if resolution_width: + template_data["resolution_width"] = resolution_width + if resolution_width: + template_data["resolution_height"] = resolution_height + if resolution_width: + template_data["fps"] = fps + instance.data["template"] = template instance.data["assumedTemplateData"] = template_data diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index 06a62dd98b..e50ba891d2 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -4,6 +4,7 @@ import copy import pype.api import pyblish +from pypeapp import config class ExtractBurnin(pype.api.Extractor): @@ -15,7 +16,7 @@ class ExtractBurnin(pype.api.Extractor): `tags` including `burnin` """ - label = "Quicktime with burnins" + label = "Extract burnins" order = pyblish.api.ExtractorOrder + 0.03 families = ["review", "burnin"] hosts = ["nuke", "maya", "shell"] @@ -25,11 +26,8 @@ class ExtractBurnin(pype.api.Extractor): if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") - # TODO: expand burnin data list to include all usefull keys - version = '' - if 
instance.context.data.get('version'): - version = "v" + str(instance.context.data['version']) - + version = instance.context.data.get( + 'version', instance.data.get('version')) frame_start = int(instance.data.get("frameStart") or 0) frame_end = int(instance.data.get("frameEnd") or 1) duration = frame_end - frame_start + 1 @@ -41,10 +39,31 @@ class ExtractBurnin(pype.api.Extractor): "frame_start": frame_start, "frame_end": frame_end, "duration": duration, - "version": version, - "comment": instance.context.data.get("comment"), - "intent": instance.context.data.get("intent") + "version": int(version), + "comment": instance.context.data.get("comment", ""), + "intent": instance.context.data.get("intent", "") } + + # Add datetime data to preparation data + datetime_data = instance.context.data.get("datetimeData") or {} + prep_data.update(datetime_data) + + slate_frame_start = frame_start + slate_frame_end = frame_end + slate_duration = duration + + # exception for slate workflow + if "slate" in instance.data["families"]: + slate_frame_start = frame_start - 1 + slate_frame_end = frame_end + slate_duration = slate_frame_end - slate_frame_start + 1 + + prep_data.update({ + "slate_frame_start": slate_frame_start, + "slate_frame_end": slate_frame_end, + "slate_duration": slate_duration + }) + # Update data with template data template_data = instance.data.get("assumedTemplateData") or {} prep_data.update(template_data) @@ -59,26 +78,39 @@ class ExtractBurnin(pype.api.Extractor): if "burnin" not in repre.get("tags", []): continue + is_sequence = "sequence" in repre.get("tags", []) + stagingdir = repre["stagingDir"] filename = "{0}".format(repre["files"]) + if is_sequence: + filename = repre["sequence_file"] + name = "_burnin" - movieFileBurnin = filename.replace(".mov", "") + name + ".mov" + ext = os.path.splitext(filename)[1] + movieFileBurnin = filename.replace(ext, "") + name + ext + + if is_sequence: + fn_splt = filename.split(".") + movieFileBurnin = ".".join( + 
((fn_splt[0] + name), fn_splt[-2], fn_splt[-1])) + + self.log.debug("__ movieFileBurnin: `{}`".format(movieFileBurnin)) full_movie_path = os.path.join( - os.path.normpath(stagingdir), repre["files"] - ) + os.path.normpath(stagingdir), filename) full_burnin_path = os.path.join( - os.path.normpath(stagingdir), movieFileBurnin - ) + os.path.normpath(stagingdir), movieFileBurnin) + + self.log.debug("__ full_movie_path: {}".format(full_movie_path)) self.log.debug("__ full_burnin_path: {}".format(full_burnin_path)) # create copy of prep_data for anatomy formatting _prep_data = copy.deepcopy(prep_data) _prep_data["representation"] = repre["name"] - _prep_data["anatomy"] = ( - anatomy.format_all(_prep_data).get("solved") or {} - ) + filled_anatomy = anatomy.format_all(_prep_data) + _prep_data["anatomy"] = filled_anatomy.get_solved() + burnin_data = { "input": full_movie_path.replace("\\", "/"), "codec": repre.get("codec", []), @@ -125,15 +157,35 @@ class ExtractBurnin(pype.api.Extractor): self.log.debug("Output: {}".format(output)) repre_update = { + "anatomy_template": "render", "files": movieFileBurnin, "name": repre["name"], "tags": [x for x in repre["tags"] if x != "delete"] } + + if is_sequence: + burnin_seq_files = list() + for frame_index in range(_prep_data["duration"] + 1): + if frame_index == 0: + continue + burnin_seq_files.append(movieFileBurnin % frame_index) + repre_update.update({ + "files": burnin_seq_files + }) + instance.data["representations"][i].update(repre_update) # removing the source mov file - os.remove(full_movie_path) - self.log.debug("Removed: `{}`".format(full_movie_path)) + if is_sequence: + for frame_index in range(_prep_data["duration"] + 1): + if frame_index == 0: + continue + rm_file = full_movie_path % frame_index + os.remove(rm_file) + self.log.debug("Removed: `{}`".format(rm_file)) + else: + os.remove(full_movie_path) + self.log.debug("Removed: `{}`".format(full_movie_path)) # Remove any representations tagged for deletion. 
for repre in instance.data["representations"]: diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index 8a1a0b5e68..4978649ba2 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ b/pype/plugins/global/publish/extract_jpeg.py @@ -6,7 +6,7 @@ import pype.api class ExtractJpegEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issies + """Resolve any dependency issues This plug-in resolves any paths which, if not updated might break the published file. @@ -20,6 +20,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): hosts = ["shell"] order = pyblish.api.ExtractorOrder families = ["imagesequence", "render", "write", "source"] + enabled = False def process(self, instance): start = instance.data.get("frameStart") @@ -28,51 +29,74 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): collected_frames = os.listdir(stagingdir) collections, remainder = clique.assemble(collected_frames) - input_file = ( - collections[0].format('{head}{padding}{tail}') % start - ) - full_input_path = os.path.join(stagingdir, input_file) - self.log.info("input {}".format(full_input_path)) + self.log.info("subset {}".format(instance.data['subset'])) + if 'crypto' in instance.data['subset']: + return - filename = collections[0].format('{head}') - if not filename.endswith('.'): - filename += "." 
- jpegFile = filename + "jpg" - full_output_path = os.path.join(stagingdir, jpegFile) + # get representation and loop them + representations = instance.data["representations"] - self.log.info("output {}".format(full_output_path)) + # filter out mov and img sequences + representations_new = representations[:] - config_data = instance.context.data['output_repre_config'] + for repre in representations: + self.log.debug(repre) + if 'review' not in repre['tags']: + return - proj_name = os.environ.get('AVALON_PROJECT', '__default__') - profile = config_data.get(proj_name, config_data['__default__']) + input_file = repre['files'][0] - jpeg_items = [] - jpeg_items.append( - os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) - # override file if already exists - jpeg_items.append("-y") - # use same input args like with mov - jpeg_items.extend(profile.get('input', [])) - # input file - jpeg_items.append("-i {}".format(full_input_path)) - # output file - jpeg_items.append(full_output_path) + # input_file = ( + # collections[0].format('{head}{padding}{tail}') % start + # ) + full_input_path = os.path.join(stagingdir, input_file) + self.log.info("input {}".format(full_input_path)) - subprocess_jpeg = " ".join(jpeg_items) + filename = os.path.splitext(input_file)[0] + if not filename.endswith('.'): + filename += "." 
+ jpeg_file = filename + "jpg" + full_output_path = os.path.join(stagingdir, jpeg_file) - # run subprocess - self.log.debug("{}".format(subprocess_jpeg)) - pype.api.subprocess(subprocess_jpeg) + self.log.info("output {}".format(full_output_path)) - if "representations" not in instance.data: - instance.data["representations"] = [] + config_data = instance.context.data['output_repre_config'] - representation = { - 'name': 'jpg', - 'ext': 'jpg', - 'files': jpegFile, - "stagingDir": stagingdir, - "thumbnail": True - } - instance.data["representations"].append(representation) + proj_name = os.environ.get('AVALON_PROJECT', '__default__') + profile = config_data.get(proj_name, config_data['__default__']) + + jpeg_items = [] + jpeg_items.append( + os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + # override file if already exists + jpeg_items.append("-y") + # use same input args like with mov + jpeg_items.extend(profile.get('input', [])) + # input file + jpeg_items.append("-i {}".format(full_input_path)) + # output file + jpeg_items.append(full_output_path) + + subprocess_jpeg = " ".join(jpeg_items) + + # run subprocess + self.log.debug("{}".format(subprocess_jpeg)) + pype.api.subprocess(subprocess_jpeg) + + if "representations" not in instance.data: + instance.data["representations"] = [] + + representation = { + 'name': 'thumbnail', + 'ext': 'jpg', + 'files': jpeg_file, + "stagingDir": stagingdir, + "thumbnail": True, + "tags": ['thumbnail'] + } + + # adding representation + self.log.debug("Adding: {}".format(representation)) + representations_new.append(representation) + + instance.data["representations"] = representations_new diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 4eb7fa16ed..2e79d86c38 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -32,13 +32,13 @@ class ExtractReview(pyblish.api.InstancePlugin): inst_data = instance.data 
fps = inst_data.get("fps") start_frame = inst_data.get("frameStart") - resolution_width = instance.data.get("resolutionWidth", to_width) - resolution_height = instance.data.get("resolutionHeight", to_height) - pixel_aspect = instance.data.get("pixelAspect", 1) - self.log.debug("Families In: `{}`".format(instance.data["families"])) + resolution_width = inst_data.get("resolutionWidth", to_width) + resolution_height = inst_data.get("resolutionHeight", to_height) + pixel_aspect = inst_data.get("pixelAspect", 1) + self.log.debug("Families In: `{}`".format(inst_data["families"])) # get representation and loop them - representations = instance.data["representations"] + representations = inst_data["representations"] # filter out mov and img sequences representations_new = representations[:] @@ -46,21 +46,39 @@ class ExtractReview(pyblish.api.InstancePlugin): if repre['ext'] in self.ext_filter: tags = repre.get("tags", []) + if "thumbnail" in tags: + continue + self.log.info("Try repre: {}".format(repre)) if "review" in tags: staging_dir = repre["stagingDir"] + + # iterating preset output profiles for name, profile in output_profiles.items(): + repre_new = repre.copy() + ext = profile.get("ext", None) + p_tags = profile.get('tags', []) + self.log.info("p_tags: `{}`".format(p_tags)) + + # adding control for presets to be sequence + # or single file + is_sequence = ("sequence" in p_tags) and (ext in ( + "png", "jpg", "jpeg")) + self.log.debug("Profile name: {}".format(name)) - ext = profile.get("ext", None) if not ext: ext = "mov" self.log.warning( - "`ext` attribute not in output profile. Setting to default ext: `mov`") + str("`ext` attribute not in output " + "profile. 
Setting to default ext: `mov`")) - self.log.debug("instance.families: {}".format(instance.data['families'])) - self.log.debug("profile.families: {}".format(profile['families'])) + self.log.debug( + "instance.families: {}".format( + instance.data['families'])) + self.log.debug( + "profile.families: {}".format(profile['families'])) if any(item in instance.data['families'] for item in profile['families']): if isinstance(repre["files"], list): @@ -81,18 +99,22 @@ class ExtractReview(pyblish.api.InstancePlugin): filename = repre["files"].split(".")[0] repr_file = filename + "_{0}.{1}".format(name, ext) - full_output_path = os.path.join( staging_dir, repr_file) + if is_sequence: + filename_base = filename + "_{0}".format(name) + repr_file = filename_base + ".%08d.{0}".format( + ext) + repre_new["sequence_file"] = repr_file + full_output_path = os.path.join( + staging_dir, filename_base, repr_file) + self.log.info("input {}".format(full_input_path)) self.log.info("output {}".format(full_output_path)) - repre_new = repre.copy() - new_tags = [x for x in tags if x != "delete"] - p_tags = profile.get('tags', []) - self.log.info("p_tags: `{}`".format(p_tags)) + # add families [instance.data["families"].append(t) for t in p_tags @@ -115,8 +137,9 @@ class ExtractReview(pyblish.api.InstancePlugin): # necessary input data # adds start arg only if image sequence if isinstance(repre["files"], list): - input_args.append("-start_number {0} -framerate {1}".format( - start_frame, fps)) + input_args.append( + "-start_number {0} -framerate {1}".format( + start_frame, fps)) input_args.append("-i {}".format(full_input_path)) @@ -180,14 +203,19 @@ class ExtractReview(pyblish.api.InstancePlugin): ffmpet_height = int( resolution_height * pixel_aspect) else: - # TODO: it might still be failing in some cases if resolution_ratio != delivery_ratio: lb /= scale_factor else: lb /= pixel_aspect - output_args.append( - "-filter:v 
scale={0}x{1}:flags=lanczos,setsar=1,drawbox=0:0:iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{2})))/2):iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black".format(ffmpet_width, ffmpet_height, lb)) + output_args.append(str( + "-filter:v scale={0}x{1}:flags=lanczos," + "setsar=1,drawbox=0:0:iw:" + "round((ih-(iw*(1/{2})))/2):t=fill:" + "c=black,drawbox=0:ih-round((ih-(iw*(" + "1/{2})))/2):iw:round((ih-(iw*(1/{2})))" + "/2):t=fill:c=black").format( + ffmpet_width, ffmpet_height, lb)) # In case audio is longer than video. output_args.append("-shortest") @@ -195,9 +223,14 @@ class ExtractReview(pyblish.api.InstancePlugin): # output filename output_args.append(full_output_path) - self.log.debug("__ pixel_aspect: `{}`".format(pixel_aspect)) - self.log.debug("__ resolution_width: `{}`".format(resolution_width)) - self.log.debug("__ resolution_height: `{}`".format(resolution_height)) + self.log.debug( + "__ pixel_aspect: `{}`".format(pixel_aspect)) + self.log.debug( + "__ resolution_width: `{}`".format( + resolution_width)) + self.log.debug( + "__ resolution_height: `{}`".format( + resolution_height)) # scaling none square pixels and 1920 width if "reformat" in p_tags: @@ -212,22 +245,34 @@ class ExtractReview(pyblish.api.InstancePlugin): self.log.debug("heigher then delivery") width_scale = to_width width_half_pad = 0 - scale_factor = float(to_width) / float(resolution_width) + scale_factor = float(to_width) / float( + resolution_width) self.log.debug(scale_factor) height_scale = int( resolution_height * scale_factor) height_half_pad = int( (to_height - height_scale)/2) - self.log.debug("__ width_scale: `{}`".format(width_scale)) - self.log.debug("__ width_half_pad: `{}`".format(width_half_pad)) - self.log.debug("__ height_scale: `{}`".format(height_scale)) - self.log.debug("__ height_half_pad: `{}`".format(height_half_pad)) + self.log.debug( + "__ width_scale: `{}`".format(width_scale)) + self.log.debug( + "__ width_half_pad: `{}`".format( + 
width_half_pad)) + self.log.debug( + "__ height_scale: `{}`".format( + height_scale)) + self.log.debug( + "__ height_half_pad: `{}`".format( + height_half_pad)) - - scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format( - width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad - ) + scaling_arg = str( + "scale={0}x{1}:flags=lanczos," + "pad={2}:{3}:{4}:{5}:black,setsar=1" + ).format(width_scale, height_scale, + to_width, to_height, + width_half_pad, + height_half_pad + ) vf_back = self.add_video_filter_args( output_args, scaling_arg) @@ -255,7 +300,16 @@ class ExtractReview(pyblish.api.InstancePlugin): # add it to output_args output_args.insert(0, vf_back) self.log.info("Added Lut to ffmpeg command") - self.log.debug("_ output_args: `{}`".format(output_args)) + self.log.debug( + "_ output_args: `{}`".format(output_args)) + + if is_sequence: + stg_dir = os.path.dirname(full_output_path) + + if not os.path.exists(stg_dir): + self.log.debug( + "creating dir: {}".format(stg_dir)) + os.mkdir(stg_dir) mov_args = [ os.path.join( @@ -279,8 +333,17 @@ class ExtractReview(pyblish.api.InstancePlugin): 'files': repr_file, "tags": new_tags, "outputName": name, - "codec": codec_args + "codec": codec_args, + "_profile": profile, + "resolutionHeight": resolution_height, + "resolutionWidth": resolution_width, }) + if is_sequence: + repre_new.update({ + "stagingDir": stg_dir, + "files": os.listdir(stg_dir) + }) + if repre_new.get('preview'): repre_new.pop("preview") if repre_new.get('thumbnail'): diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py new file mode 100644 index 0000000000..9a720b77a9 --- /dev/null +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -0,0 +1,243 @@ +import os +import pype.api +import pyblish + + +class ExtractReviewSlate(pype.api.Extractor): + """ + Will add slate frame at the start of the video files + """ + + label = 
"Review with Slate frame" + order = pyblish.api.ExtractorOrder + 0.031 + families = ["slate"] + hosts = ["nuke", "maya", "shell"] + optional = True + + def process(self, instance): + inst_data = instance.data + if "representations" not in inst_data: + raise RuntimeError("Burnin needs already created mov to work on.") + + suffix = "_slate" + slate_path = inst_data.get("slateFrame") + ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg") + + to_width = 1920 + to_height = 1080 + resolution_width = inst_data.get("resolutionWidth", to_width) + resolution_height = inst_data.get("resolutionHeight", to_height) + pixel_aspect = inst_data.get("pixelAspect", 1) + fps = inst_data.get("fps") + + # defining image ratios + resolution_ratio = float(resolution_width / ( + resolution_height * pixel_aspect)) + delivery_ratio = float(to_width) / float(to_height) + self.log.debug(resolution_ratio) + self.log.debug(delivery_ratio) + + # get scale factor + scale_factor = to_height / ( + resolution_height * pixel_aspect) + self.log.debug(scale_factor) + + for i, repre in enumerate(inst_data["representations"]): + _remove_at_end = [] + self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre)) + + p_tags = repre.get("tags", []) + + if "slate-frame" not in p_tags: + continue + + stagingdir = repre["stagingDir"] + input_file = "{0}".format(repre["files"]) + + ext = os.path.splitext(input_file)[1] + output_file = input_file.replace(ext, "") + suffix + ext + + input_path = os.path.join( + os.path.normpath(stagingdir), repre["files"]) + self.log.debug("__ input_path: {}".format(input_path)) + _remove_at_end.append(input_path) + + output_path = os.path.join( + os.path.normpath(stagingdir), output_file) + self.log.debug("__ output_path: {}".format(output_path)) + + input_args = [] + output_args = [] + # overrides output file + input_args.append("-y") + # preset's input data + input_args.extend(repre["_profile"].get('input', [])) + input_args.append("-loop 1 -i 
{}".format(slate_path)) + input_args.extend([ + "-r {}".format(fps), + "-t 0.04"] + ) + + # output args + codec_args = repre["_profile"].get('codec', []) + output_args.extend(codec_args) + # preset's output data + output_args.extend(repre["_profile"].get('output', [])) + + # make sure colors are correct + output_args.extend([ + "-vf scale=out_color_matrix=bt709", + "-color_primaries bt709", + "-color_trc bt709", + "-colorspace bt709" + ]) + + # scaling none square pixels and 1920 width + if "reformat" in p_tags: + if resolution_ratio < delivery_ratio: + self.log.debug("lower then delivery") + width_scale = int(to_width * scale_factor) + width_half_pad = int(( + to_width - width_scale)/2) + height_scale = to_height + height_half_pad = 0 + else: + self.log.debug("heigher then delivery") + width_scale = to_width + width_half_pad = 0 + scale_factor = float(to_width) / float(resolution_width) + self.log.debug(scale_factor) + height_scale = int( + resolution_height * scale_factor) + height_half_pad = int( + (to_height - height_scale)/2) + + self.log.debug( + "__ width_scale: `{}`".format(width_scale)) + self.log.debug( + "__ width_half_pad: `{}`".format(width_half_pad)) + self.log.debug( + "__ height_scale: `{}`".format(height_scale)) + self.log.debug( + "__ height_half_pad: `{}`".format(height_half_pad)) + + scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format( + width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad + ) + + vf_back = self.add_video_filter_args( + output_args, scaling_arg) + # add it to output_args + output_args.insert(0, vf_back) + + slate_v_path = slate_path.replace(".png", ext) + output_args.append(slate_v_path) + _remove_at_end.append(slate_v_path) + + slate_args = [ + ffmpeg_path, + " ".join(input_args), + " ".join(output_args) + ] + slate_subprcs_cmd = " ".join(slate_args) + + # run slate generation subprocess + self.log.debug("Slate Executing: {}".format(slate_subprcs_cmd)) + slate_output 
= pype.api.subprocess(slate_subprcs_cmd) + self.log.debug("Slate Output: {}".format(slate_output)) + + # create ffmpeg concat text file path + conc_text_file = input_file.replace(ext, "") + "_concat" + ".txt" + conc_text_path = os.path.join( + os.path.normpath(stagingdir), conc_text_file) + _remove_at_end.append(conc_text_path) + self.log.debug("__ conc_text_path: {}".format(conc_text_path)) + + new_line = "\n" + with open(conc_text_path, "w") as conc_text_f: + conc_text_f.writelines([ + "file {}".format( + slate_v_path.replace("\\", "/")), + new_line, + "file {}".format(input_path.replace("\\", "/")) + ]) + + # concat slate and videos together + conc_input_args = ["-y", "-f concat", "-safe 0"] + conc_input_args.append("-i {}".format(conc_text_path)) + + conc_output_args = ["-c copy"] + conc_output_args.append(output_path) + + concat_args = [ + ffmpeg_path, + " ".join(conc_input_args), + " ".join(conc_output_args) + ] + concat_subprcs_cmd = " ".join(concat_args) + + # ffmpeg concat subprocess + self.log.debug("Executing concat: {}".format(concat_subprcs_cmd)) + concat_output = pype.api.subprocess(concat_subprcs_cmd) + self.log.debug("Output concat: {}".format(concat_output)) + + self.log.debug("__ repre[tags]: {}".format(repre["tags"])) + repre_update = { + "files": output_file, + "name": repre["name"], + "tags": [x for x in repre["tags"] if x != "delete"] + } + inst_data["representations"][i].update(repre_update) + self.log.debug( + "_ representation {}: `{}`".format( + i, inst_data["representations"][i])) + + # removing temp files + for f in _remove_at_end: + os.remove(f) + self.log.debug("Removed: `{}`".format(f)) + + # Remove any representations tagged for deletion. 
+ for repre in inst_data.get("representations", []): + if "delete" in repre.get("tags", []): + self.log.debug("Removing representation: {}".format(repre)) + inst_data["representations"].remove(repre) + + self.log.debug(inst_data["representations"]) + + def add_video_filter_args(self, args, inserting_arg): + """ + Fixing video filter argumets to be one long string + + Args: + args (list): list of string arguments + inserting_arg (str): string argument we want to add + (without flag `-vf`) + + Returns: + str: long joined argument to be added back to list of arguments + + """ + # find all video format settings + vf_settings = [p for p in args + for v in ["-filter:v", "-vf"] + if v in p] + self.log.debug("_ vf_settings: `{}`".format(vf_settings)) + + # remove them from output args list + for p in vf_settings: + self.log.debug("_ remove p: `{}`".format(p)) + args.remove(p) + self.log.debug("_ args: `{}`".format(args)) + + # strip them from all flags + vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "") + for p in vf_settings] + + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + vf_fixed.insert(0, inserting_arg) + self.log.debug("_ vf_fixed: `{}`".format(vf_fixed)) + # create new video filter setting + vf_back = "-vf " + ",".join(vf_fixed) + + return vf_back diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py index 59e05ee2aa..87b9e1a9bd 100644 --- a/pype/plugins/global/publish/integrate.py +++ b/pype/plugins/global/publish/integrate.py @@ -24,7 +24,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin): label = "Integrate Asset" order = pyblish.api.IntegratorOrder - families = ["assembly"] + families = [] exclude_families = ["clip"] def process(self, instance): @@ -84,9 +84,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin): project = io.find_one({"type": "project"}) - asset = io.find_one({"type": "asset", - "name": ASSET, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": ASSET, 
+ "parent": project["_id"] + }) assert all([project, asset]), ("Could not find current project or " "asset '%s'" % ASSET) @@ -94,10 +96,14 @@ class IntegrateAsset(pyblish.api.InstancePlugin): subset = self.get_subset(asset, instance) # get next version - latest_version = io.find_one({"type": "version", - "parent": subset["_id"]}, - {"name": True}, - sort=[("name", -1)]) + latest_version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + {"name": True}, + sort=[("name", -1)] + ) next_version = 1 if latest_version is not None: @@ -318,9 +324,11 @@ class IntegrateAsset(pyblish.api.InstancePlugin): def get_subset(self, asset, instance): - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"]}) + subset = io.find_one({ + "type": "subset", + "parent": asset["_id"], + "name": instance.data["subset"] + }) if subset is None: subset_name = instance.data["subset"] diff --git a/pype/plugins/global/publish/integrate_assumed_destination.py b/pype/plugins/global/publish/integrate_assumed_destination.py index a26529fc2c..d090e2711a 100644 --- a/pype/plugins/global/publish/integrate_assumed_destination.py +++ b/pype/plugins/global/publish/integrate_assumed_destination.py @@ -82,31 +82,40 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin): project_name = api.Session["AVALON_PROJECT"] a_template = anatomy.templates - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + {"type": "project", "name": project_name}, + projection={"config": True, "data": True} + ) template = a_template['publish']['path'] # anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, 
project_name)) - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index c78e9c6442..7d95534897 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -7,7 +7,7 @@ import errno import pyblish.api from avalon import api, io from avalon.vendor import filelink -from pathlib2 import Path + # this is needed until speedcopy for linux is fixed if sys.platform == "win32": from speedcopy import copyfile @@ -76,8 +76,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "source", "matchmove", "image" + "source", + "assembly" ] exclude_families = ["clip"] + db_representation_context_keys = [ + "project", "asset", "task", "subset", "version", "representation", + "family", "hierarchy", "task", "username" + ] def process(self, instance): @@ -153,9 +159,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): io.install() project = io.find_one({"type": "project"}) - asset = io.find_one({"type": "asset", - "name": ASSET, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": ASSET, + "parent": project["_id"] + }) assert all([project, asset]), ("Could not find current project or " "asset '%s'" % ASSET) @@ -163,10 +171,14 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): subset = self.get_subset(asset, instance) # get next version - latest_version = io.find_one({"type": 
"version", - "parent": subset["_id"]}, - {"name": True}, - sort=[("name", -1)]) + latest_version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + {"name": True}, + sort=[("name", -1)] + ) next_version = 1 if latest_version is not None: @@ -175,16 +187,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if instance.data.get('version'): next_version = int(instance.data.get('version')) - # self.log.info("Verifying version from assumed destination") - - # assumed_data = instance.data["assumedTemplateData"] - # assumed_version = assumed_data["version"] - # if assumed_version != next_version: - # raise AttributeError("Assumed version 'v{0:03d}' does not match" - # "next version in database " - # "('v{1:03d}')".format(assumed_version, - # next_version)) - self.log.debug("Next version: v{0:03d}".format(next_version)) version_data = self.create_version_data(context, instance) @@ -270,6 +272,21 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "version": int(version["name"]), "hierarchy": hierarchy} + # Add datetime data to template data + datetime_data = context.data.get("datetimeData") or {} + template_data.update(datetime_data) + + resolution_width = repre.get("resolutionWidth") + resolution_height = repre.get("resolutionHeight") + fps = instance.data.get("fps") + + if resolution_width: + template_data["resolution_width"] = resolution_width + if resolution_width: + template_data["resolution_height"] = resolution_height + if resolution_width: + template_data["fps"] = fps + files = repre['files'] if repre.get('stagingDir'): stagingdir = repre['stagingDir'] @@ -279,7 +296,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): anatomy.templates[template_name]["path"]) sequence_repre = isinstance(files, list) - + repre_context = None if sequence_repre: src_collections, remainder = clique.assemble(files) self.log.debug( @@ -302,10 +319,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_data["representation"] = repre['ext'] 
template_data["frame"] = src_padding_exp % i anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled[template_name]["path"] + if repre_context is None: + repre_context = template_filled.used_values test_dest_files.append( - os.path.normpath( - anatomy_filled[template_name]["path"]) + os.path.normpath(template_filled) ) self.log.debug( @@ -319,19 +338,18 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): index_frame_start = None if repre.get("frameStart"): - frame_start_padding = len(str( - repre.get("frameEnd"))) + frame_start_padding = anatomy.templates["render"]["padding"] index_frame_start = int(repre.get("frameStart")) + # exception for slate workflow + if "slate" in instance.data["families"]: + index_frame_start -= 1 + dst_padding_exp = src_padding_exp dst_start_frame = None for i in src_collection.indexes: src_padding = src_padding_exp % i - # for adding first frame into db - if not dst_start_frame: - dst_start_frame = src_padding - src_file_name = "{0}{1}{2}".format( src_head, src_padding, src_tail) @@ -353,11 +371,16 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("source: {}".format(src)) instance.data["transfers"].append([src, dst]) + # for adding first frame into db + if not dst_start_frame: + dst_start_frame = dst_padding + + dst = "{0}{1}{2}".format( dst_head, dst_start_frame, dst_tail).replace("..", ".") - repre['published_path'] = dst + repre['published_path'] = self.unc_convert(dst) else: # Single file @@ -381,15 +404,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): src = os.path.join(stagingdir, fname) anatomy_filled = anatomy.format(template_data) - dst = os.path.normpath( - anatomy_filled[template_name]["path"]).replace("..", ".") + template_filled = anatomy_filled[template_name]["path"] + repre_context = template_filled.used_values + dst = os.path.normpath(template_filled).replace("..", ".") instance.data["transfers"].append([src, dst]) - repre['published_path'] = dst + 
repre['published_path'] = self.unc_convert(dst) self.log.debug("__ dst: {}".format(dst)) + for key in self.db_representation_context_keys: + value = template_data.get(key) + if not value: + continue + repre_context[key] = template_data[key] + representation = { + "_id": io.ObjectId(), "schema": "pype:representation-2.0", "type": "representation", "parent": version_id, @@ -399,21 +430,12 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): # Imprint shortcut to context # for performance reasons. - "context": { - "root": root, - "project": {"name": PROJECT, - "code": project['data']['code']}, - 'task': TASK, - "silo": asset.get('silo'), - "asset": ASSET, - "family": instance.data['family'], - "subset": subset["name"], - "version": version["name"], - "hierarchy": hierarchy, - "representation": repre['ext'] - } + "context": repre_context } + if repre.get("outputName"): + representation["context"]["output"] = repre['outputName'] + if sequence_repre and repre.get("frameStart"): representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart")) @@ -429,6 +451,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("__ represNAME: {}".format(rep['name'])) self.log.debug("__ represPATH: {}".format(rep['published_path'])) io.insert_many(representations) + instance.data["published_representations"] = representations # self.log.debug("Representation: {}".format(representations)) self.log.info("Registered {} items".format(len(representations))) @@ -460,6 +483,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): self.log.debug("Hardlinking file .. 
{} -> {}".format(src, dest)) self.hardlink_file(src, dest) + def unc_convert(self, path): + self.log.debug("> __ path: `{}`".format(path)) + drive, _path = os.path.splitdrive(path) + self.log.debug("> __ drive, _path: `{}`, `{}`".format(drive, _path)) + + if not os.path.exists(drive + "/"): + self.log.info("Converting to unc from environments ..") + + path_replace = os.getenv("PYPE_STUDIO_PROJECTS_PATH") + path_mount = os.getenv("PYPE_STUDIO_PROJECTS_MOUNT") + + if "/" in path_mount: + path = path.replace(path_mount[0:-1], path_replace) + else: + path = path.replace(path_mount, path_replace) + return path + def copy_file(self, src, dst): """ Copy given source to destination @@ -469,11 +509,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): Returns: None """ - - src = str(Path(src).resolve()) - drive, _path = os.path.splitdrive(dst) - unc = Path(drive).resolve() - dst = str(unc / _path) + src = self.unc_convert(src) + dst = self.unc_convert(dst) self.log.debug("Copying file .. {} -> {}".format(src, dst)) dirname = os.path.dirname(dst) @@ -494,8 +531,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): def hardlink_file(self, src, dst): dirname = os.path.dirname(dst) - src = Path(src).resolve() - dst = Path(dst).resolve() + + src = self.unc_convert(src) + dst = self.unc_convert(dst) + try: os.makedirs(dirname) except OSError as e: @@ -508,9 +547,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): filelink.create(src, dst, filelink.HARDLINK) def get_subset(self, asset, instance): - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"]}) + subset = io.find_one({ + "type": "subset", + "parent": asset["_id"], + "name": instance.data["subset"] + }) if subset is None: subset_name = instance.data["subset"] @@ -601,7 +642,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "source": source, "comment": context.data.get("comment"), "machine": context.data.get("machine"), - "fps": context.data.get("fps")} + 
"fps": context.data.get( + "fps", instance.data.get("fps"))} # Include optional data if present in optionals = [ diff --git a/pype/plugins/global/publish/integrate_rendered_frames.py b/pype/plugins/global/publish/integrate_rendered_frames.py index 086b03802e..5819051146 100644 --- a/pype/plugins/global/publish/integrate_rendered_frames.py +++ b/pype/plugins/global/publish/integrate_rendered_frames.py @@ -88,9 +88,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin): project = io.find_one({"type": "project"}) - asset = io.find_one({"type": "asset", - "name": ASSET, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": ASSET, + "parent": project["_id"] + }) assert all([project, asset]), ("Could not find current project or " "asset '%s'" % ASSET) @@ -98,10 +100,14 @@ class IntegrateFrames(pyblish.api.InstancePlugin): subset = self.get_subset(asset, instance) # get next version - latest_version = io.find_one({"type": "version", - "parent": subset["_id"]}, - {"name": True}, - sort=[("name", -1)]) + latest_version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + {"name": True}, + sort=[("name", -1)] + ) next_version = 1 if latest_version is not None: @@ -251,9 +257,6 @@ class IntegrateFrames(pyblish.api.InstancePlugin): self.log.debug("path_to_save: {}".format(path_to_save)) - - - representation = { "schema": "pype:representation-2.0", "type": "representation", @@ -332,9 +335,11 @@ class IntegrateFrames(pyblish.api.InstancePlugin): def get_subset(self, asset, instance): - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": instance.data["subset"]}) + subset = io.find_one({ + "type": "subset", + "parent": asset["_id"], + "name": instance.data["subset"] + }) if subset is None: subset_name = instance.data["subset"] diff --git a/pype/plugins/global/publish/integrate_thumbnail.py b/pype/plugins/global/publish/integrate_thumbnail.py new file mode 100644 index 0000000000..1c4399b386 --- /dev/null +++ 
b/pype/plugins/global/publish/integrate_thumbnail.py @@ -0,0 +1,139 @@ +import os +import sys +import errno +import shutil +import copy + +import six +import pyblish.api +from bson.objectid import ObjectId + +from avalon import api, io + + +class IntegrateThumbnails(pyblish.api.InstancePlugin): + """Integrate Thumbnails.""" + + label = "Integrate Thumbnails" + order = pyblish.api.IntegratorOrder + 0.01 + families = ["review"] + + def process(self, instance): + + if not os.environ.get("AVALON_THUMBNAIL_ROOT"): + self.log.info("AVALON_THUMBNAIL_ROOT is not set." + " Skipping thumbnail integration.") + return + + published_repres = instance.data.get("published_representations") + if not published_repres: + self.log.debug( + "There are not published representation ids on the instance." + ) + return + + project_name = api.Session["AVALON_PROJECT"] + + anatomy = instance.context.data["anatomy"] + if "publish" not in anatomy.templates: + raise AssertionError("Anatomy does not have set publish key!") + + if "thumbnail" not in anatomy.templates["publish"]: + raise AssertionError(( + "There is not set \"thumbnail\" template for project \"{}\"" + ).format(project_name)) + + thumbnail_template = anatomy.templates["publish"]["thumbnail"] + + io.install() + + thumb_repre = None + for repre in published_repres: + if repre["name"].lower() == "thumbnail": + thumb_repre = repre + break + + if not thumb_repre: + self.log.debug( + "There is not representation with name \"thumbnail\"" + ) + return + + version = io.find_one({"_id": thumb_repre["parent"]}) + if not version: + raise AssertionError( + "There does not exist version with id {}".format( + str(thumb_repre["parent"]) + ) + ) + + # Get full path to thumbnail file from representation + src_full_path = os.path.normpath(thumb_repre["data"]["path"]) + if not os.path.exists(src_full_path): + self.log.warning("Thumbnail file was not found. 
Path: {}".format( + src_full_path + )) + return + + filename, file_extension = os.path.splitext(src_full_path) + # Create id for mongo entity now to fill anatomy template + thumbnail_id = ObjectId() + + # Prepare anatomy template fill data + template_data = copy.deepcopy(thumb_repre["context"]) + template_data.update({ + "_id": str(thumbnail_id), + "thumbnail_root": os.environ.get("AVALON_THUMBNAIL_ROOT"), + "ext": file_extension, + "thumbnail_type": "thumbnail" + }) + + anatomy_filled = anatomy.format(template_data) + final_path = anatomy_filled.get("publish", {}).get("thumbnail") + if not final_path: + raise AssertionError(( + "Anatomy template was not filled with entered data" + "\nTemplate: {} " + "\nData: {}" + ).format(thumbnail_template, str(template_data))) + + dst_full_path = os.path.normpath(final_path) + self.log.debug( + "Copying file .. {} -> {}".format(src_full_path, dst_full_path) + ) + dirname = os.path.dirname(dst_full_path) + try: + os.makedirs(dirname) + except OSError as e: + if e.errno != errno.EEXIST: + tp, value, tb = sys.exc_info() + six.reraise(tp, value, tb) + + shutil.copy(src_full_path, dst_full_path) + + # Clean template data from keys that are dynamic + template_data.pop("_id") + template_data.pop("thumbnail_root") + + thumbnail_entity = { + "_id": thumbnail_id, + "type": "thumbnail", + "schema": "pype:thumbnail-1.0", + "data": { + "template": thumbnail_template, + "template_data": template_data + } + } + # Create thumbnail entity + io.insert_one(thumbnail_entity) + self.log.debug( + "Creating entity in database {}".format(str(thumbnail_entity)) + ) + # Set thumbnail id for version + io.update_many( + {"_id": version["_id"]}, + {"$set": {"data.thumbnail_id": thumbnail_id}} + ) + self.log.debug("Setting thumbnail for version \"{}\" <{}>".format( + version["name"], str(version["_id"]) + )) diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 12737880d0..a9fa8febd4 100644 
--- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -33,14 +33,22 @@ def _get_script(): # Logic to retrieve latest files concerning extendFrames def get_latest_version(asset_name, subset_name, family): # Get asset - asset_name = io.find_one({"type": "asset", - "name": asset_name}, - projection={"name": True}) + asset_name = io.find_one( + { + "type": "asset", + "name": asset_name + }, + projection={"name": True} + ) - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset_name["_id"]}, - projection={"_id": True, "name": True}) + subset = io.find_one( + { + "type": "subset", + "name": subset_name, + "parent": asset_name["_id"] + }, + projection={"_id": True, "name": True} + ) # Check if subsets actually exists (pre-run check) assert subset, "No subsets found, please publish with `extendFrames` off" @@ -51,11 +59,15 @@ def get_latest_version(asset_name, subset_name, family): "data.endFrame": True, "parent": True} - version = io.find_one({"type": "version", - "parent": subset["_id"], - "data.families": family}, - projection=version_projection, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"], + "data.families": family + }, + projection=version_projection, + sort=[("name", -1)] + ) assert version, "No version found, this is a bug" @@ -149,7 +161,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "FTRACK_API_USER", "FTRACK_API_KEY", "FTRACK_SERVER", - "PYPE_ROOT" + "PYPE_ROOT", + "PYPE_METADATA_FILE", + "PYPE_STUDIO_PROJECTS_PATH", + "PYPE_STUDIO_PROJECTS_MOUNT" ] def _submit_deadline_post_job(self, instance, job): @@ -160,7 +175,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ data = instance.data.copy() subset = data["subset"] - state = data.get("publishJobState", "Suspended") job_name = "{batch} - {subset} [publish image sequence]".format( batch=job["Props"]["Name"], subset=subset @@ -172,7 
+186,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): metadata_path = os.path.normpath(metadata_path) mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT']) - network_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_PATH']) + network_root = os.path.normpath( + os.environ['PYPE_STUDIO_PROJECTS_PATH']) metadata_path = metadata_path.replace(mount_root, network_root) @@ -186,13 +201,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "JobDependency0": job["_id"], "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), - "InitialStatus": state, "Priority": job["Props"]["Pri"] }, "PluginInfo": { "Version": "3.6", "ScriptFile": _get_script(), - "Arguments": '--paths "{}"'.format(metadata_path), + "Arguments": "", "SingleFrameOnly": "True" }, @@ -204,6 +218,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): # job so they use the same environment environment = job["Props"].get("Env", {}) + environment["PYPE_METADATA_FILE"] = metadata_path i = 0 for index, key in enumerate(environment): self.log.info("KEY: {}".format(key)) @@ -241,6 +256,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): """ # Get a submission job data = instance.data.copy() + if hasattr(instance, "_log"): + data['_log'] = instance._log render_job = data.pop("deadlineSubmissionJob", None) submission_type = "deadline" @@ -318,6 +335,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "source": source, "user": context.data["user"], "version": context.data["version"], + "intent": context.data.get("intent"), + "comment": context.data.get("comment"), # Optional metadata (for debugging) "metadata": { "instance": data, @@ -326,6 +345,9 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): } } + if api.Session["AVALON_APP"] == "nuke": + metadata['subset'] = subset + if submission_type == "muster": ftrack = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), diff --git 
a/pype/plugins/global/publish/validate_custom_ftrack_attributes.py b/pype/plugins/global/publish/validate_custom_ftrack_attributes.py index 2386b359e4..1e8b239b33 100644 --- a/pype/plugins/global/publish/validate_custom_ftrack_attributes.py +++ b/pype/plugins/global/publish/validate_custom_ftrack_attributes.py @@ -47,7 +47,7 @@ class ValidateFtrackAttributes(pyblish.api.InstancePlugin): host = pyblish.api.current_host() to_check = context.data["presets"].get( - host, {}).get("ftrack_attributes") + host, {}).get("ftrack_custom_attributes") if not to_check: self.log.warning("ftrack_attributes preset not found") return diff --git a/pype/plugins/maya/load/actions.py b/pype/plugins/maya/load/actions.py index 9f6a5c4d34..77d18b0ee3 100644 --- a/pype/plugins/maya/load/actions.py +++ b/pype/plugins/maya/load/actions.py @@ -140,9 +140,9 @@ class ImportMayaLoader(api.Loader): message = "Are you sure you want import this" state = QtWidgets.QMessageBox.warning(None, - "Are you sure?", - message, - buttons=buttons, - defaultButton=accept) + "Are you sure?", + message, + buttons=buttons, + defaultButton=accept) return state == accept diff --git a/pype/plugins/maya/load/load_camera.py b/pype/plugins/maya/load/load_camera.py deleted file mode 100644 index e9bf265b98..0000000000 --- a/pype/plugins/maya/load/load_camera.py +++ /dev/null @@ -1,62 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - -class CameraLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader of Alembic for the pype.camera family""" - - families = ["camera"] - label = "Reference camera" - representations = ["abc", "ma"] - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - # Get family type from the context - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "camera" - - cmds.loadPlugin("AbcImport.mll", quiet=True) - groupName = 
"{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - sharedReferenceFile=False, - groupReference=True, - groupName="{}:{}".format(namespace, name), - reference=True, - returnNewNodes=True) - - cameras = cmds.ls(nodes, type="camera") - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - # Check the Maya version, lockTransform has been introduced since - # Maya 2016.5 Ext 2 - version = int(cmds.about(version=True)) - if version >= 2016: - for camera in cameras: - cmds.camera(camera, edit=True, lockTransform=True) - else: - self.log.warning("This version of Maya does not support locking of" - " transforms of cameras.") - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_fbx.py b/pype/plugins/maya/load/load_fbx.py deleted file mode 100644 index 14df300c3c..0000000000 --- a/pype/plugins/maya/load/load_fbx.py +++ /dev/null @@ -1,54 +0,0 @@ -import pype.maya.plugin -import os -from pypeapp import config - - -class FBXLoader(pype.maya.plugin.ReferenceLoader): - """Load the FBX""" - - families = ["fbx"] - representations = ["fbx"] - - label = "Reference FBX" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "fbx" - - # Ensure FBX plug-in is loaded - cmds.loadPlugin("fbxmaya", quiet=True) - - with maya.maintained_selection(): - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - 
groupName="{}:{}".format(namespace, name)) - - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - self[:] = nodes - - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_look.py b/pype/plugins/maya/load/load_look.py index b1c88bcd18..04ac9b23e4 100644 --- a/pype/plugins/maya/load/load_look.py +++ b/pype/plugins/maya/load/load_look.py @@ -116,9 +116,11 @@ class LookLoader(pype.maya.plugin.ReferenceLoader): shapes=True)) nodes = set(nodes_list) - json_representation = io.find_one({"type": "representation", - "parent": representation['parent'], - "name": "json"}) + json_representation = io.find_one({ + "type": "representation", + "parent": representation['parent'], + "name": "json" + }) # Load relationships shader_relation = api.get_representation_path(json_representation) diff --git a/pype/plugins/maya/load/load_mayaascii.py b/pype/plugins/maya/load/load_mayaascii.py deleted file mode 100644 index b9a5de2782..0000000000 --- a/pype/plugins/maya/load/load_mayaascii.py +++ /dev/null @@ -1,68 +0,0 @@ -import pype.maya.plugin -from pypeapp import config -import os - - -class MayaAsciiLoader(pype.maya.plugin.ReferenceLoader): - """Load the model""" - - families = ["mayaAscii", - "setdress", - "layout"] - representations = ["ma"] - - label = "Reference Maya Ascii" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - import maya.cmds as cmds - from avalon import maya - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "model" - - with maya.maintained_selection(): - nodes = 
cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName="{}:{}".format(namespace, name)) - - self[:] = nodes - groupName = "{}:{}".format(namespace, name) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - cmds.setAttr(groupName + ".displayHandle", 1) - # get bounding box - bbox = cmds.exactWorldBoundingBox(groupName) - # get pivot position on world space - pivot = cmds.xform(groupName, q=True, sp=True, ws=True) - # center of bounding box - cx = (bbox[0] + bbox[3]) / 2 - cy = (bbox[1] + bbox[4]) / 2 - cz = (bbox[2] + bbox[5]) / 2 - # add pivot position to calculate offset - cx = cx + pivot[0] - cy = cy + pivot[1] - cz = cz + pivot[2] - # set selection handle offset to center of bounding box - cmds.setAttr(groupName + ".selectHandleX", cx) - cmds.setAttr(groupName + ".selectHandleY", cy) - cmds.setAttr(groupName + ".selectHandleZ", cz) - return nodes - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py index 376fcc2c01..cbd1da7cbd 100644 --- a/pype/plugins/maya/load/load_reference.py +++ b/pype/plugins/maya/load/load_reference.py @@ -1,4 +1,6 @@ import pype.maya.plugin +from avalon import api, maya +from maya import cmds import os from pypeapp import config @@ -6,8 +8,15 @@ from pypeapp import config class ReferenceLoader(pype.maya.plugin.ReferenceLoader): """Load the model""" - families = ["model", "pointcache", "animation"] - representations = ["ma", "abc"] + families = ["model", + "pointcache", + "animation", + "mayaAscii", + "setdress", + "layout", + "camera", + "rig"] + representations = ["ma", "abc", "fbx"] tool_names = 
["loader"] label = "Reference" @@ -37,27 +46,29 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): reference=True, returnNewNodes=True) - namespace = cmds.referenceQuery(nodes[0], namespace=True) + # namespace = cmds.referenceQuery(nodes[0], namespace=True) shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) + + current_namespace = pm.namespaceInfo(currentNamespace=True) + + if current_namespace != ":": + groupName = current_namespace + ":" + groupName groupNode = pm.PyNode(groupName) roots = set() - print(nodes) for node in newNodes: try: roots.add(pm.PyNode(node).getAllParents()[-2]) - except: + except: # noqa: E722 pass for root in roots: root.setParent(world=True) - groupNode.root().zeroTransformPivots() + groupNode.zeroTransformPivots() for root in roots: root.setParent(groupNode) @@ -90,23 +101,39 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader): cmds.setAttr(groupName + ".selectHandleY", cy) cmds.setAttr(groupName + ".selectHandleZ", cz) + if data.get("post_process", True): + if family == "rig": + self._post_process_rig(name, namespace, context, data) + return newNodes def switch(self, container, representation): self.update(container, representation) + def _post_process_rig(self, name, namespace, context, data): -# for backwards compatibility -class AbcLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["pointcache", "animation"] - representations = ["abc"] - tool_names = [] + output = next((node for node in self if + node.endswith("out_SET")), None) + controls = next((node for node in self if + node.endswith("controls_SET")), None) + assert output, "No out_SET in rig, this is a bug." + assert controls, "No controls_SET in rig, this is a bug." 
-# for backwards compatibility -class ModelLoader(ReferenceLoader): - label = "Deprecated loader (don't use)" - families = ["model", "pointcache"] - representations = ["abc"] - tool_names = [] + # Find the roots amongst the loaded nodes + roots = cmds.ls(self[:], assemblies=True, long=True) + assert roots, "No root nodes in rig, this is a bug." + + asset = api.Session["AVALON_ASSET"] + dependency = str(context["representation"]["_id"]) + + self.log.info("Creating subset: {}".format(namespace)) + + # Create the animation instance + with maya.maintained_selection(): + cmds.select([output, controls] + roots, noExpand=True) + api.create(name=namespace, + asset=asset, + family="animation", + options={"useSelection": True}, + data={"dependencies": dependency}) diff --git a/pype/plugins/maya/load/load_rig.py b/pype/plugins/maya/load/load_rig.py deleted file mode 100644 index fc6e666ac6..0000000000 --- a/pype/plugins/maya/load/load_rig.py +++ /dev/null @@ -1,95 +0,0 @@ -from maya import cmds - -import pype.maya.plugin -from avalon import api, maya -import os -from pypeapp import config - - -class RigLoader(pype.maya.plugin.ReferenceLoader): - """Specific loader for rigs - - This automatically creates an instance for animators upon load. 
- - """ - - families = ["rig"] - representations = ["ma"] - - label = "Reference rig" - order = -10 - icon = "code-fork" - color = "orange" - - def process_reference(self, context, name, namespace, data): - - try: - family = context["representation"]["context"]["family"] - except ValueError: - family = "rig" - - groupName = "{}:{}".format(namespace, name) - nodes = cmds.file(self.fname, - namespace=namespace, - reference=True, - returnNewNodes=True, - groupReference=True, - groupName=groupName) - - cmds.xform(groupName, pivots=(0, 0, 0)) - - presets = config.get_presets(project=os.environ['AVALON_PROJECT']) - colors = presets['plugins']['maya']['load']['colors'] - - c = colors.get(family) - if c is not None: - cmds.setAttr(groupName + ".useOutlinerColor", 1) - cmds.setAttr(groupName + ".outlinerColor", - c[0], c[1], c[2]) - - shapes = cmds.ls(nodes, shapes=True, long=True) - print(shapes) - - newNodes = (list(set(nodes) - set(shapes))) - print(newNodes) - - # Store for post-process - self[:] = newNodes - if data.get("post_process", True): - self._post_process(name, namespace, context, data) - - return newNodes - - def _post_process(self, name, namespace, context, data): - - # TODO(marcus): We are hardcoding the name "out_SET" here. - # Better register this keyword, so that it can be used - # elsewhere, such as in the Integrator plug-in, - # without duplication. - - output = next((node for node in self if - node.endswith("out_SET")), None) - controls = next((node for node in self if - node.endswith("controls_SET")), None) - - assert output, "No out_SET in rig, this is a bug." - assert controls, "No controls_SET in rig, this is a bug." - - # Find the roots amongst the loaded nodes - roots = cmds.ls(self[:], assemblies=True, long=True) - assert roots, "No root nodes in rig, this is a bug." 
- - asset = api.Session["AVALON_ASSET"] - dependency = str(context["representation"]["_id"]) - - # Create the animation instance - with maya.maintained_selection(): - cmds.select([output, controls] + roots, noExpand=True) - api.create(name=namespace, - asset=asset, - family="animation", - options={"useSelection": True}, - data={"dependencies": dependency}) - - def switch(self, container, representation): - self.update(container, representation) diff --git a/pype/plugins/maya/load/load_vrayproxy.py b/pype/plugins/maya/load/load_vrayproxy.py index 9b07dc7e30..35d93676a0 100644 --- a/pype/plugins/maya/load/load_vrayproxy.py +++ b/pype/plugins/maya/load/load_vrayproxy.py @@ -117,7 +117,7 @@ class VRayProxyLoader(api.Loader): vray_mesh = cmds.createNode('VRayMesh', name="{}_VRMS".format(name)) mesh_shape = cmds.createNode("mesh", name="{}_GEOShape".format(name)) vray_mat = cmds.shadingNode("VRayMeshMaterial", asShader=True, - name="{}_VRMM".format(name)) + name="{}_VRMM".format(name)) vray_mat_sg = cmds.sets(name="{}_VRSG".format(name), empty=True, renderable=True, diff --git a/pype/plugins/maya/publish/collect_ass.py b/pype/plugins/maya/publish/collect_ass.py index c0174e7026..8e6691120a 100644 --- a/pype/plugins/maya/publish/collect_ass.py +++ b/pype/plugins/maya/publish/collect_ass.py @@ -21,15 +21,17 @@ class CollectAssData(pyblish.api.InstancePlugin): objsets = instance.data['setMembers'] for objset in objsets: + objset = str(objset) members = cmds.sets(objset, query=True) if members is None: self.log.warning("Skipped empty instance: \"%s\" " % objset) continue - if objset == "content_SET": + if "content_SET" in objset: instance.data['setMembers'] = members - elif objset == "proxy_SET": + self.log.debug('content members: {}'.format(members)) + elif objset.startswith("proxy_SET"): assert len(members) == 1, "You have multiple proxy meshes, please only use one" instance.data['proxy'] = members - + self.log.debug('proxy members: {}'.format(members)) 
self.log.debug("data: {}".format(instance.data)) diff --git a/pype/plugins/maya/publish/collect_yeti_rig.py b/pype/plugins/maya/publish/collect_yeti_rig.py index 7ab5649c0b..c743b2c00b 100644 --- a/pype/plugins/maya/publish/collect_yeti_rig.py +++ b/pype/plugins/maya/publish/collect_yeti_rig.py @@ -119,11 +119,15 @@ class CollectYetiRig(pyblish.api.InstancePlugin): texture_filenames = [] if image_search_paths: + # TODO: Somehow this uses OS environment path separator, `:` vs `;` # Later on check whether this is pipeline OS cross-compatible. image_search_paths = [p for p in image_search_paths.split(os.path.pathsep) if p] + # find all ${TOKEN} tokens and replace them with $TOKEN env. variable + image_search_paths = self._replace_tokens(image_search_paths) + # List all related textures texture_filenames = cmds.pgYetiCommand(node, listTextures=True) self.log.info("Found %i texture(s)" % len(texture_filenames)) @@ -140,6 +144,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin): "atttribute'" % node) # Collect all texture files + # find all ${TOKEN} tokens and replace them with $TOKEN env. 
variable + texture_filenames = self._replace_tokens(texture_filenames) for texture in texture_filenames: files = [] @@ -283,3 +289,20 @@ class CollectYetiRig(pyblish.api.InstancePlugin): collection, remainder = clique.assemble(files, patterns=pattern) return collection + + def _replace_tokens(self, strings): + env_re = re.compile(r"\$\{(\w+)\}") + + replaced = [] + for s in strings: + matches = re.finditer(env_re, s) + for m in matches: + try: + s = s.replace(m.group(), os.environ[m.group(1)]) + except KeyError: + msg = "Cannot find requested {} in environment".format( + m.group(1)) + self.log.error(msg) + raise RuntimeError(msg) + replaced.append(s) + return replaced diff --git a/pype/plugins/maya/publish/extract_ass.py b/pype/plugins/maya/publish/extract_ass.py index 71f3e0d84c..4cf394aefe 100644 --- a/pype/plugins/maya/publish/extract_ass.py +++ b/pype/plugins/maya/publish/extract_ass.py @@ -17,6 +17,7 @@ class ExtractAssStandin(pype.api.Extractor): label = "Ass Standin (.ass)" hosts = ["maya"] families = ["ass"] + asciiAss = False def process(self, instance): @@ -47,7 +48,7 @@ class ExtractAssStandin(pype.api.Extractor): exported_files = cmds.arnoldExportAss(filename=file_path, selected=True, - asciiAss=True, + asciiAss=self.asciiAss, shadowLinks=True, lightLinks=True, boundingBox=True, @@ -59,13 +60,15 @@ class ExtractAssStandin(pype.api.Extractor): filenames.append(os.path.split(file)[1]) self.log.info("Exported: {}".format(filenames)) else: + self.log.info("Extracting ass") cmds.arnoldExportAss(filename=file_path, selected=True, - asciiAss=True, + asciiAss=False, shadowLinks=True, lightLinks=True, boundingBox=True ) + self.log.info("Extracted {}".format(filename)) filenames = filename optionals = [ "frameStart", "frameEnd", "step", "handles", diff --git a/pype/plugins/maya/publish/extract_assembly.py b/pype/plugins/maya/publish/extract_assembly.py index 26b16a73c4..c12d57e836 100644 --- a/pype/plugins/maya/publish/extract_assembly.py +++ 
b/pype/plugins/maya/publish/extract_assembly.py @@ -22,11 +22,11 @@ class ExtractAssembly(pype.api.Extractor): def process(self, instance): - parent_dir = self.staging_dir(instance) + staging_dir = self.staging_dir(instance) hierarchy_filename = "{}.abc".format(instance.name) - hierarchy_path = os.path.join(parent_dir, hierarchy_filename) + hierarchy_path = os.path.join(staging_dir, hierarchy_filename) json_filename = "{}.json".format(instance.name) - json_path = os.path.join(parent_dir, json_filename) + json_path = os.path.join(staging_dir, json_filename) self.log.info("Dumping scene data for debugging ..") with open(json_path, "w") as filepath: @@ -46,8 +46,24 @@ class ExtractAssembly(pype.api.Extractor): "uvWrite": True, "selection": True}) - instance.data["files"] = [json_filename, hierarchy_filename] + if "representations" not in instance.data: + instance.data["representations"] = [] + representation_abc = { + 'name': 'abc', + 'ext': 'abc', + 'files': hierarchy_filename, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation_abc) + + representation_json = { + 'name': 'json', + 'ext': 'json', + 'files': json_filename, + "stagingDir": staging_dir + } + instance.data["representations"].append(representation_json) # Remove data instance.data.pop("scenedata", None) diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py index 5226f80f7a..fa6ecd72c3 100644 --- a/pype/plugins/maya/publish/extract_look.py +++ b/pype/plugins/maya/publish/extract_look.py @@ -429,33 +429,42 @@ class ExtractLook(pype.api.Extractor): a_template = anatomy.templates project = io.find_one( - {"type": "project", "name": project_name}, - projection={"config": True, "data": True}, + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} ) template = a_template["publish"]["path"] # anatomy = instance.context.data['anatomy'] - asset = io.find_one( - {"type": "asset", "name": 
asset_name, "parent": project["_id"]} - ) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'").format(asset_name, project_name) silo = asset.get("silo") - subset = io.find_one( - {"type": "subset", "name": subset_name, "parent": asset["_id"]} - ) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: version = io.find_one( - {"type": "version", - "parent": subset["_id"] - }, sort=[("name", -1)] + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] ) # if there is a subset there ought to be version diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py index 1031955260..94b5a716a2 100644 --- a/pype/plugins/maya/publish/extract_quicktime.py +++ b/pype/plugins/maya/publish/extract_quicktime.py @@ -1,16 +1,14 @@ import os +import glob import contextlib -import capture_gui import clique +import capture # import pype.maya.lib as lib import pype.api # from maya import cmds, mel import pymel.core as pm -# import ffmpeg -# # from pype.scripts import otio_burnin -# reload(ffmpeg) # TODO: move codec settings to presets @@ -93,7 +91,18 @@ class ExtractQuicktime(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + # viewer opening call to allow a signal to trigger between playblast + # and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) self.log.info("file list 
{}".format(playblast)) @@ -119,6 +128,46 @@ class ExtractQuicktime(pype.api.Extractor): } instance.data["representations"].append(representation) + def _fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. + if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + + @contextlib.contextmanager def maintained_time(): diff --git a/pype/plugins/maya/publish/extract_thumbnail.py b/pype/plugins/maya/publish/extract_thumbnail.py index dc8044cf19..8377af1ac0 100644 --- a/pype/plugins/maya/publish/extract_thumbnail.py +++ b/pype/plugins/maya/publish/extract_thumbnail.py @@ -1,31 +1,14 @@ import os import contextlib -import time -import sys +import glob -import capture_gui -import clique +import capture import pype.maya.lib as lib import pype.api from maya import cmds 
import pymel.core as pm -# import ffmpeg -# reload(ffmpeg) - -import avalon.maya - -# import maya_utils as mu - -# from tweakHUD import master -# from tweakHUD import draft_hud as dHUD -# from tweakHUD import ftrackStrings as fStrings - -# -# def soundOffsetFunc(oSF, SF, H): -# tmOff = (oSF - H) - SF -# return tmOff class ExtractThumbnail(pype.api.Extractor): @@ -47,39 +30,8 @@ class ExtractThumbnail(pype.api.Extractor): end = cmds.currentTime(query=True) self.log.info("start: {}, end: {}".format(start, end)) - members = instance.data['setMembers'] camera = instance.data['review_camera'] - # project_code = ftrack_data['Project']['code'] - # task_type = ftrack_data['Task']['type'] - # - # # load Preset - # studio_repos = os.path.abspath(os.environ.get('studio_repos')) - # shot_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '_' + asset + '.json')) - # - # task_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '_' + task_type + '.json')) - # - # project_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # (project_code + '.json')) - # - # default_preset_path = os.path.join(studio_repos, 'maya', - # 'capture_gui_presets', - # 'default.json') - # - # if os.path.isfile(shot_preset_path): - # preset_to_use = shot_preset_path - # elif os.path.isfile(task_preset_path): - # preset_to_use = task_preset_path - # elif os.path.isfile(project_preset_path): - # preset_to_use = project_preset_path - # else: - # preset_to_use = default_preset_path - capture_preset = "" capture_preset = instance.context.data['presets']['maya']['capture'] try: @@ -126,7 +78,18 @@ class ExtractThumbnail(pype.api.Extractor): pm.currentTime(refreshFrameInt, edit=True) with maintained_time(): - playblast = capture_gui.lib.capture_scene(preset) + filename = preset.get("filename", "%TEMP%") + + # Force viewer to False in call to capture because we have our own + # 
viewer opening call to allow a signal to trigger between + # playblast and viewer + preset['viewer'] = False + + # Remove panel key since it's internal value to capture_gui + preset.pop("panel", None) + + path = capture.capture(**preset) + playblast = self._fix_playblast_output_path(path) _, thumbnail = os.path.split(playblast) @@ -144,6 +107,45 @@ class ExtractThumbnail(pype.api.Extractor): } instance.data["representations"].append(representation) + def _fix_playblast_output_path(self, filepath): + """Workaround a bug in maya.cmds.playblast to return correct filepath. + + When the `viewer` argument is set to False and maya.cmds.playblast + does not automatically open the playblasted file the returned + filepath does not have the file's extension added correctly. + + To workaround this we just glob.glob() for any file extensions and + assume the latest modified file is the correct file and return it. + + """ + # Catch cancelled playblast + if filepath is None: + self.log.warning("Playblast did not result in output path. " + "Playblast is probably interrupted.") + return None + + # Fix: playblast not returning correct filename (with extension) + # Lets assume the most recently modified file is the correct one. 
+ if not os.path.exists(filepath): + directory = os.path.dirname(filepath) + filename = os.path.basename(filepath) + # check if the filepath is has frame based filename + # example : capture.####.png + parts = filename.split(".") + if len(parts) == 3: + query = os.path.join(directory, "{}.*.{}".format(parts[0], + parts[-1])) + files = glob.glob(query) + else: + files = glob.glob("{}.*".format(filepath)) + + if not files: + raise RuntimeError("Couldn't find playblast from: " + "{0}".format(filepath)) + filepath = max(files, key=os.path.getmtime) + + return filepath + @contextlib.contextmanager def maintained_time(): diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index 55c04e9c41..e3fa79b1c8 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -228,80 +228,19 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AuxFiles": [] } - # Include critical environment variables with submission + # We need those to pass them to pype for it to set correct context keys = [ - # This will trigger `userSetup.py` on the slave - # such that proper initialisation happens the same - # way as it does on a local machine. - # TODO(marcus): This won't work if the slaves don't - # have accesss to these paths, such as if slaves are - # running Linux and the submitter is on Windows. 
- "PYTHONPATH", - "PATH", - - "MTOA_EXTENSIONS_PATH", - "MTOA_EXTENSIONS", - "DYLD_LIBRARY_PATH", - "MAYA_RENDER_DESC_PATH", - "MAYA_MODULE_PATH", - "ARNOLD_PLUGIN_PATH", - "AVALON_SCHEMA", "FTRACK_API_KEY", "FTRACK_API_USER", "FTRACK_SERVER", - "PYBLISHPLUGINPATH", - - # todo: This is a temporary fix for yeti variables - "PEREGRINEL_LICENSE", - "SOLIDANGLE_LICENSE", - "ARNOLD_LICENSE" - "MAYA_MODULE_PATH", - "TOOL_ENV" + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_TASK", + "PYPE_USERNAME" ] + environment = dict({key: os.environ[key] for key in keys if key in os.environ}, **api.Session) - # self.log.debug("enviro: {}".format(pprint(environment))) - for path in os.environ: - if path.lower().startswith('pype_'): - environment[path] = os.environ[path] - - environment["PATH"] = os.environ["PATH"] - # self.log.debug("enviro: {}".format(environment['PYPE_SCRIPTS'])) - clean_environment = {} - for key in environment: - clean_path = "" - self.log.debug("key: {}".format(key)) - self.log.debug("value: {}".format(environment[key])) - to_process = str(environment[key]) - if key == "PYPE_STUDIO_CORE_MOUNT": - clean_path = to_process - elif "://" in to_process: - clean_path = to_process - elif os.pathsep not in str(to_process): - try: - path = to_process - path.decode('UTF-8', 'strict') - clean_path = os.path.normpath(path) - except UnicodeDecodeError: - print('path contains non UTF characters') - else: - for path in to_process.split(os.pathsep): - try: - path.decode('UTF-8', 'strict') - clean_path += os.path.normpath(path) + os.pathsep - except UnicodeDecodeError: - print('path contains non UTF characters') - - if key == "PYTHONPATH": - clean_path = clean_path.replace('python2', 'python3') - clean_path = clean_path.replace( - os.path.normpath( - environment['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - environment['PYPE_STUDIO_CORE_PATH'])) # noqa - clean_environment[key] = clean_path - - environment = clean_environment payload["JobInfo"].update({ 
"EnvironmentKeyValue%d" % index: "{key}={value}".format( @@ -319,7 +258,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): self.preflight_check(instance) - self.log.info("Submitting..") + self.log.info("Submitting ...") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) # E.g. http://192.168.0.1:8082/api/jobs diff --git a/pype/plugins/maya/publish/validate_node_ids_related.py b/pype/plugins/maya/publish/validate_node_ids_related.py index 4872f438d4..191ac0c2f8 100644 --- a/pype/plugins/maya/publish/validate_node_ids_related.py +++ b/pype/plugins/maya/publish/validate_node_ids_related.py @@ -38,9 +38,13 @@ class ValidateNodeIDsRelated(pyblish.api.InstancePlugin): invalid = list() asset = instance.data['asset'] - asset_data = io.find_one({"name": asset, - "type": "asset"}, - projection={"_id": True}) + asset_data = io.find_one( + { + "name": asset, + "type": "asset" + }, + projection={"_id": True} + ) asset_id = str(asset_data['_id']) # We do want to check the referenced nodes as we it might be diff --git a/pype/plugins/maya/publish/validate_renderlayer_aovs.py b/pype/plugins/maya/publish/validate_renderlayer_aovs.py index e14c92a8b4..686a11e906 100644 --- a/pype/plugins/maya/publish/validate_renderlayer_aovs.py +++ b/pype/plugins/maya/publish/validate_renderlayer_aovs.py @@ -49,9 +49,10 @@ class ValidateRenderLayerAOVs(pyblish.api.InstancePlugin): """Check if subset is registered in the database under the asset""" asset = io.find_one({"type": "asset", "name": asset_name}) - is_valid = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + is_valid = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) return is_valid - diff --git a/pype/plugins/nuke/_load_unused/extract_write_next_render.py b/pype/plugins/nuke/_load_unused/extract_write_next_render.py deleted file mode 100644 index 40bfe59ec2..0000000000 --- a/pype/plugins/nuke/_load_unused/extract_write_next_render.py +++ /dev/null @@ 
-1,24 +0,0 @@ -import pyblish.api - - -class WriteToRender(pyblish.api.InstancePlugin): - """Swith Render knob on write instance to on, - so next time publish will be set to render - """ - - order = pyblish.api.ExtractorOrder + 0.1 - label = "Write to render next" - optional = True - hosts = ["nuke", "nukeassist"] - families = ["write"] - - def process(self, instance): - return - if [f for f in instance.data["families"] - if ".frames" in f]: - instance[0]["render"].setValue(True) - self.log.info("Swith write node render to `on`") - else: - # swith to - instance[0]["render"].setValue(False) - self.log.info("Swith write node render to `Off`") diff --git a/pype/plugins/nuke/_load_unused/load_backdrop b/pype/plugins/nuke/_load_unused/load_backdrop deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_publish_unused/collect_active_viewer.py b/pype/plugins/nuke/_publish_unused/collect_active_viewer.py deleted file mode 100644 index 5a6cc02b88..0000000000 --- a/pype/plugins/nuke/_publish_unused/collect_active_viewer.py +++ /dev/null @@ -1,14 +0,0 @@ -import pyblish.api -import nuke - - -class CollectActiveViewer(pyblish.api.ContextPlugin): - """Collect any active viewer from nodes - """ - - order = pyblish.api.CollectorOrder + 0.3 - label = "Collect Active Viewer" - hosts = ["nuke"] - - def process(self, context): - context.data["ActiveViewer"] = nuke.activeViewer() diff --git a/pype/plugins/nuke/_publish_unused/extract_frames.py b/pype/plugins/nuke/_publish_unused/extract_frames.py deleted file mode 100644 index b75f893802..0000000000 --- a/pype/plugins/nuke/_publish_unused/extract_frames.py +++ /dev/null @@ -1,22 +0,0 @@ -import pyblish - - -class ExtractFramesToIntegrate(pyblish.api.InstancePlugin): - """Extract rendered frames for integrator - """ - - order = pyblish.api.ExtractorOrder - label = "Extract rendered frames" - hosts = ["nuke"] - families = ["render"] - - def process(self, instance\ - return - - # staging_dir = 
instance.data.get('stagingDir', None) - # output_dir = instance.data.get('outputDir', None) - # - # if not staging_dir: - # staging_dir = output_dir - # instance.data['stagingDir'] = staging_dir - # # instance.data['transfer'] = False diff --git a/pype/plugins/nuke/_publish_unused/extract_nuke_write.py b/pype/plugins/nuke/_publish_unused/extract_nuke_write.py deleted file mode 100644 index 155b5cf56d..0000000000 --- a/pype/plugins/nuke/_publish_unused/extract_nuke_write.py +++ /dev/null @@ -1,116 +0,0 @@ -import os - -import nuke -import pyblish.api - - -class Extract(pyblish.api.InstancePlugin): - """Super class for write and writegeo extractors.""" - - order = pyblish.api.ExtractorOrder - optional = True - label = "Extract Nuke [super]" - hosts = ["nuke"] - match = pyblish.api.Subset - - # targets = ["process.local"] - - def execute(self, instance): - # Get frame range - node = instance[0] - first_frame = nuke.root()["first_frame"].value() - last_frame = nuke.root()["last_frame"].value() - - if node["use_limit"].value(): - first_frame = node["first"].value() - last_frame = node["last"].value() - - # Render frames - nuke.execute(node.name(), int(first_frame), int(last_frame)) - - -class ExtractNukeWrite(Extract): - """ Extract output from write nodes. 
""" - - families = ["write", "local"] - label = "Extract Write" - - def process(self, instance): - - self.execute(instance) - - # Validate output - for filename in list(instance.data["collection"]): - if not os.path.exists(filename): - instance.data["collection"].remove(filename) - self.log.warning("\"{0}\" didn't render.".format(filename)) - - -class ExtractNukeCache(Extract): - - label = "Cache" - families = ["cache", "local"] - - def process(self, instance): - - self.execute(instance) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg - - -class ExtractNukeCamera(Extract): - - label = "Camera" - families = ["camera", "local"] - - def process(self, instance): - - node = instance[0] - node["writeGeometries"].setValue(False) - node["writePointClouds"].setValue(False) - node["writeAxes"].setValue(False) - - file_path = node["file"].getValue() - node["file"].setValue(instance.data["output_path"]) - - self.execute(instance) - - node["writeGeometries"].setValue(True) - node["writePointClouds"].setValue(True) - node["writeAxes"].setValue(True) - - node["file"].setValue(file_path) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert os.path.exists(instance.data["output_path"]), msg - - -class ExtractNukeGeometry(Extract): - - label = "Geometry" - families = ["geometry", "local"] - - def process(self, instance): - - node = instance[0] - node["writeCameras"].setValue(False) - node["writePointClouds"].setValue(False) - node["writeAxes"].setValue(False) - - file_path = node["file"].getValue() - node["file"].setValue(instance.data["output_path"]) - - self.execute(instance) - - node["writeCameras"].setValue(True) - node["writePointClouds"].setValue(True) - node["writeAxes"].setValue(True) - - node["file"].setValue(file_path) - - # Validate output - msg = "\"{0}\" didn't render.".format(instance.data["output_path"]) - assert 
os.path.exists(instance.data["output_path"]), msg diff --git a/pype/plugins/nuke/_publish_unused/extract_script.py b/pype/plugins/nuke/_publish_unused/extract_script.py deleted file mode 100644 index 7d55ea0da4..0000000000 --- a/pype/plugins/nuke/_publish_unused/extract_script.py +++ /dev/null @@ -1,40 +0,0 @@ - -import pyblish.api -import os -import pype -import shutil - - -class ExtractScript(pype.api.Extractor): - """Publish script - """ - label = 'Extract Script' - order = pyblish.api.ExtractorOrder - 0.05 - optional = True - hosts = ['nuke'] - families = ["workfile"] - - def process(self, instance): - self.log.debug("instance extracting: {}".format(instance.data)) - current_script = instance.context.data["currentFile"] - - # Define extract output file path - stagingdir = self.staging_dir(instance) - filename = "{0}".format(instance.data["name"]) - path = os.path.join(stagingdir, filename) - - self.log.info("Performing extraction..") - shutil.copy(current_script, path) - - if "representations" not in instance.data: - instance.data["representations"] = list() - - representation = { - 'name': 'nk', - 'ext': '.nk', - 'files': filename, - "stagingDir": stagingdir, - } - instance.data["representations"].append(representation) - - self.log.info("Extracted instance '%s' to: %s" % (instance.name, path)) diff --git a/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py b/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py deleted file mode 100644 index e05c42ae50..0000000000 --- a/pype/plugins/nuke/_publish_unused/integrate_staging_dir.py +++ /dev/null @@ -1,27 +0,0 @@ -import pyblish.api -import shutil -import os - - -class CopyStagingDir(pyblish.api.InstancePlugin): - """Copy data rendered into temp local directory - """ - - order = pyblish.api.IntegratorOrder - 2 - label = "Copy data from temp dir" - hosts = ["nuke", "nukeassist"] - families = ["render.local"] - - def process(self, instance): - temp_dir = instance.data.get("stagingDir") - output_dir = 
instance.data.get("outputDir") - - # copy data to correct dir - if not os.path.exists(output_dir): - os.makedirs(output_dir) - self.log.info("output dir has been created") - - for f in os.listdir(temp_dir): - self.log.info("copy file to correct destination: {}".format(f)) - shutil.copy(os.path.join(temp_dir, os.path.basename(f)), - os.path.join(output_dir, os.path.basename(f))) diff --git a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py b/pype/plugins/nuke/_publish_unused/publish_image_sequences.py deleted file mode 100644 index 34634dcc6b..0000000000 --- a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py +++ /dev/null @@ -1,98 +0,0 @@ -import re -import os -import json -import subprocess - -import pyblish.api - -from pype.action import get_errored_plugins_from_data - - -def _get_script(): - """Get path to the image sequence script""" - - # todo: use a more elegant way to get the python script - - try: - from pype.fusion.scripts import publish_filesequence - except Exception: - raise RuntimeError("Expected module 'publish_imagesequence'" - "to be available") - - module_path = publish_filesequence.__file__ - if module_path.endswith(".pyc"): - module_path = module_path[:-len(".pyc")] + ".py" - - return module_path - - -class PublishImageSequence(pyblish.api.InstancePlugin): - """Publish the generated local image sequences.""" - - order = pyblish.api.IntegratorOrder - label = "Publish Rendered Image Sequence(s)" - hosts = ["fusion"] - families = ["saver.renderlocal"] - - def process(self, instance): - - # Skip this plug-in if the ExtractImageSequence failed - errored_plugins = get_errored_plugins_from_data(instance.context) - if any(plugin.__name__ == "FusionRenderLocal" for plugin in - errored_plugins): - raise RuntimeError("Fusion local render failed, " - "publishing images skipped.") - - subset = instance.data["subset"] - ext = instance.data["ext"] - - # Regex to match resulting renders - regex = 
"^{subset}.*[0-9]+{ext}+$".format(subset=re.escape(subset), - ext=re.escape(ext)) - - # The instance has most of the information already stored - metadata = { - "regex": regex, - "frameStart": instance.context.data["frameStart"], - "frameEnd": instance.context.data["frameEnd"], - "families": ["imagesequence"], - } - - # Write metadata and store the path in the instance - output_directory = instance.data["outputDir"] - path = os.path.join(output_directory, - "{}_metadata.json".format(subset)) - with open(path, "w") as f: - json.dump(metadata, f) - - assert os.path.isfile(path), ("Stored path is not a file for %s" - % instance.data["name"]) - - # Suppress any subprocess console - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW - startupinfo.wShowWindow = subprocess.SW_HIDE - - process = subprocess.Popen(["python", _get_script(), - "--paths", path], - bufsize=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - startupinfo=startupinfo) - - while True: - output = process.stdout.readline() - # Break when there is no output or a return code has been given - if output == '' and process.poll() is not None: - process.stdout.close() - break - if output: - line = output.strip() - if line.startswith("ERROR"): - self.log.error(line) - else: - self.log.info(line) - - if process.returncode != 0: - raise RuntimeError("Process quit with non-zero " - "return code: {}".format(process.returncode)) diff --git a/pype/plugins/nuke/_publish_unused/validate_active_viewer.py b/pype/plugins/nuke/_publish_unused/validate_active_viewer.py deleted file mode 100644 index 618a7f1502..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_active_viewer.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api -import nuke - - -class ValidateActiveViewer(pyblish.api.ContextPlugin): - """Validate presentse of the active viewer from nodes - """ - - order = pyblish.api.ValidatorOrder - label = "Validate Active Viewer" - hosts = ["nuke"] - - def 
process(self, context): - viewer_process_node = context.data.get("ViewerProcess") - - assert viewer_process_node, ( - "Missing active viewer process! Please click on output write node and push key number 1-9" - ) - active_viewer = context.data["ActiveViewer"] - active_input = active_viewer.activeInput() - - assert active_input is not None, ( - "Missing active viewer input! Please click on output write node and push key number 1-9" - ) diff --git a/pype/plugins/nuke/_publish_unused/validate_version_match.py b/pype/plugins/nuke/_publish_unused/validate_version_match.py deleted file mode 100644 index 1358d9a7b3..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_version_match.py +++ /dev/null @@ -1,36 +0,0 @@ -import os -import pyblish.api -import pype.utils - - - -@pyblish.api.log -class RepairNukeWriteNodeVersionAction(pyblish.api.Action): - label = "Repair" - on = "failed" - icon = "wrench" - - def process(self, context, plugin): - import pype.nuke.lib as nukelib - instances = pype.utils.filter_instances(context, plugin) - - for instance in instances: - node = instance[0] - render_path = nukelib.get_render_path(node) - self.log.info("render_path: {}".format(render_path)) - node['file'].setValue(render_path.replace("\\", "/")) - - -class ValidateVersionMatch(pyblish.api.InstancePlugin): - """Checks if write version matches workfile version""" - - label = "Validate Version Match" - order = pyblish.api.ValidatorOrder - actions = [RepairNukeWriteNodeVersionAction] - hosts = ["nuke"] - families = ['write'] - - def process(self, instance): - - assert instance.data['version'] == instance.context.data['version'], "\ - Version in write doesn't match version of the workfile" diff --git a/pype/plugins/nuke/_publish_unused/validate_write_families.py b/pype/plugins/nuke/_publish_unused/validate_write_families.py deleted file mode 100644 index 73f710867d..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_write_families.py +++ /dev/null @@ -1,59 +0,0 @@ - 
-import pyblish.api -import pype.api -import pype.nuke.actions - - -class RepairWriteFamiliesAction(pyblish.api.Action): - label = "Fix Write's render attributes" - on = "failed" - icon = "wrench" - - def process(self, instance, plugin): - self.log.info("instance {}".format(instance)) - instance["render"].setValue(True) - self.log.info("Rendering toggled ON") - - -@pyblish.api.log -class ValidateWriteFamilies(pyblish.api.InstancePlugin): - """ Validates write families. """ - - order = pyblish.api.ValidatorOrder - label = "Valitade writes families" - hosts = ["nuke"] - families = ["write"] - actions = [pype.nuke.actions.SelectInvalidAction, pype.api.RepairAction] - - @staticmethod - def get_invalid(self, instance): - if not [f for f in instance.data["families"] - if ".frames" in f]: - return - - if not instance.data.get('files'): - return (instance) - - def process(self, instance): - self.log.debug('instance.data["files"]: {}'.format(instance.data['files'])) - - invalid = self.get_invalid(self, instance) - - if invalid: - raise ValueError(str("`{}`: Switch `Render` on! 
" - "> {}".format(__name__, invalid))) - - # if any(".frames" in f for f in instance.data["families"]): - # if not instance.data["files"]: - # raise ValueError("instance {} is set to publish frames\ - # but no files were collected, render the frames first or\ - # check 'render' checkbox onthe no to 'ON'".format(instance))) - # - # - # self.log.info("Checked correct writes families") - - @classmethod - def repair(cls, instance): - cls.log.info("instance {}".format(instance)) - instance[0]["render"].setValue(True) - cls.log.info("Rendering toggled ON") diff --git a/pype/plugins/nuke/create/create_backdrop.py b/pype/plugins/nuke/create/create_backdrop.py index 767e92b592..2016c66095 100644 --- a/pype/plugins/nuke/create/create_backdrop.py +++ b/pype/plugins/nuke/create/create_backdrop.py @@ -35,8 +35,10 @@ class CreateBackdrop(Creator): return instance else: - nuke.message("Please select nodes you " - "wish to add to a container") + msg = "Please select nodes you " + "wish to add to a container" + self.log.error(msg) + nuke.message(msg) return else: bckd_node = autoBackdrop() diff --git a/pype/plugins/nuke/create/create_gizmo.py b/pype/plugins/nuke/create/create_gizmo.py index 41229862e3..ca199b8800 100644 --- a/pype/plugins/nuke/create/create_gizmo.py +++ b/pype/plugins/nuke/create/create_gizmo.py @@ -36,8 +36,10 @@ class CreateGizmo(Creator): node["tile_color"].setValue(int(self.node_color, 16)) return anlib.imprint(node, self.data) else: - nuke.message("Please select a group node " - "you wish to publish as the gizmo") + msg = ("Please select a group node " + "you wish to publish as the gizmo") + self.log.error(msg) + nuke.message(msg) if len(nodes) >= 2: anlib.select_nodes(nodes) @@ -58,8 +60,10 @@ class CreateGizmo(Creator): return anlib.imprint(gizmo_node, self.data) else: - nuke.message("Please select nodes you " - "wish to add to the gizmo") + msg = ("Please select nodes you " + "wish to add to the gizmo") + self.log.error(msg) + nuke.message(msg) return else: 
with anlib.maintained_selection(): diff --git a/pype/plugins/nuke/create/create_read.py b/pype/plugins/nuke/create/create_read.py index 1aa7e68746..70db580a7e 100644 --- a/pype/plugins/nuke/create/create_read.py +++ b/pype/plugins/nuke/create/create_read.py @@ -34,7 +34,9 @@ class CrateRead(avalon.nuke.Creator): nodes = self.nodes if not nodes or len(nodes) == 0: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) else: count_reads = 0 for node in nodes: @@ -46,7 +48,9 @@ class CrateRead(avalon.nuke.Creator): count_reads += 1 if count_reads < 1: - nuke.message('Please select Read node') + msg = "Please select Read node" + self.log.error(msg) + nuke.message(msg) return def change_read_node(self, name, node, data): diff --git a/pype/plugins/nuke/create/create_read_plate b/pype/plugins/nuke/create/create_read_plate deleted file mode 100644 index 90a47cb55e..0000000000 --- a/pype/plugins/nuke/create/create_read_plate +++ /dev/null @@ -1,8 +0,0 @@ -# create publishable read node usually used for enabling version tracking -# also useful for sharing across shots or assets - -# if read nodes are selected it will convert them to centainer -# if no read node selected it will create read node and offer browser to shot resource folder - -# type movie > mov or imagesequence -# type still > matpaint .psd, .tif, .png, diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py index f522c50511..74e450f267 100644 --- a/pype/plugins/nuke/create/create_write.py +++ b/pype/plugins/nuke/create/create_write.py @@ -1,18 +1,14 @@ from collections import OrderedDict -import avalon.api -import avalon.nuke -from pype import api as pype from pype.nuke import plugin -from pypeapp import config - import nuke + class CreateWriteRender(plugin.PypeCreator): # change this to template preset name = "WriteRender" label = "Create Write Render" hosts = ["nuke"] - nClass = "write" + n_class = "write" 
family = "render" icon = "sign-out" defaults = ["Main", "Mask"] @@ -23,7 +19,7 @@ class CreateWriteRender(plugin.PypeCreator): data = OrderedDict() data["family"] = self.family - data["families"] = self.nClass + data["families"] = self.n_class for k, v in self.data.items(): if k not in data.keys(): @@ -31,7 +27,7 @@ class CreateWriteRender(plugin.PypeCreator): self.data = data self.nodes = nuke.selectedNodes() - self.log.info("self.data: '{}'".format(self.data)) + self.log.debug("_ self.data: '{}'".format(self.data)) def process(self): from pype.nuke import lib as pnlib @@ -45,7 +41,11 @@ class CreateWriteRender(plugin.PypeCreator): if (self.options or {}).get("useSelection"): nodes = self.nodes - assert len(nodes) < 2, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`") + if not (len(nodes) < 2): + msg = ("Select only one node. The node you want to connect to, " + "or tick off `Use selection`") + log.error(msg) + nuke.message(msg) selected_node = nodes[0] inputs = [selected_node] @@ -66,7 +66,7 @@ class CreateWriteRender(plugin.PypeCreator): # recreate new write_data = { - "class": self.nClass, + "class": self.n_class, "families": [self.family], "avalon": self.data } @@ -97,75 +97,121 @@ class CreateWriteRender(plugin.PypeCreator): return write_node -# -# class CreateWritePrerender(avalon.nuke.Creator): -# # change this to template preset -# preset = "prerender" -# -# name = "WritePrerender" -# label = "Create Write Prerender" -# hosts = ["nuke"] -# family = "{}_write".format(preset) -# families = preset -# icon = "sign-out" -# defaults = ["Main", "Mask"] -# -# def __init__(self, *args, **kwargs): -# super(CreateWritePrerender, self).__init__(*args, **kwargs) -# self.presets = config.get_presets()['plugins']["nuke"]["create"].get( -# self.__class__.__name__, {} -# ) -# -# data = OrderedDict() -# -# data["family"] = self.family.split("_")[1] -# data["families"] = self.families -# -# {data.update({k: v}) for k, v in 
self.data.items() -# if k not in data.keys()} -# self.data = data -# -# def process(self): -# self.name = self.data["subset"] -# -# instance = nuke.toNode(self.data["subset"]) -# node = 'write' -# -# if not instance: -# write_data = { -# "class": node, -# "preset": self.preset, -# "avalon": self.data -# } -# -# if self.presets.get('fpath_template'): -# self.log.info("Adding template path from preset") -# write_data.update( -# {"fpath_template": self.presets["fpath_template"]} -# ) -# else: -# self.log.info("Adding template path from plugin") -# write_data.update({ -# "fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"}) -# -# # get group node -# group_node = create_write_node(self.data["subset"], write_data) -# -# # open group node -# group_node.begin() -# for n in nuke.allNodes(): -# # get write node -# if n.Class() in "Write": -# write_node = n -# group_node.end() -# -# # linking knobs to group property panel -# linking_knobs = ["first", "last", "use_limit"] -# for k in linking_knobs: -# lnk = nuke.Link_Knob(k) -# lnk.makeLink(write_node.name(), k) -# lnk.setName(k.replace('_', ' ').capitalize()) -# lnk.clearFlag(nuke.STARTLINE) -# group_node.addKnob(lnk) -# -# return + +class CreateWritePrerender(plugin.PypeCreator): + # change this to template preset + name = "WritePrerender" + label = "Create Write Prerender" + hosts = ["nuke"] + n_class = "write" + family = "prerender" + icon = "sign-out" + defaults = ["Key01", "Bg01", "Fg01", "Branch01", "Part01"] + + def __init__(self, *args, **kwargs): + super(CreateWritePrerender, self).__init__(*args, **kwargs) + + data = OrderedDict() + + data["family"] = self.family + data["families"] = self.n_class + + for k, v in self.data.items(): + if k not in data.keys(): + data.update({k: v}) + + self.data = data + self.nodes = nuke.selectedNodes() + self.log.debug("_ self.data: '{}'".format(self.data)) + + def process(self): + from pype.nuke import lib as pnlib + + inputs = [] + outputs = [] + instance = 
nuke.toNode(self.data["subset"]) + selected_node = None + + # use selection + if (self.options or {}).get("useSelection"): + nodes = self.nodes + + if not (len(nodes) < 2): + msg = ("Select only one node. The node you want to connect to, " + "or tick off `Use selection`") + self.log.error(msg) + nuke.message(msg) + + selected_node = nodes[0] + inputs = [selected_node] + outputs = selected_node.dependent() + + if instance: + if (instance.name() in selected_node.name()): + selected_node = instance.dependencies()[0] + + # if node already exist + if instance: + # collect input / outputs + inputs = instance.dependencies() + outputs = instance.dependent() + selected_node = inputs[0] + # remove old one + nuke.delete(instance) + + # recreate new + write_data = { + "class": self.n_class, + "families": [self.family], + "avalon": self.data + } + + if self.presets.get('fpath_template'): + self.log.info("Adding template path from preset") + write_data.update( + {"fpath_template": self.presets["fpath_template"]} + ) + else: + self.log.info("Adding template path from plugin") + write_data.update({ + "fpath_template": "{work}/prerenders/nuke/{subset}/{subset}.{frame}.{ext}"}) + + write_node = pnlib.create_write_node( + self.data["subset"], + write_data, + input=selected_node, + prenodes=[]) + + # relinking to collected connections + for i, input in enumerate(inputs): + write_node.setInput(i, input) + + write_node.autoplace() + + for output in outputs: + output.setInput(0, write_node) + + # open group node + write_node.begin() + for n in nuke.allNodes(): + # get write node + if n.Class() in "Write": + w_node = n + write_node.end() + + # add inner write node Tab + write_node.addKnob(nuke.Tab_Knob("WriteLinkedKnobs")) + + # linking knobs to group property panel + linking_knobs = ["channels", "___", "first", "last", "use_limit"] + for k in linking_knobs: + if "___" in k: + write_node.addKnob(nuke.Text_Knob('')) + else: + lnk = nuke.Link_Knob(k) + lnk.makeLink(w_node.name(), k) + 
lnk.setName(k.replace('_', ' ').capitalize()) + lnk.clearFlag(nuke.STARTLINE) + write_node.addKnob(lnk) + + return write_node diff --git a/pype/plugins/nuke/load/load_backdrop.py b/pype/plugins/nuke/load/load_backdrop.py new file mode 100644 index 0000000000..07a6724771 --- /dev/null +++ b/pype/plugins/nuke/load/load_backdrop.py @@ -0,0 +1,322 @@ +from avalon import api, style, io +import nuke +import nukescripts +from pype.nuke import lib as pnlib +from avalon.nuke import lib as anlib +from avalon.nuke import containerise, update_container +reload(pnlib) + +class LoadBackdropNodes(api.Loader): + """Loading Published Backdrop nodes (workfile, nukenodes)""" + + representations = ["nk"] + families = ["workfile", "nukenodes"] + + label = "Iport Nuke Nodes" + order = 0 + icon = "eye" + color = style.colors.light + node_color = "0x7533c1ff" + + def load(self, context, name, namespace, data): + """ + Loading function to import .nk file into script and wrap + it on backdrop + + Arguments: + context (dict): context of version + name (str): name of the version + namespace (str): asset name + data (dict): compulsory attribute > not used + + Returns: + nuke node: containerised nuke node object + """ + + # get main variables + version = context['version'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + namespace = namespace or context['asset']['name'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + # prepare data for imprinting + # add additional metadata from the version to imprint to Avalon knob + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + 
# getting file path + file = self.fname.replace("\\", "/") + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + # Get mouse position + n = nuke.createNode("NoOp") + xcursor, ycursor = (n.xpos(), n.ypos()) + anlib.reset_selection() + nuke.delete(n) + + bdn_frame = 50 + + with anlib.maintained_selection(): + + # add group from nk + nuke.nodePaste(file) + + # get all pasted nodes + new_nodes = list() + nodes = nuke.selectedNodes() + + # get pointer position in DAG + xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame) + + # reset position to all nodes and replace inputs and output + for n in nodes: + anlib.reset_selection() + xpos = (n.xpos() - xcursor) + xpointer + ypos = (n.ypos() - ycursor) + ypointer + n.setXYpos(xpos, ypos) + + # replace Input nodes for dots + if n.Class() in "Input": + dot = nuke.createNode("Dot") + new_name = n.name().replace("INP", "DOT") + dot.setName(new_name) + dot["label"].setValue(new_name) + dot.setXYpos(xpos, ypos) + new_nodes.append(dot) + + # rewire + dep = n.dependent() + for d in dep: + index = next((i for i, dpcy in enumerate( + d.dependencies()) + if n is dpcy), 0) + d.setInput(index, dot) + + # remove Input node + anlib.reset_selection() + nuke.delete(n) + continue + + # replace Input nodes for dots + elif n.Class() in "Output": + dot = nuke.createNode("Dot") + new_name = n.name() + "_DOT" + dot.setName(new_name) + dot["label"].setValue(new_name) + dot.setXYpos(xpos, ypos) + new_nodes.append(dot) + + # rewire + dep = next((d for d in n.dependencies()), None) + if dep: + dot.setInput(0, dep) + + # remove Input node + anlib.reset_selection() + nuke.delete(n) + continue + else: + new_nodes.append(n) + + # reselect nodes with new Dot instead of Inputs and Output + anlib.reset_selection() + anlib.select_nodes(new_nodes) + # place on backdrop + bdn = nukescripts.autoBackdrop() + + # add frame offset + xpos = bdn.xpos() - 
bdn_frame + ypos = bdn.ypos() - bdn_frame + bdwidth = bdn["bdwidth"].value() + (bdn_frame*2) + bdheight = bdn["bdheight"].value() + (bdn_frame*2) + + bdn["xpos"].setValue(xpos) + bdn["ypos"].setValue(ypos) + bdn["bdwidth"].setValue(bdwidth) + bdn["bdheight"].setValue(bdheight) + + bdn["name"].setValue(object_name) + bdn["label"].setValue("Version tracked frame: \n`{}`\n\nPLEASE DO NOT REMOVE OR MOVE \nANYTHING FROM THIS FRAME!".format(object_name)) + bdn["note_font_size"].setValue(20) + + return containerise( + node=bdn, + name=name, + namespace=namespace, + context=context, + loader=self.__class__.__name__, + data=data_imprint) + + def update(self, container, representation): + """Update the Loader's path + + Nuke automatically tries to reset some variables when changing + the loader's path to a new file. These automatic changes are to its + inputs: + + """ + + # get main variables + # Get version from io + version = io.find_one({ + "type": "version", + "_id": representation["parent"] + }) + # get corresponding node + GN = nuke.toNode(container['objectName']) + + file = api.get_representation_path(representation).replace("\\", "/") + context = representation["context"] + name = container['name'] + version_data = version.get("data", {}) + vname = version.get("name", None) + first = version_data.get("frameStart", None) + last = version_data.get("frameEnd", None) + namespace = container['namespace'] + colorspace = version_data.get("colorspace", None) + object_name = "{}_{}".format(name, namespace) + + add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd", + "source", "author", "fps"] + + data_imprint = {"representation": str(representation["_id"]), + "frameStart": first, + "frameEnd": last, + "version": vname, + "colorspaceInput": colorspace, + "objectName": object_name} + + for k in add_keys: + data_imprint.update({k: version_data[k]}) + + # adding nodes to node graph + # just in case we are in group lets jump out of it + nuke.endGroup() + + with 
anlib.maintained_selection(): + xpos = GN.xpos() + ypos = GN.ypos() + avalon_data = anlib.get_avalon_knob_data(GN) + nuke.delete(GN) + # add group from nk + nuke.nodePaste(file) + + GN = nuke.selectedNode() + anlib.set_avalon_knob_data(GN, avalon_data) + GN.setXYpos(xpos, ypos) + GN["name"].setValue(object_name) + + # get all versions in list + versions = io.find({ + "type": "version", + "parent": version["parent"] + }).distinct('name') + + max_version = max(versions) + + # change color of node + if version.get("name") not in [max_version]: + GN["tile_color"].setValue(int("0xd88467ff", 16)) + else: + GN["tile_color"].setValue(int(self.node_color, 16)) + + self.log.info("udated to version: {}".format(version.get("name"))) + + return update_container(GN, data_imprint) + + def connect_active_viewer(self, group_node): + """ + Finds Active viewer and + place the node under it, also adds + name of group into Input Process of the viewer + + Arguments: + group_node (nuke node): nuke group node object + + """ + group_node_name = group_node["name"].value() + + viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()] + if len(viewer) > 0: + viewer = viewer[0] + else: + if not (len(nodes) < 2): + msg = "Please create Viewer node before you " + "run this action again" + self.log.error(msg) + nuke.message(msg) + return None + + # get coordinates of Viewer1 + xpos = viewer["xpos"].value() + ypos = viewer["ypos"].value() + + ypos += 150 + + viewer["ypos"].setValue(ypos) + + # set coordinates to group node + group_node["xpos"].setValue(xpos) + group_node["ypos"].setValue(ypos + 50) + + # add group node name to Viewer Input Process + viewer["input_process_node"].setValue(group_node_name) + + # put backdrop under + pnlib.create_backdrop(label="Input Process", layer=2, + nodes=[viewer, group_node], color="0x7c7faaff") + + return True + + def get_item(self, data, trackIndex, subTrackIndex): + return {key: val for key, val in data.items() + if subTrackIndex == 
val["subTrackIndex"] + if trackIndex == val["trackIndex"]} + + def byteify(self, input): + """ + Converts unicode strings to strings + It goes trought all dictionary + + Arguments: + input (dict/str): input + + Returns: + dict: with fixed values and keys + + """ + + if isinstance(input, dict): + return {self.byteify(key): self.byteify(value) + for key, value in input.iteritems()} + elif isinstance(input, list): + return [self.byteify(element) for element in input] + elif isinstance(input, unicode): + return input.encode('utf-8') + else: + return input + + def switch(self, container, representation): + self.update(container, representation) + + def remove(self, container): + from avalon.nuke import viewer_update_and_undo_stop + node = nuke.toNode(container['objectName']) + with viewer_update_and_undo_stop(): + nuke.delete(node) diff --git a/pype/plugins/nuke/load/load_gizmo_ip.py b/pype/plugins/nuke/load/load_gizmo_ip.py index 0d78c14214..23d7ef2f4a 100644 --- a/pype/plugins/nuke/load/load_gizmo_ip.py +++ b/pype/plugins/nuke/load/load_gizmo_ip.py @@ -176,8 +176,10 @@ class LoadGizmoInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you " - "run this action again") + msg = "Please create Viewer node before you " + "run this action again" + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py index 5f09adb05f..2b38a9ff08 100644 --- a/pype/plugins/nuke/load/load_luts_ip.py +++ b/pype/plugins/nuke/load/load_luts_ip.py @@ -276,7 +276,10 @@ class LoadLutsInputProcess(api.Loader): if len(viewer) > 0: viewer = viewer[0] else: - self.log.error("Please create Viewer node before you run this action again") + msg = "Please create Viewer node before you " + "run this action again" + self.log.error(msg) + nuke.message(msg) return None # get coordinates of Viewer1 diff --git 
a/pype/plugins/nuke/load/load_matchmove.py b/pype/plugins/nuke/load/load_matchmove.py index 6a674368fb..60d5dc026f 100644 --- a/pype/plugins/nuke/load/load_matchmove.py +++ b/pype/plugins/nuke/load/load_matchmove.py @@ -1,4 +1,5 @@ from avalon import api +import nuke class MatchmoveLoader(api.Loader): @@ -19,6 +20,8 @@ class MatchmoveLoader(api.Loader): exec(open(self.fname).read()) else: - self.log.error("Unsupported script type") + msg = "Unsupported script type" + self.log.error(msg) + nuke.message(msg) return True diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index e598839405..77346a82a4 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -1,11 +1,10 @@ +import re +import nuke import contextlib from avalon import api, io - -import nuke - -from pype.api import Logger -log = Logger().get_logger(__name__, "nuke") +from pype.nuke import presets +from pypeapp import config @contextlib.contextmanager @@ -24,7 +23,7 @@ def preserve_trim(node): offset_frame = None if node['frame_mode'].value() == "start at": start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": + if node['frame_mode'].value() == "offset": offset_frame = node['frame'].value() try: @@ -33,14 +32,14 @@ def preserve_trim(node): if start_at_frame: node['frame_mode'].setValue("start at") node['frame'].setValue(str(script_start)) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) if offset_frame: node['frame_mode'].setValue("offset") node['frame'].setValue(str((script_start + offset_frame))) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) def loader_shift(node, frame, relative=True): @@ -69,11 +68,37 @@ def loader_shift(node, frame, relative=True): return int(script_start) +def add_review_presets_config(): + returning 
= { + "families": list(), + "representations": list() + } + review_presets = config.get_presets()["plugins"]["global"]["publish"].get( + "ExtractReview", {}) + + outputs = review_presets.get("outputs", {}) + # + for output, properities in outputs.items(): + returning["representations"].append(output) + returning["families"] += properities.get("families", []) + + return returning + + class LoadMov(api.Loader): """Load mov file into Nuke""" + presets = add_review_presets_config() + families = [ + "source", + "plate", + "render", + "review"] + presets["families"] - families = ["write", "source", "plate", "render", "review"] - representations = ["wipmov", "h264", "mov", "preview", "review", "mp4"] + representations = [ + "mov", + "preview", + "review", + "mp4"] + presets["representations"] label = "Load mov" order = -10 @@ -85,47 +110,48 @@ class LoadMov(api.Loader): containerise, viewer_update_and_undo_stop ) - version = context['version'] version_data = version.get("data", {}) - orig_first = version_data.get("frameStart", None) - orig_last = version_data.get("frameEnd", None) + orig_first = version_data.get("frameStart") + orig_last = version_data.get("frameEnd") diff = orig_first - 1 - # set first to 1 + first = orig_first - diff last = orig_last - diff - handles = version_data.get("handles", None) - handle_start = version_data.get("handleStart", None) - handle_end = version_data.get("handleEnd", None) - repr_cont = context["representation"]["context"] - # fix handle start and end if none are available - if not handle_start and not handle_end: - handle_start = handles - handle_end = handles + handle_start = version_data.get("handleStart") + handle_end = version_data.get("handleEnd") + + colorspace = version_data.get("colorspace") + repr_cont = context["representation"]["context"] # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range - offset_frame = orig_first + 
handle_start + offset_frame = orig_first - handle_start # Fallback to asset name when namespace is None if namespace is None: namespace = context['asset']['name'] - file = self.fname.replace("\\", "/") - log.info("file: {}\n".format(self.fname)) + file = self.fname + + if not file: + repr_id = context["representation"]["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") read_name = "Read_{0}_{1}_{2}".format( repr_cont["asset"], repr_cont["subset"], repr_cont["representation"]) - # Create the Loader with the filename path set with viewer_update_and_undo_stop(): - # TODO: it might be universal read to img/geo/camera read_node = nuke.createNode( "Read", "name {}".format(read_name) @@ -139,7 +165,23 @@ class LoadMov(api.Loader): read_node["last"].setValue(last) read_node["frame_mode"].setValue("start at") read_node["frame"].setValue(str(offset_frame)) - # add additional metadata from the version to imprint to Avalon knob + + if colorspace: + read_node["colorspace"].setValue(str(colorspace)) + + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + read_node["colorspace"].setValue(str(preset_clrsp)) + + # add additional metadata from the version to imprint Avalon knob add_keys = [ "frameStart", "frameEnd", "handles", "source", "author", "fps", "version", "handleStart", "handleEnd" @@ -147,7 +189,7 @@ class LoadMov(api.Loader): data_imprint = {} for key in add_keys: - if key is 'version': + if key == 'version': data_imprint.update({ key: context["version"]['name'] }) @@ -186,10 +228,18 @@ class LoadMov(api.Loader): ) node = nuke.toNode(container['objectName']) - # TODO: prepare also for other Read img/geo/camera + 
assert node.Class() == "Read", "Must be Read" - file = api.get_representation_path(representation) + file = self.fname + + if not file: + repr_id = representation["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") # Get start frame from version data version = io.find_one({ @@ -207,20 +257,23 @@ class LoadMov(api.Loader): version_data = version.get("data", {}) - orig_first = version_data.get("frameStart", None) - orig_last = version_data.get("frameEnd", None) + orig_first = version_data.get("frameStart") + orig_last = version_data.get("frameEnd") diff = orig_first - 1 + # set first to 1 first = orig_first - diff last = orig_last - diff handles = version_data.get("handles", 0) handle_start = version_data.get("handleStart", 0) handle_end = version_data.get("handleEnd", 0) + colorspace = version_data.get("colorspace") if first is None: - log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format(node['name'].value(), representation)) + self.log.warning("Missing start frame for updated version" + "assuming starts at frame 0 for: " + "{} ({})".format( + node['name'].value(), representation)) first = 0 # fix handle start and end if none are available @@ -231,12 +284,12 @@ class LoadMov(api.Loader): # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range - offset_frame = orig_first + handle_start + offset_frame = orig_first - handle_start # Update the loader's path whilst preserving some values with preserve_trim(node): - node["file"].setValue(file["path"]) - log.info("__ node['file']: {}".format(node["file"].value())) + node["file"].setValue(file) + self.log.info("__ node['file']: {}".format(node["file"].value())) # Set the global in to the start frame of the sequence loader_shift(node, first, relative=True) @@ -247,19 +300,34 @@ class 
LoadMov(api.Loader): node["frame_mode"].setValue("start at") node["frame"].setValue(str(offset_frame)) + if colorspace: + node["colorspace"].setValue(str(colorspace)) + + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + node["colorspace"].setValue(str(preset_clrsp)) + updated_dict = {} updated_dict.update({ "representation": str(representation["_id"]), - "frameStart": version_data.get("frameStart"), - "frameEnd": version_data.get("frameEnd"), - "version": version.get("name"), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), + "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), - "handles": version_data.get("handles"), - "handleStart": version_data.get("handleStart"), - "handleEnd": version_data.get("handleEnd"), - "fps": version_data.get("fps"), + "handleStart": str(handle_start), + "handleEnd": str(handle_end), + "fps": str(version_data.get("fps")), "author": version_data.get("author"), - "outputDir": version_data.get("outputDir"), + "outputDir": version_data.get("outputDir") }) # change color of node @@ -272,7 +340,7 @@ class LoadMov(api.Loader): update_container( node, updated_dict ) - log.info("udated to version: {}".format(version.get("name"))) + self.log.info("udated to version: {}".format(version.get("name"))) def remove(self, container): diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index 8f01d4511b..db77c53aff 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -1,11 +1,9 @@ +import re +import nuke import contextlib from avalon import api, io - -import nuke - -from pype.api import Logger -log = 
Logger().get_logger(__name__, "nuke") +from pype.nuke import presets @contextlib.contextmanager @@ -24,7 +22,7 @@ def preserve_trim(node): offset_frame = None if node['frame_mode'].value() == "start at": start_at_frame = node['frame'].value() - if node['frame_mode'].value() is "offset": + if node['frame_mode'].value() == "offset": offset_frame = node['frame'].value() try: @@ -33,14 +31,14 @@ def preserve_trim(node): if start_at_frame: node['frame_mode'].setValue("start at") node['frame'].setValue(str(script_start)) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) if offset_frame: node['frame_mode'].setValue("offset") node['frame'].setValue(str((script_start + offset_frame))) - log.info("start frame of Read was set to" - "{}".format(script_start)) + print("start frame of Read was set to" + "{}".format(script_start)) def loader_shift(node, frame, relative=True): @@ -72,8 +70,8 @@ def loader_shift(node, frame, relative=True): class LoadSequence(api.Loader): """Load image sequence into Nuke""" - families = ["write", "source", "plate", "render"] - representations = ["exr", "dpx", "jpg", "jpeg"] + families = ["render2d", "source", "plate", "render"] + representations = ["exr", "dpx", "jpg", "jpeg", "png"] label = "Load sequence" order = -10 @@ -89,11 +87,10 @@ class LoadSequence(api.Loader): version = context['version'] version_data = version.get("data", {}) - log.info("version_data: {}\n".format(version_data)) + self.log.info("version_data: {}\n".format(version_data)) self.first_frame = int(nuke.root()["first_frame"].getValue()) self.handle_start = version_data.get("handleStart", 0) - self.handle_start = version_data.get("handleStart", 0) self.handle_end = version_data.get("handleEnd", 0) first = version_data.get("frameStart", None) @@ -106,21 +103,27 @@ class LoadSequence(api.Loader): first -= self.handle_start last += self.handle_end - file = self.fname.replace("\\", "/") + 
file = self.fname - log.info("file: {}\n".format(self.fname)) + if not file: + repr_id = context["representation"]["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") repr_cont = context["representation"]["context"] - read_name = "Read_{0}_{1}_{2}".format( - repr_cont["asset"], - repr_cont["subset"], - repr_cont["representation"]) - if "#" not in file: frame = repr_cont.get("frame") padding = len(frame) file = file.replace(frame, "#"*padding) + read_name = "Read_{0}_{1}_{2}".format( + repr_cont["asset"], + repr_cont["subset"], + repr_cont["representation"]) + # Create the Loader with the filename path set with viewer_update_and_undo_stop(): # TODO: it might be universal read to img/geo/camera @@ -130,24 +133,36 @@ class LoadSequence(api.Loader): r["file"].setValue(file) # Set colorspace defined in version data - colorspace = context["version"]["data"].get("colorspace", None) - if colorspace is not None: + colorspace = context["version"]["data"].get("colorspace") + if colorspace: r["colorspace"].setValue(str(colorspace)) + # load nuke presets for Read's colorspace + read_clrs_presets = presets.get_colorspace_preset().get( + "nuke", {}).get("read", {}) + + # check if any colorspace presets for read is mathing + preset_clrsp = next((read_clrs_presets[k] + for k in read_clrs_presets + if bool(re.search(k, file))), + None) + if preset_clrsp is not None: + r["colorspace"].setValue(str(preset_clrsp)) + loader_shift(r, first, relative=True) r["origfirst"].setValue(int(first)) r["first"].setValue(int(first)) r["origlast"].setValue(int(last)) r["last"].setValue(int(last)) - # add additional metadata from the version to imprint to Avalon knob + # add additional metadata from the version to imprint Avalon knob add_keys = ["frameStart", "frameEnd", "source", "colorspace", "author", "fps", "version", "handleStart", "handleEnd"] data_imprint = {} for k in add_keys: - if k is 'version': + if k == 
'version': data_imprint.update({k: context["version"]['name']}) else: data_imprint.update( @@ -179,7 +194,7 @@ class LoadSequence(api.Loader): rtn["after"].setValue("continue") rtn["input.first_lock"].setValue(True) rtn["input.first"].setValue( - self.handle_start + self.first_frame + self.handle_start + self.first_frame ) if time_warp_nodes != []: @@ -210,16 +225,29 @@ class LoadSequence(api.Loader): """ from avalon.nuke import ( - ls_img_sequence, update_container ) node = nuke.toNode(container['objectName']) - # TODO: prepare also for other Read img/geo/camera + assert node.Class() == "Read", "Must be Read" - path = api.get_representation_path(representation) - file = ls_img_sequence(path) + repr_cont = representation["context"] + + file = self.fname + + if not file: + repr_id = representation["_id"] + self.log.warning( + "Representation id `{}` is failing to load".format(repr_id)) + return + + file = file.replace("\\", "/") + + if "#" not in file: + frame = repr_cont.get("frame") + padding = len(frame) + file = file.replace(frame, "#"*padding) # Get start frame from version data version = io.find_one({ @@ -241,13 +269,14 @@ class LoadSequence(api.Loader): self.handle_start = version_data.get("handleStart", 0) self.handle_end = version_data.get("handleEnd", 0) - first = version_data.get("frameStart", None) - last = version_data.get("frameEnd", None) + first = version_data.get("frameStart") + last = version_data.get("frameEnd") if first is None: - log.warning("Missing start frame for updated version" - "assuming starts at frame 0 for: " - "{} ({})".format(node['name'].value(), representation)) + self.log.warning("Missing start frame for updated version" + "assuming starts at frame 0 for: " + "{} ({})".format( + node['name'].value(), representation)) first = 0 first -= self.handle_start @@ -255,8 +284,8 @@ class LoadSequence(api.Loader): # Update the loader's path whilst preserving some values with preserve_trim(node): - node["file"].setValue(file["path"]) - 
log.info("__ node['file']: {}".format(node["file"].value())) + node["file"].setValue(file) + self.log.info("__ node['file']: {}".format(node["file"].value())) # Set the global in to the start frame of the sequence loader_shift(node, first, relative=True) @@ -268,14 +297,14 @@ class LoadSequence(api.Loader): updated_dict = {} updated_dict.update({ "representation": str(representation["_id"]), - "frameStart": version_data.get("frameStart"), - "frameEnd": version_data.get("frameEnd"), - "version": version.get("name"), + "frameStart": str(first), + "frameEnd": str(last), + "version": str(version.get("name")), "colorspace": version_data.get("colorspace"), "source": version_data.get("source"), - "handleStart": version_data.get("handleStart"), - "handleEnd": version_data.get("handleEnd"), - "fps": version_data.get("fps"), + "handleStart": str(self.handle_start), + "handleEnd": str(self.handle_end), + "fps": str(version_data.get("fps")), "author": version_data.get("author"), "outputDir": version_data.get("outputDir"), }) @@ -296,7 +325,7 @@ class LoadSequence(api.Loader): node, updated_dict ) - log.info("udated to version: {}".format(version.get("name"))) + self.log.info("udated to version: {}".format(version.get("name"))) def remove(self, container): diff --git a/pype/plugins/nuke/publish/collect_asset_info.py b/pype/plugins/nuke/publish/collect_asset_info.py index 76b93ef3d0..8a8791ec36 100644 --- a/pype/plugins/nuke/publish/collect_asset_info.py +++ b/pype/plugins/nuke/publish/collect_asset_info.py @@ -13,8 +13,10 @@ class CollectAssetInfo(pyblish.api.ContextPlugin): ] def process(self, context): - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}) + asset_data = io.find_one({ + "type": "asset", + "name": api.Session["AVALON_ASSET"] + }) self.log.info("asset_data: {}".format(asset_data)) context.data['handles'] = int(asset_data["data"].get("handles", 0)) diff --git a/pype/plugins/nuke/publish/collect_instances.py 
b/pype/plugins/nuke/publish/collect_instances.py index c5fb289a1e..cbbef70e4a 100644 --- a/pype/plugins/nuke/publish/collect_instances.py +++ b/pype/plugins/nuke/publish/collect_instances.py @@ -15,9 +15,10 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): hosts = ["nuke", "nukeassist"] def process(self, context): - - asset_data = io.find_one({"type": "asset", - "name": api.Session["AVALON_ASSET"]}) + asset_data = io.find_one({ + "type": "asset", + "name": api.Session["AVALON_ASSET"] + }) self.log.debug("asset_data: {}".format(asset_data["data"])) instances = [] @@ -27,12 +28,15 @@ class CollectNukeInstances(pyblish.api.ContextPlugin): self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes())) for node in nuke.allNodes(): + if node.Class() in ["Viewer", "Dot"]: + continue + try: if node["disable"].value(): continue except Exception as E: self.log.warning(E) - + # get data from avalon knob self.log.debug("node[name]: {}".format(node['name'].value())) diff --git a/pype/plugins/nuke/publish/collect_review.py b/pype/plugins/nuke/publish/collect_review.py index 7e7cbedd6c..c95c94541d 100644 --- a/pype/plugins/nuke/publish/collect_review.py +++ b/pype/plugins/nuke/publish/collect_review.py @@ -1,12 +1,12 @@ import pyblish.api import nuke + class CollectReview(pyblish.api.InstancePlugin): """Collect review instance from rendered frames """ order = pyblish.api.CollectorOrder + 0.3 - family = "review" label = "Collect Review" hosts = ["nuke"] families = ["render", "render.local", "render.farm"] @@ -25,4 +25,6 @@ class CollectReview(pyblish.api.InstancePlugin): instance.data["families"].append("review") instance.data['families'].append('ftrack') + self.log.info("Review collected: `{}`".format(instance)) + self.log.debug("__ instance.data: `{}`".format(instance.data)) diff --git a/pype/plugins/nuke/publish/collect_script_version.py b/pype/plugins/nuke/publish/collect_script_version.py new file mode 100644 index 0000000000..9a6b5bf572 --- /dev/null +++ 
b/pype/plugins/nuke/publish/collect_script_version.py @@ -0,0 +1,22 @@ +import os +import pype.api as pype +import pyblish.api + + +class CollectScriptVersion(pyblish. api.ContextPlugin): + """Collect Script Version.""" + + order = pyblish.api.CollectorOrder + label = "Collect Script Version" + hosts = [ + "nuke", + "nukeassist" + ] + + def process(self, context): + file_path = context.data["currentFile"] + base_name = os.path.basename(file_path) + # get version string + version = pype.get_version_from_path(base_name) + + context.data['version'] = version diff --git a/pype/plugins/nuke/publish/collect_slate_node.py b/pype/plugins/nuke/publish/collect_slate_node.py new file mode 100644 index 0000000000..d8d6b50f05 --- /dev/null +++ b/pype/plugins/nuke/publish/collect_slate_node.py @@ -0,0 +1,40 @@ +import pyblish.api +import nuke + + +class CollectSlate(pyblish.api.InstancePlugin): + """Check if SLATE node is in scene and connected to rendering tree""" + + order = pyblish.api.CollectorOrder + 0.09 + label = "Collect Slate Node" + hosts = ["nuke"] + families = ["write"] + + def process(self, instance): + node = instance[0] + + slate = next((n for n in nuke.allNodes() + if "slate" in n.name().lower() + if not n["disable"].getValue()), + None) + + if slate: + # check if slate node is connected to write node tree + slate_check = 0 + slate_node = None + while slate_check == 0: + try: + node = node.dependencies()[0] + if slate.name() in node.name(): + slate_node = node + slate_check = 1 + except IndexError: + break + + if slate_node: + instance.data["slateNode"] = slate_node + instance.data["families"].append("slate") + self.log.info( + "Slate node is in node graph: `{}`".format(slate.name())) + self.log.debug( + "__ instance: `{}`".format(instance)) diff --git a/pype/plugins/nuke/publish/collect_workfile.py b/pype/plugins/nuke/publish/collect_workfile.py index aaee554fbf..9c01a3ec97 100644 --- a/pype/plugins/nuke/publish/collect_workfile.py +++ 
b/pype/plugins/nuke/publish/collect_workfile.py @@ -2,8 +2,6 @@ import nuke import pyblish.api import os -import pype.api as pype - from avalon.nuke import ( get_avalon_knob_data, add_publish_knob @@ -11,7 +9,7 @@ from avalon.nuke import ( class CollectWorkfile(pyblish.api.ContextPlugin): - """Publish current script version.""" + """Collect current script for publish.""" order = pyblish.api.CollectorOrder + 0.1 label = "Collect Workfile" @@ -31,9 +29,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin): base_name = os.path.basename(file_path) subset = "{0}_{1}".format(os.getenv("AVALON_TASK", None), family) - # get version string - version = pype.get_version_from_path(base_name) - # Get frame range first_frame = int(root["first_frame"].getValue()) last_frame = int(root["last_frame"].getValue()) @@ -53,7 +48,6 @@ class CollectWorkfile(pyblish.api.ContextPlugin): script_data = { "asset": os.getenv("AVALON_ASSET", None), - "version": version, "frameStart": first_frame + handle_start, "frameEnd": last_frame - handle_end, "resolutionWidth": resolution_width, @@ -78,8 +72,7 @@ class CollectWorkfile(pyblish.api.ContextPlugin): "publish": root.knob('publish').value(), "family": family, "families": [family], - "representations": list(), - "subsetGroup": "workfiles" + "representations": list() }) # adding basic script data diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index dd3049834d..bf1c6a4b66 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -14,6 +14,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): families = ["write"] def process(self, instance): + # adding 2d focused rendering + instance.data["families"].append("render2d") node = None for x in instance: @@ -50,9 +52,10 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_dir = os.path.dirname(path) self.log.debug('output dir: {}'.format(output_dir)) - # get version - version = 
pype.get_version_from_path(nuke.root().name()) - instance.data['version'] = version + # get version to instance for integration + instance.data['version'] = instance.context.data.get( + "version", pype.get_version_from_path(nuke.root().name())) + self.log.debug('Write Version: %s' % instance.data('version')) # create label @@ -94,12 +97,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "handleEnd": handle_end, "frameStart": first_frame + handle_start, "frameEnd": last_frame - handle_end, - "version": int(version), + "version": int(instance.data['version']), "colorspace": node["colorspace"].value(), - "families": [instance.data["family"]], + "families": ["render"], "subset": instance.data["subset"], "fps": instance.context.data["fps"] } + instance.data["family"] = "write" group_node = [x for x in instance if x.Class() == "Group"][0] deadlineChunkSize = 1 @@ -125,9 +129,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "families": families, "colorspace": node["colorspace"].value(), "deadlineChunkSize": deadlineChunkSize, - "deadlinePriority": deadlinePriority, - "subsetGroup": "renders" + "deadlinePriority": deadlinePriority }) - self.log.debug("instance.data: {}".format(instance.data)) diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py index 825db67e9d..9b8baa468b 100644 --- a/pype/plugins/nuke/publish/extract_render_local.py +++ b/pype/plugins/nuke/publish/extract_render_local.py @@ -28,6 +28,11 @@ class NukeRenderLocal(pype.api.Extractor): self.log.debug("instance collected: {}".format(instance.data)) first_frame = instance.data.get("frameStart", None) + + # exception for slate workflow + if "slate" in instance.data["families"]: + first_frame -= 1 + last_frame = instance.data.get("frameEnd", None) node_subset_name = instance.data.get("name", None) @@ -47,6 +52,10 @@ class NukeRenderLocal(pype.api.Extractor): int(last_frame) ) + # exception for slate workflow + if "slate" in 
instance.data["families"]: + first_frame += 1 + path = node['file'].value() out_dir = os.path.dirname(path) ext = node["file_type"].value() diff --git a/pype/plugins/nuke/publish/extract_review_data_lut.py b/pype/plugins/nuke/publish/extract_review_data_lut.py index 4373309363..90b1fda1ec 100644 --- a/pype/plugins/nuke/publish/extract_review_data_lut.py +++ b/pype/plugins/nuke/publish/extract_review_data_lut.py @@ -41,7 +41,7 @@ class ExtractReviewDataLut(pype.api.Extractor): with anlib.maintained_selection(): exporter = pnlib.ExporterReviewLut( self, instance - ) + ) data = exporter.generate_lut() # assign to representations diff --git a/pype/plugins/nuke/publish/extract_review_data_mov.py b/pype/plugins/nuke/publish/extract_review_data_mov.py index 333774bcd7..8b204680a7 100644 --- a/pype/plugins/nuke/publish/extract_review_data_mov.py +++ b/pype/plugins/nuke/publish/extract_review_data_mov.py @@ -3,7 +3,6 @@ import pyblish.api from avalon.nuke import lib as anlib from pype.nuke import lib as pnlib import pype -reload(pnlib) class ExtractReviewDataMov(pype.api.Extractor): @@ -16,23 +15,20 @@ class ExtractReviewDataMov(pype.api.Extractor): order = pyblish.api.ExtractorOrder + 0.01 label = "Extract Review Data Mov" - families = ["review"] + families = ["review", "render", "render.local"] hosts = ["nuke"] def process(self, instance): families = instance.data["families"] self.log.info("Creating staging dir...") - if "representations" in instance.data: - staging_dir = instance.data[ - "representations"][0]["stagingDir"].replace("\\", "/") - instance.data["stagingDir"] = staging_dir - instance.data["representations"][0]["tags"] = [] - else: - instance.data["representations"] = [] - # get output path - render_path = instance.data['path'] - staging_dir = os.path.normpath(os.path.dirname(render_path)) - instance.data["stagingDir"] = staging_dir + + if "representations" not in instance.data: + instance.data["representations"] = list() + + staging_dir = os.path.normpath( + 
os.path.dirname(instance.data['path'])) + + instance.data["stagingDir"] = staging_dir self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) @@ -46,6 +42,15 @@ class ExtractReviewDataMov(pype.api.Extractor): instance.data["families"].remove("review") instance.data["families"].remove("ftrack") data = exporter.generate_mov(farm=True) + + self.log.debug( + "_ data: {}".format(data)) + + instance.data.update({ + "bakeRenderPath": data.get("bakeRenderPath"), + "bakeScriptPath": data.get("bakeScriptPath"), + "bakeWriteNodeName": data.get("bakeWriteNodeName") + }) else: data = exporter.generate_mov() diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py new file mode 100644 index 0000000000..4d43f38859 --- /dev/null +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -0,0 +1,154 @@ +import os +import nuke +from avalon.nuke import lib as anlib +import pyblish.api +import pype + + +class ExtractSlateFrame(pype.api.Extractor): + """Extracts movie and thumbnail with baked in luts + + must be run after extract_render_local.py + + """ + + order = pyblish.api.ExtractorOrder - 0.001 + label = "Extract Slate Frame" + + families = ["slate"] + hosts = ["nuke"] + + + def process(self, instance): + if hasattr(self, "viewer_lut_raw"): + self.viewer_lut_raw = self.viewer_lut_raw + else: + self.viewer_lut_raw = False + + with anlib.maintained_selection(): + self.log.debug("instance: {}".format(instance)) + self.log.debug("instance.data[families]: {}".format( + instance.data["families"])) + + self.render_slate(instance) + + def render_slate(self, instance): + node = instance[0] # group node + self.log.info("Creating staging dir...") + + if "representations" not in instance.data: + instance.data["representations"] = list() + + staging_dir = os.path.normpath( + os.path.dirname(instance.data['path'])) + + instance.data["stagingDir"] = staging_dir + + self.log.info( + "StagingDir 
`{0}`...".format(instance.data["stagingDir"])) + + temporary_nodes = [] + collection = instance.data.get("collection", None) + + if collection: + # get path + fname = os.path.basename(collection.format( + "{head}{padding}{tail}")) + fhead = collection.format("{head}") + + # get first and last frame + first_frame = min(collection.indexes) - 1 + + if "slate" in instance.data["families"]: + first_frame += 1 + + last_frame = first_frame + else: + fname = os.path.basename(instance.data.get("path", None)) + fhead = os.path.splitext(fname)[0] + "." + first_frame = instance.data.get("frameStart", None) - 1 + last_frame = first_frame + + if "#" in fhead: + fhead = fhead.replace("#", "")[:-1] + + previous_node = node + + # get input process and connect it to baking + ipn = self.get_view_process_node() + if ipn is not None: + ipn.setInput(0, previous_node) + previous_node = ipn + temporary_nodes.append(ipn) + + if not self.viewer_lut_raw: + dag_node = nuke.createNode("OCIODisplay") + dag_node.setInput(0, previous_node) + previous_node = dag_node + temporary_nodes.append(dag_node) + + # create write node + write_node = nuke.createNode("Write") + file = fhead + "slate.png" + path = os.path.join(staging_dir, file).replace("\\", "/") + instance.data["slateFrame"] = path + write_node["file"].setValue(path) + write_node["file_type"].setValue("png") + write_node["raw"].setValue(1) + write_node.setInput(0, previous_node) + temporary_nodes.append(write_node) + + # fill slate node with comments + self.add_comment_slate_node(instance) + + # Render frames + nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + + self.log.debug( + "slate frame path: {}".format(instance.data["slateFrame"])) + + # Clean up + for node in temporary_nodes: + nuke.delete(node) + + + def get_view_process_node(self): + + # Select only the target node + if nuke.selectedNodes(): + [n.setSelected(False) for n in nuke.selectedNodes()] + + ipn_orig = None + for v in [n for n in nuke.allNodes() + if 
"Viewer" in n.Class()]: + ip = v['input_process'].getValue() + ipn = v['input_process_node'].getValue() + if "VIEWER_INPUT" not in ipn and ip: + ipn_orig = nuke.toNode(ipn) + ipn_orig.setSelected(True) + + if ipn_orig: + nuke.nodeCopy('%clipboard%') + + [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all + + nuke.nodePaste('%clipboard%') + + ipn = nuke.selectedNode() + + return ipn + + def add_comment_slate_node(self, instance): + node = instance.data.get("slateNode") + if not node: + return + + comment = instance.context.data.get("comment") + intent = instance.context.data.get("intent") + + try: + node["f_submission_note"].setValue(comment) + node["f_submitting_for"].setValue(intent) + except NameError: + return + instance.data.pop("slateNode") diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py index 450bb39928..55ba34a0d4 100644 --- a/pype/plugins/nuke/publish/extract_thumbnail.py +++ b/pype/plugins/nuke/publish/extract_thumbnail.py @@ -28,19 +28,16 @@ class ExtractThumbnail(pype.api.Extractor): self.render_thumbnail(instance) def render_thumbnail(self, instance): - node = instance[0] # group node + node = instance[0] # group node self.log.info("Creating staging dir...") - if "representations" in instance.data: - staging_dir = instance.data[ - "representations"][0]["stagingDir"].replace("\\", "/") - instance.data["stagingDir"] = staging_dir - instance.data["representations"][0]["tags"] = ["review"] - else: - instance.data["representations"] = [] - # get output path - render_path = instance.data['path'] - staging_dir = os.path.normpath(os.path.dirname(render_path)) - instance.data["stagingDir"] = staging_dir + + if "representations" not in instance.data: + instance.data["representations"] = list() + + staging_dir = os.path.normpath( + os.path.dirname(instance.data['path'])) + + instance.data["stagingDir"] = staging_dir self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) 
@@ -165,7 +162,7 @@ class ExtractThumbnail(pype.api.Extractor): if ipn_orig: nuke.nodeCopy('%clipboard%') - [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all + [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all nuke.nodePaste('%clipboard%') diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index d9207d2bfc..71108189c0 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -1,7 +1,7 @@ import os import json import getpass - + from avalon import api from avalon.vendor import requests import re @@ -26,31 +26,69 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): def process(self, instance): node = instance[0] - # for x in instance: - # if x.Class() == "Write": - # node = x - # - # if node is None: - # return + context = instance.context DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", "http://localhost:8082") assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" - context = instance.context + self.deadline_url = "{}/api/jobs".format(DEADLINE_REST_URL) + self._comment = context.data.get("comment", "") + self._ver = re.search(r"\d+\.\d+", context.data.get("hostVersion")) + self._deadline_user = context.data.get( + "deadlineUser", getpass.getuser()) + self._frame_start = int(instance.data["frameStart"]) + self._frame_end = int(instance.data["frameEnd"]) # get output path render_path = instance.data['path'] - render_dir = os.path.normpath(os.path.dirname(render_path)) - script_path = context.data["currentFile"] - script_name = os.path.basename(script_path) - comment = context.data.get("comment", "") + # exception for slate workflow + if "slate" in instance.data["families"]: + self._frame_start -= 1 - deadline_user = context.data.get("deadlineUser", getpass.getuser()) + response = self.payload_submit(instance, + script_path, + render_path, + node.name() + ) + # Store output dir for unified 
publisher (filesequence) + instance.data["deadlineSubmissionJob"] = response.json() + instance.data["publishJobState"] = "Active" + + if instance.data.get("bakeScriptPath"): + render_path = instance.data.get("bakeRenderPath") + script_path = instance.data.get("bakeScriptPath") + exe_node_name = instance.data.get("bakeWriteNodeName") + + # exception for slate workflow + if "slate" in instance.data["families"]: + self._frame_start += 1 + + resp = self.payload_submit(instance, + script_path, + render_path, + exe_node_name, + response.json() + ) + # Store output dir for unified publisher (filesequence) + instance.data["deadlineSubmissionJob"] = resp.json() + instance.data["publishJobState"] = "Suspended" + + def payload_submit(self, + instance, + script_path, + render_path, + exe_node_name, + responce_data=None + ): + render_dir = os.path.normpath(os.path.dirname(render_path)) + script_name = os.path.basename(script_path) jobname = "%s - %s" % (script_name, instance.name) - ver = re.search(r"\d+\.\d+", context.data.get("hostVersion")) + + if not responce_data: + responce_data = {} try: # Ensure render folder exists @@ -58,10 +96,6 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): except OSError: pass - # Documentation for keys available at: - # https://docs.thinkboxsoftware.com - # /products/deadline/8.0/1_User%20Manual/manual - # /manual-submission.html#job-info-file-options payload = { "JobInfo": { # Top-level group name @@ -71,21 +105,20 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): "Name": jobname, # Arbitrary username, for visualisation in Monitor - "UserName": deadline_user, + "UserName": self._deadline_user, + + "Priority": instance.data["deadlinePriority"], + + "Pool": "2d", + "SecondaryPool": "2d", "Plugin": "Nuke", "Frames": "{start}-{end}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]) + start=self._frame_start, + end=self._frame_end ), - "ChunkSize": instance.data["deadlineChunkSize"], - "Priority": 
instance.data["deadlinePriority"], + "Comment": self._comment, - "Comment": comment, - - # Optional, enable double-click to preview rendered - # frames from Deadline Monitor - # "OutputFilename0": output_filename_0.replace("\\", "/"), }, "PluginInfo": { # Input @@ -96,27 +129,29 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): # "OutputFilePrefix": render_variables["filename_prefix"], # Mandatory for Deadline - "Version": ver.group(), + "Version": self._ver.group(), # Resolve relative references "ProjectPath": script_path, "AWSAssetFile0": render_path, # Only the specific write node is rendered. - "WriteNode": node.name() + "WriteNode": exe_node_name }, # Mandatory for Deadline, may be empty "AuxFiles": [] } + if responce_data.get("_id"): + payload["JobInfo"].update({ + "JobType": "Normal", + "BatchName": responce_data["Props"]["Batch"], + "JobDependency0": responce_data["_id"], + "ChunkSize": 99999999 + }) + # Include critical environment variables with submission keys = [ - # This will trigger `userSetup.py` on the slave - # such that proper initialisation happens the same - # way as it does on a local machine. - # TODO(marcus): This won't work if the slaves don't - # have accesss to these paths, such as if slaves are - # running Linux and the submitter is on Windows. 
"PYTHONPATH", "PATH", "AVALON_SCHEMA", @@ -162,11 +197,12 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): if key == "PYTHONPATH": clean_path = clean_path.replace('python2', 'python3') + clean_path = clean_path.replace( - os.path.normpath( - environment['PYPE_STUDIO_CORE_MOUNT']), # noqa - os.path.normpath( - environment['PYPE_STUDIO_CORE_PATH'])) # noqa + os.path.normpath( + environment['PYPE_STUDIO_CORE_MOUNT']), # noqa + os.path.normpath( + environment['PYPE_STUDIO_CORE_PATH'])) # noqa clean_environment[key] = clean_path environment = clean_environment @@ -181,20 +217,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): plugin = payload["JobInfo"]["Plugin"] self.log.info("using render plugin : {}".format(plugin)) - self.preflight_check(instance) - self.log.info("Submitting..") self.log.info(json.dumps(payload, indent=4, sort_keys=True)) - # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(DEADLINE_REST_URL) - response = requests.post(url, json=payload) + response = requests.post(self.deadline_url, json=payload) + if not response.ok: raise Exception(response.text) - # Store output dir for unified publisher (filesequence) - instance.data["deadlineSubmissionJob"] = response.json() - instance.data["publishJobState"] = "Active" + return response def preflight_check(self, instance): """Ensure the startFrame, endFrame and byFrameStep are integers""" diff --git a/pype/plugins/nuke/publish/validate_output_resolution.py b/pype/plugins/nuke/publish/validate_output_resolution.py new file mode 100644 index 0000000000..2563ee929f --- /dev/null +++ b/pype/plugins/nuke/publish/validate_output_resolution.py @@ -0,0 +1,78 @@ +import nuke + +import pyblish.api + + +class RepairWriteResolutionDifference(pyblish.api.Action): + + label = "Repair" + icon = "wrench" + on = "failed" + + def process(self, context, plugin): + + # Get the errored instances + failed = [] + for result in context.data["results"]: + if (result["error"] is not None and 
result["instance"] is not None + and result["instance"] not in failed): + failed.append(result["instance"]) + + # Apply pyblish.logic to get the instances for the plug-in + instances = pyblish.api.instances_by_plugin(failed, plugin) + + for instance in instances: + reformat = instance[0].dependencies()[0] + if reformat.Class() != "Reformat": + reformat = nuke.nodes.Reformat(inputs=[instance[0].input(0)]) + + xpos = instance[0].xpos() + ypos = instance[0].ypos() - 26 + + dependent_ypos = instance[0].dependencies()[0].ypos() + if (instance[0].ypos() - dependent_ypos) <= 51: + xpos += 110 + + reformat.setXYpos(xpos, ypos) + + instance[0].setInput(0, reformat) + + reformat["resize"].setValue("none") + + +class ValidateOutputResolution(pyblish.api.InstancePlugin): + """Validates Output Resolution. + + It is making sure the resolution of write's input is the same as + Format definition of script in Root node. + """ + + order = pyblish.api.ValidatorOrder + optional = True + families = ["render", "render.local", "render.farm"] + label = "Write Resolution" + hosts = ["nuke"] + actions = [RepairWriteResolutionDifference] + + def process(self, instance): + + # Skip bounding box check if a crop node exists. + if instance[0].dependencies()[0].Class() == "Crop": + return + + msg = "Bounding box is outside the format." 
+ assert self.check_resolution(instance), msg + + def check_resolution(self, instance): + node = instance[0] + + root_width = instance.data["resolutionWidth"] + root_height = instance.data["resolutionHeight"] + + write_width = node.format().width() + write_height = node.format().height() + + if (root_width != write_width) or (root_height != write_height): + return None + else: + return True diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py index 3887b5d5b7..169ea1ecb5 100644 --- a/pype/plugins/nuke/publish/validate_rendered_frames.py +++ b/pype/plugins/nuke/publish/validate_rendered_frames.py @@ -41,7 +41,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): if not repre.get('files'): msg = ("no frames were collected, " "you need to render them") - self.log.warning(msg) + self.log.error(msg) raise ValidationException(msg) collections, remainder = clique.assemble(repre["files"]) @@ -76,6 +76,9 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin): 'len(collection.indexes): {}'.format(collected_frames_len) ) + if "slate" in instance.data["families"]: + collected_frames_len -= 1 + assert (collected_frames_len == frame_length), ( "{} missing frames. 
Use repair to render all frames" ).format(__name__) diff --git a/pype/plugins/nuke/publish/validate_script.py b/pype/plugins/nuke/publish/validate_script.py index 307e3ade59..f7dd84d714 100644 --- a/pype/plugins/nuke/publish/validate_script.py +++ b/pype/plugins/nuke/publish/validate_script.py @@ -15,12 +15,6 @@ class ValidateScript(pyblish.api.InstancePlugin): def process(self, instance): ctx_data = instance.context.data asset_name = ctx_data["asset"] - - # asset = io.find_one({ - # "type": "asset", - # "name": asset_name - # }) - asset = lib.get_asset(asset_name) asset_data = asset["data"] diff --git a/pype/plugins/nuke/publish/validate_write_bounding_box.py b/pype/plugins/nuke/publish/validate_write_bounding_box.py index 417d4ab004..e4b7c77a25 100644 --- a/pype/plugins/nuke/publish/validate_write_bounding_box.py +++ b/pype/plugins/nuke/publish/validate_write_bounding_box.py @@ -57,7 +57,7 @@ class ValidateNukeWriteBoundingBox(pyblish.api.InstancePlugin): order = pyblish.api.ValidatorOrder optional = True - families = ["render"] + families = ["render", "render.local", "render.farm"] label = "Write Bounding Box" hosts = ["nuke"] actions = [RepairNukeBoundingBoxAction] diff --git a/pype/plugins/nuke/publish/validate_write_knobs.py b/pype/plugins/nuke/publish/validate_write_knobs.py index 072ffd4b17..24572bedb3 100644 --- a/pype/plugins/nuke/publish/validate_write_knobs.py +++ b/pype/plugins/nuke/publish/validate_write_knobs.py @@ -8,24 +8,31 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin): """Ensure knobs are consistent. 
Knobs to validate and their values comes from the - "nuke/knobs.json" preset, which needs this structure: - { - "family": { - "knob_name": knob_value - } - } + + Example for presets in config: + "presets/plugins/nuke/publish.json" preset, which needs this structure: + "ValidateNukeWriteKnobs": { + "enabled": true, + "knobs": { + "family": { + "knob_name": knob_value + } + } + } """ order = pyblish.api.ValidatorOrder - label = "Knobs" + label = "Validate Write Knobs" hosts = ["nuke"] actions = [pype.api.RepairContextAction] optional = True def process(self, context): # Check for preset existence. - if not context.data["presets"]["nuke"].get("knobs"): + if not getattr(self, "knobs"): return + + self.log.debug("__ self.knobs: {}".format(self.knobs)) invalid = self.get_invalid(context, compute=True) if invalid: @@ -43,7 +50,6 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin): @classmethod def get_invalid_knobs(cls, context): - presets = context.data["presets"]["nuke"]["knobs"] invalid_knobs = [] for instance in context: # Filter publisable instances. @@ -53,15 +59,15 @@ class ValidateNukeWriteKnobs(pyblish.api.ContextPlugin): # Filter families. families = [instance.data["family"]] families += instance.data.get("families", []) - families = list(set(families) & set(presets.keys())) + families = list(set(families) & set(cls.knobs.keys())) if not families: continue # Get all knobs to validate. knobs = {} for family in families: - for preset in presets[family]: - knobs.update({preset: presets[family][preset]}) + for preset in cls.knobs[family]: + knobs.update({preset: cls.knobs[family][preset]}) # Get invalid knobs. 
nodes = [] diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py index 0729f20957..4525b4947f 100644 --- a/pype/plugins/nukestudio/publish/collect_clips.py +++ b/pype/plugins/nukestudio/publish/collect_clips.py @@ -105,9 +105,8 @@ class CollectClips(api.ContextPlugin): "asset": asset, "family": "clip", "families": [], - "handles": 0, - "handleStart": projectdata.get("handles", 0), - "handleEnd": projectdata.get("handles", 0), + "handleStart": projectdata.get("handleStart", 0), + "handleEnd": projectdata.get("handleEnd", 0), "version": int(version)}) instance = context.create_instance(**data) diff --git a/pype/plugins/nukestudio/publish/collect_effects.py b/pype/plugins/nukestudio/publish/collect_effects.py index 0aee0adf2e..55ff849c88 100644 --- a/pype/plugins/nukestudio/publish/collect_effects.py +++ b/pype/plugins/nukestudio/publish/collect_effects.py @@ -11,7 +11,9 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin): def process(self, instance): - self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset"))) + self.log.debug( + "Finding soft effect for subset: `{}`".format( + instance.data.get("subset"))) # taking active sequence subset = instance.data.get("subset") @@ -41,8 +43,12 @@ class CollectVideoTracksLuts(pyblish.api.InstancePlugin): if len(instance.data.get("effectTrackItems", {}).keys()) > 0: instance.data["families"] += ["lut"] - self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys())) - self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {}))) + self.log.debug( + "effects.keys: {}".format( + instance.data.get("effectTrackItems", {}).keys())) + self.log.debug( + "effects: {}".format( + instance.data.get("effectTrackItems", {}))) def add_effect(self, instance, track_index, item): track = item.parentTrack().name() diff --git a/pype/plugins/nukestudio/publish/collect_handles.py 
b/pype/plugins/nukestudio/publish/collect_handles.py index 8da83e715b..28f502d846 100644 --- a/pype/plugins/nukestudio/publish/collect_handles.py +++ b/pype/plugins/nukestudio/publish/collect_handles.py @@ -24,7 +24,6 @@ class CollectClipHandles(api.ContextPlugin): continue # get handles - handles = int(instance.data["handles"]) handle_start = int(instance.data["handleStart"]) handle_end = int(instance.data["handleEnd"]) @@ -38,19 +37,16 @@ class CollectClipHandles(api.ContextPlugin): self.log.debug("Adding to shared assets: `{}`".format( instance.data["name"])) asset_shared.update({ - "handles": handles, "handleStart": handle_start, "handleEnd": handle_end }) - for instance in filtered_instances: if not instance.data.get("main") and not instance.data.get("handleTag"): self.log.debug("Synchronize handles on: `{}`".format( instance.data["name"])) name = instance.data["asset"] s_asset_data = assets_shared.get(name) - instance.data["handles"] = s_asset_data.get("handles", 0) instance.data["handleStart"] = s_asset_data.get( "handleStart", 0 ) diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py index 5f29837d80..5085b9719e 100644 --- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py +++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py @@ -263,7 +263,6 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin): # get custom attributes of the shot if instance.data.get("main"): in_info['custom_attributes'] = { - 'handles': int(instance.data.get('handles', 0)), "handleStart": handle_start, "handleEnd": handle_end, "frameStart": instance.data["frameStart"], diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py index be448931c8..b98eccce7f 100644 --- a/pype/plugins/nukestudio/publish/collect_plates.py +++ b/pype/plugins/nukestudio/publish/collect_plates.py @@ -134,7 +134,6 @@ class 
CollectPlatesData(api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "colorspaceScript": instance.context.data["colorspace"], "families": [f for f in families if 'ftrack' not in f], @@ -156,8 +155,9 @@ class CollectPlatesData(api.InstancePlugin): ext=ext ) - start_frame = source_first_frame + instance.data["sourceInH"] - duration = instance.data["sourceOutH"] - instance.data["sourceInH"] + start_frame = int(source_first_frame + instance.data["sourceInH"]) + duration = int( + instance.data["sourceOutH"] - instance.data["sourceInH"]) end_frame = start_frame + duration self.log.debug("start_frame: `{}`".format(start_frame)) self.log.debug("end_frame: `{}`".format(end_frame)) diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py index f9032b2ca4..f223e5ca65 100644 --- a/pype/plugins/nukestudio/publish/collect_reviews.py +++ b/pype/plugins/nukestudio/publish/collect_reviews.py @@ -125,7 +125,7 @@ class CollectReviews(api.InstancePlugin): thumb_path, format='png' ) - + self.log.debug("__ sourceIn: `{}`".format(instance.data["sourceIn"])) self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(thumbnail, thumb_frame)) @@ -145,7 +145,10 @@ class CollectReviews(api.InstancePlugin): item = instance.data["item"] transfer_data = [ - "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track", "version" + "handleStart", "handleEnd", "sourceIn", "sourceOut", + "frameStart", "frameEnd", "sourceInH", "sourceOutH", + "clipIn", "clipOut", "clipInH", "clipOutH", "asset", + "track", "version" ] version_data = dict() @@ -154,7 +157,6 @@ class CollectReviews(api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": 
item.sourceMediaColourTransform(), "families": instance.data["families"], "subset": instance.data["subset"], diff --git a/pype/plugins/nukestudio/publish/collect_tag_handles.py b/pype/plugins/nukestudio/publish/collect_tag_handles.py index 929f5e3b68..a6a63faea9 100644 --- a/pype/plugins/nukestudio/publish/collect_tag_handles.py +++ b/pype/plugins/nukestudio/publish/collect_tag_handles.py @@ -38,7 +38,9 @@ class CollectClipTagHandles(api.ContextPlugin): # gets arguments if there are any t_args = t_metadata.get("tag.args", "") - assert t_args, self.log.error("Tag with Handles is missing Args. Use only handle start/end") + assert t_args, self.log.error( + "Tag with Handles is missing Args. " + "Use only handle start/end") t_args = json.loads(t_args.replace("'", "\"")) # add in start @@ -55,8 +57,8 @@ class CollectClipTagHandles(api.ContextPlugin): # adding handles to asset_shared on context if instance.data.get("handleEnd"): - assets_shared_a["handleEnd"] = instance.data["handleEnd"] + assets_shared_a[ + "handleEnd"] = instance.data["handleEnd"] if instance.data.get("handleStart"): - assets_shared_a["handleStart"] = instance.data["handleStart"] - if instance.data.get("handles"): - assets_shared_a["handles"] = instance.data["handles"] + assets_shared_a[ + "handleStart"] = instance.data["handleStart"] diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py index 7aa79d6cc3..5e2721aa8e 100644 --- a/pype/plugins/nukestudio/publish/extract_effects.py +++ b/pype/plugins/nukestudio/publish/extract_effects.py @@ -6,6 +6,7 @@ import pyblish.api import tempfile from avalon import io, api + class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): """Collect video tracks effects into context.""" @@ -17,9 +18,12 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): item = instance.data["item"] effects = instance.data.get("effectTrackItems") - instance.data["families"] = [f for f in instance.data.get("families", 
[]) if f not in ["lut"]] + instance.data["families"] = [f for f in instance.data.get( + "families", []) if f not in ["lut"]] - self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"])) + self.log.debug( + "__ instance.data[families]: `{}`".format( + instance.data["families"])) # skip any without effects if not effects: @@ -102,7 +106,6 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): # add to data of representation version_data.update({ - "handles": version_data['handleStart'], "colorspace": item.sourceMediaColourTransform(), "colorspaceScript": instance.context.data["colorspace"], "families": ["plate", "lut"], @@ -132,7 +135,7 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): def copy_linked_files(self, effect, dst_dir): for k, v in effect["node"].items(): - if k in "file" and v is not '': + if k in "file" and v != '': base_name = os.path.basename(v) dst = os.path.join(dst_dir, base_name).replace("\\", "/") @@ -169,32 +172,44 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin): project_name = api.Session["AVALON_PROJECT"] a_template = anatomy.templates - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} + ) template = a_template['publish']['path'] # anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, project_name)) silo = asset.get('silo') - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we 
start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: diff --git a/pype/plugins/nukestudio/publish/validate_version.py b/pype/plugins/nukestudio/publish/validate_version.py deleted file mode 100644 index 194b270d51..0000000000 --- a/pype/plugins/nukestudio/publish/validate_version.py +++ /dev/null @@ -1,74 +0,0 @@ -import pyblish -from avalon import io -from pype.action import get_errored_instances_from_context -import pype.api as pype - -@pyblish.api.log -class RepairNukestudioVersionUp(pyblish.api.Action): - label = "Version Up Workfile" - on = "failed" - icon = "wrench" - - def process(self, context, plugin): - - errored_instances = get_errored_instances_from_context(context) - - # Apply pyblish logic to get the instances for the plug-in - instances = pyblish.api.instances_by_plugin(errored_instances, plugin) - - if instances: - project = context.data["activeProject"] - path = context.data.get("currentFile") - - new_path = pype.version_up(path) - - if project: - project.saveAs(new_path) - - self.log.info("Project workfile version was fixed") - - -class ValidateVersion(pyblish.api.InstancePlugin): - """Validate clip's versions. - - """ - - order = pyblish.api.ValidatorOrder - families = ["plate"] - label = "Validate Version" - actions = [RepairNukestudioVersionUp] - hosts = ["nukestudio"] - - def process(self, instance): - version = int(instance.data.get("version", 0)) - asset_name = instance.data.get("asset", None) - subset_name = instance.data.get("subset", None) - - assert version, "The file is missing version string! 
example: filename_v001.hrox `{}`" - - self.log.debug("Collected version: `{0}`".format(version)) - - found_v = 0 - try: - io.install() - project = io.find_one({"type": "project"}) - - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) - - subset = io.find_one({"type": "subset", - "parent": asset["_id"], - "name": subset_name}) - - version_db = io.find_one({ - 'type': 'version', - 'parent': subset["_id"], - 'name': version - }) or {} - found_v = version_db.get("name", 0) - self.log.debug("Found version: `{0}`".format(found_v)) - except Exception as e: - self.log.debug("Problem to get data from database for asset `{0}` subset `{1}`. Error: `{2}`".format(asset_name, subset_name, e)) - - assert (found_v != version), "Version must not be the same as in database `{0}`, Versions file: `{1}`, db: `{2}`".format(asset_name, version, found_v) diff --git a/pype/plugins/premiere/publish/integrate_assumed_destination.py b/pype/plugins/premiere/publish/integrate_assumed_destination.py index c82b70c66f..a0393e8a43 100644 --- a/pype/plugins/premiere/publish/integrate_assumed_destination.py +++ b/pype/plugins/premiere/publish/integrate_assumed_destination.py @@ -77,32 +77,44 @@ class IntegrateAssumedDestination(pyblish.api.InstancePlugin): asset_name = instance.data["asset"] project_name = api.Session["AVALON_PROJECT"] - project = io.find_one({"type": "project", - "name": project_name}, - projection={"config": True, "data": True}) + project = io.find_one( + { + "type": "project", + "name": project_name + }, + projection={"config": True, "data": True} + ) template = project["config"]["template"]["publish"] # anatomy = instance.context.data['anatomy'] - asset = io.find_one({"type": "asset", - "name": asset_name, - "parent": project["_id"]}) + asset = io.find_one({ + "type": "asset", + "name": asset_name, + "parent": project["_id"] + }) assert asset, ("No asset found by the name '{}' " "in project '{}'".format(asset_name, project_name)) silo = 
asset.get('silo') - subset = io.find_one({"type": "subset", - "name": subset_name, - "parent": asset["_id"]}) + subset = io.find_one({ + "type": "subset", + "name": subset_name, + "parent": asset["_id"] + }) # assume there is no version yet, we start at `1` version = None version_number = 1 if subset is not None: - version = io.find_one({"type": "version", - "parent": subset["_id"]}, - sort=[("name", -1)]) + version = io.find_one( + { + "type": "version", + "parent": subset["_id"] + }, + sort=[("name", -1)] + ) # if there is a subset there ought to be version if version is not None: diff --git a/pype/plugins/standalonepublisher/publish/collect_matchmove.py b/pype/plugins/standalonepublisher/publish/collect_matchmove.py index b46efc1cf3..5d9e8ddfb4 100644 --- a/pype/plugins/standalonepublisher/publish/collect_matchmove.py +++ b/pype/plugins/standalonepublisher/publish/collect_matchmove.py @@ -21,7 +21,7 @@ class CollectMatchmovePublish(pyblish.api.InstancePlugin): label = "Collect Matchmove - SA Publish" order = pyblish.api.CollectorOrder - family = ["matchmove"] + families = ["matchmove"] hosts = ["standalonepublisher"] def process(self, instance): diff --git a/pype/scripts/fusion_switch_shot.py b/pype/scripts/fusion_switch_shot.py index 26a93b9b9a..539bcf4f68 100644 --- a/pype/scripts/fusion_switch_shot.py +++ b/pype/scripts/fusion_switch_shot.py @@ -170,8 +170,10 @@ def switch(asset_name, filepath=None, new=True): assert asset, "Could not find '%s' in the database" % asset_name # Get current project - self._project = io.find_one({"type": "project", - "name": api.Session["AVALON_PROJECT"]}) + self._project = io.find_one({ + "type": "project", + "name": api.Session["AVALON_PROJECT"] + }) # Go to comp if not filepath: diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py index 89b74e258e..f128352974 100644 --- a/pype/scripts/otio_burnin.py +++ b/pype/scripts/otio_burnin.py @@ -5,6 +5,7 @@ import json import 
opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins from pypeapp.lib import config from pype import api as pype +from subprocess import Popen, PIPE # FFmpeg in PATH is required @@ -21,6 +22,7 @@ else: FFMPEG = ( '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s' ).format(os.path.normpath(ffmpeg_path + "ffmpeg")) + FFPROBE = ( '{} -v quiet -print_format json -show_format -show_streams %(source)s' ).format(os.path.normpath(ffmpeg_path + "ffprobe")) @@ -39,6 +41,25 @@ def _streams(source): return json.loads(out)['streams'] +def get_fps(str_value): + if str_value == "0/0": + print("Source has \"r_frame_rate\" value set to \"0/0\".") + return "Unknown" + + items = str_value.split("/") + if len(items) == 1: + fps = float(items[0]) + + elif len(items) == 2: + fps = float(items[0]) / float(items[1]) + + # Check if fps is integer or float number + if int(fps) == fps: + fps = int(fps) + + return str(fps) + + class ModifiedBurnins(ffmpeg_burnins.Burnins): ''' This is modification of OTIO FFmpeg Burnin adapter. @@ -95,6 +116,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): streams = _streams(source) super().__init__(source, streams) + if options_init: self.options_init.update(options_init) @@ -228,6 +250,33 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins): 'filters': filters }).strip() + def render(self, output, args=None, overwrite=False, **kwargs): + """ + Render the media to a specified destination. 
+ + :param str output: output file + :param str args: additional FFMPEG arguments + :param bool overwrite: overwrite the output if it exists + """ + if not overwrite and os.path.exists(output): + raise RuntimeError("Destination '%s' exists, please " + "use overwrite" % output) + + is_sequence = "%" in output + + command = self.command(output=output, + args=args, + overwrite=overwrite) + proc = Popen(command, shell=True) + proc.communicate() + if proc.returncode != 0: + raise RuntimeError("Failed to render '%s': %s'" + % (output, command)) + if is_sequence: + output = output % kwargs.get("duration") + if not os.path.exists(output): + raise RuntimeError("Failed to generate this fucking file '%s'" % output) + def example(input_path, output_path): options_init = { @@ -329,6 +378,17 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) frame_start = data.get("frame_start") frame_start_tc = data.get('frame_start_tc', frame_start) + + stream = burnin._streams[0] + if "resolution_width" not in data: + data["resolution_width"] = stream.get("width", "Unknown") + + if "resolution_height" not in data: + data["resolution_height"] = stream.get("height", "Unknown") + + if "fps" not in data: + data["fps"] = get_fps(stream.get("r_frame_rate", "0/0")) + for align_text, preset in presets.get('burnins', {}).items(): align = None if align_text == 'TOP_LEFT': @@ -383,12 +443,14 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) elif bi_func == 'timecode': burnin.add_timecode(align, start_frame=frame_start_tc) + elif bi_func == 'text': if not preset.get('text'): log.error('Text is not set for text function burnin!') return text = preset['text'].format(**data) burnin.add_text(text, align) + elif bi_func == "datetime": date_format = preset["format"] burnin.add_datetime(date_format, align) @@ -403,7 +465,7 @@ def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True) if codec_data is not []: codec_args = " 
".join(codec_data) - burnin.render(output_path, args=codec_args, overwrite=overwrite) + burnin.render(output_path, args=codec_args, overwrite=overwrite, **data) if __name__ == '__main__': @@ -415,4 +477,4 @@ if __name__ == '__main__': data['codec'], data['output'], data['burnin_data'] - ) + ) diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py index 5517cfeb4c..620ee3d851 100644 --- a/pype/scripts/publish_filesequence.py +++ b/pype/scripts/publish_filesequence.py @@ -1,9 +1,12 @@ """This module is used for command line publishing of image sequences.""" import os +import sys +import argparse import logging import subprocess import platform + try: from shutil import which except ImportError: @@ -23,7 +26,6 @@ error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}" def __main__(): - import argparse parser = argparse.ArgumentParser() parser.add_argument("--paths", nargs="*", @@ -43,7 +45,11 @@ def __main__(): print("Running pype ...") auto_pype_root = os.path.dirname(os.path.abspath(__file__)) auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..") + auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root + if os.environ.get('PYPE_ROOT'): + print("Got Pype location from environment: {}".format( + os.environ.get('PYPE_ROOT'))) pype_command = "pype.ps1" if platform.system().lower() == "linux": @@ -69,7 +75,7 @@ def __main__(): print("Set pype root to: {}".format(pype_root)) print("Paths: {}".format(kwargs.paths or [os.getcwd()])) - paths = kwargs.paths or [os.getcwd()] + paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()] # noqa args = [ os.path.join(pype_root, pype_command), diff --git a/pype/setdress_api.py b/pype/setdress_api.py index 55a6b4a2fb..707a5b713f 100644 --- a/pype/setdress_api.py +++ b/pype/setdress_api.py @@ -462,8 +462,12 @@ def update_scene(set_container, containers, current_data, new_data, new_file): # Check whether the conversion can be done by the 
Loader. # They *must* use the same asset, subset and Loader for # `api.update` to make sense. - old = io.find_one({"_id": io.ObjectId(representation_current)}) - new = io.find_one({"_id": io.ObjectId(representation_new)}) + old = io.find_one({ + "_id": io.ObjectId(representation_current) + }) + new = io.find_one({ + "_id": io.ObjectId(representation_new) + }) is_valid = compare_representations(old=old, new=new) if not is_valid: log.error("Skipping: %s. See log for details.", diff --git a/pype/vendor/ftrack_api_old/_version.py b/pype/vendor/ftrack_api_old/_version.py index 07f744ca5d..aa1a8c4aba 100644 --- a/pype/vendor/ftrack_api_old/_version.py +++ b/pype/vendor/ftrack_api_old/_version.py @@ -1 +1 @@ -__version__ = '1.3.3' +__version__ = '1.8.2' diff --git a/pype/vendor/ftrack_api_old/_weakref.py b/pype/vendor/ftrack_api_old/_weakref.py new file mode 100644 index 0000000000..69cc6f4b4f --- /dev/null +++ b/pype/vendor/ftrack_api_old/_weakref.py @@ -0,0 +1,66 @@ +""" +Yet another backport of WeakMethod for Python 2.7. +Changes include removing exception chaining and adding args to super() calls. + +Copyright (c) 2001-2019 Python Software Foundation.All rights reserved. + +Full license available in LICENSE.python. +""" +from weakref import ref + + +class WeakMethod(ref): + """ + A custom `weakref.ref` subclass which simulates a weak reference to + a bound method, working around the lifetime problem of bound methods. + """ + + __slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__" + + def __new__(cls, meth, callback=None): + try: + obj = meth.__self__ + func = meth.__func__ + except AttributeError: + raise TypeError( + "argument should be a bound method, not {}".format(type(meth)) + ) + + def _cb(arg): + # The self-weakref trick is needed to avoid creating a reference + # cycle. 
+ self = self_wr() + if self._alive: + self._alive = False + if callback is not None: + callback(self) + + self = ref.__new__(cls, obj, _cb) + self._func_ref = ref(func, _cb) + self._meth_type = type(meth) + self._alive = True + self_wr = ref(self) + return self + + def __call__(self): + obj = super(WeakMethod, self).__call__() + func = self._func_ref() + if obj is None or func is None: + return None + return self._meth_type(func, obj) + + def __eq__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is other + return ref.__eq__(self, other) and self._func_ref == other._func_ref + return NotImplemented + + def __ne__(self, other): + if isinstance(other, WeakMethod): + if not self._alive or not other._alive: + return self is not other + return ref.__ne__(self, other) or self._func_ref != other._func_ref + return NotImplemented + + __hash__ = ref.__hash__ diff --git a/pype/vendor/ftrack_api_old/attribute.py b/pype/vendor/ftrack_api_old/attribute.py index 66840bed66..47fd6c9616 100644 --- a/pype/vendor/ftrack_api_old/attribute.py +++ b/pype/vendor/ftrack_api_old/attribute.py @@ -148,7 +148,8 @@ class Attribute(object): '''A name and value pair persisted remotely.''' def __init__( - self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True + self, name, default_value=ftrack_api_old.symbol.NOT_SET, mutable=True, + computed=False ): '''Initialise attribute with *name*. @@ -161,10 +162,14 @@ class Attribute(object): are :attr:`ftrack_api_old.symbol.NOT_SET`. The exception to this is when the target value is also :attr:`ftrack_api_old.symbol.NOT_SET`. + If *computed* is set to True the value is a remote side computed value + and should not be long-term cached. 
+ ''' super(Attribute, self).__init__() self._name = name self._mutable = mutable + self._computed = computed self.default_value = default_value self._local_key = 'local' @@ -205,6 +210,11 @@ class Attribute(object): '''Return whether attribute is mutable.''' return self._mutable + @property + def computed(self): + '''Return whether attribute is computed.''' + return self._computed + def get_value(self, entity): '''Return current value for *entity*. diff --git a/pype/vendor/ftrack_api_old/entity/factory.py b/pype/vendor/ftrack_api_old/entity/factory.py index 16721514bd..f47c92e563 100644 --- a/pype/vendor/ftrack_api_old/entity/factory.py +++ b/pype/vendor/ftrack_api_old/entity/factory.py @@ -49,9 +49,11 @@ class Factory(object): # Build attributes for class. attributes = ftrack_api_old.attribute.Attributes() - immutable = schema.get('immutable', []) + immutable_properties = schema.get('immutable', []) + computed_properties = schema.get('computed', []) for name, fragment in schema.get('properties', {}).items(): - mutable = name not in immutable + mutable = name not in immutable_properties + computed = name in computed_properties default = fragment.get('default', ftrack_api_old.symbol.NOT_SET) if default == '{uid}': @@ -62,7 +64,8 @@ class Factory(object): if data_type is not ftrack_api_old.symbol.NOT_SET: if data_type in ( - 'string', 'boolean', 'integer', 'number', 'variable' + 'string', 'boolean', 'integer', 'number', 'variable', + 'object' ): # Basic scalar attribute. 
if data_type == 'number': @@ -74,7 +77,7 @@ class Factory(object): data_type = 'datetime' attribute = self.create_scalar_attribute( - class_name, name, mutable, default, data_type + class_name, name, mutable, computed, default, data_type ) if attribute: attributes.add(attribute) @@ -139,11 +142,12 @@ class Factory(object): return cls def create_scalar_attribute( - self, class_name, name, mutable, default, data_type + self, class_name, name, mutable, computed, default, data_type ): '''Return appropriate scalar attribute instance.''' return ftrack_api_old.attribute.ScalarAttribute( - name, data_type=data_type, default_value=default, mutable=mutable + name, data_type=data_type, default_value=default, mutable=mutable, + computed=computed ) def create_reference_attribute(self, class_name, name, mutable, reference): diff --git a/pype/vendor/ftrack_api_old/entity/location.py b/pype/vendor/ftrack_api_old/entity/location.py index d48264abc2..8d9d52c654 100644 --- a/pype/vendor/ftrack_api_old/entity/location.py +++ b/pype/vendor/ftrack_api_old/entity/location.py @@ -526,7 +526,8 @@ class Location(ftrack_api_old.entity.base.Entity): for index, resource_identifier in enumerate(resource_identifiers): resource_identifiers[index] = ( self.resource_identifier_transformer.decode( - resource_identifier + resource_identifier, + context={'component': components[index]} ) ) diff --git a/pype/vendor/ftrack_api_old/entity/note.py b/pype/vendor/ftrack_api_old/entity/note.py index 4cacf6ac8a..c628886fd9 100644 --- a/pype/vendor/ftrack_api_old/entity/note.py +++ b/pype/vendor/ftrack_api_old/entity/note.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2015 ftrack +import warnings + import ftrack_api_old.entity.base @@ -33,26 +35,52 @@ class Note(ftrack_api_old.entity.base.Entity): class CreateNoteMixin(object): '''Mixin to add create_note method on entity class.''' - def create_note(self, content, author, recipients=None, category=None): + def create_note( + self, content, 
author, recipients=None, category=None, labels=None + ): '''Create note with *content*, *author*. - Note category can be set by including *category* and *recipients* - can be specified as a list of user or group instances. + NoteLabels can be set by including *labels*. + + Note category can be set by including *category*. + + *recipients* can be specified as a list of user or group instances. ''' + note_label_support = 'NoteLabel' in self.session.types + + if not labels: + labels = [] + + if labels and not note_label_support: + raise ValueError( + 'NoteLabel is not supported by the current server version.' + ) + + if category and labels: + raise ValueError( + 'Both category and labels cannot be set at the same time.' + ) + if not recipients: recipients = [] - category_id = None - if category: - category_id = category['id'] - data = { 'content': content, - 'author': author, - 'category_id': category_id + 'author': author } + if category: + if note_label_support: + labels = [category] + warnings.warn( + 'category argument will be removed in an upcoming version, ' + 'please use labels instead.', + PendingDeprecationWarning + ) + else: + data['category_id'] = category['id'] + note = self.session.create('Note', data) self['notes'].append(note) @@ -65,4 +93,13 @@ class CreateNoteMixin(object): note['recipients'].append(recipient) + for label in labels: + self.session.create( + 'NoteLabelLink', + { + 'label_id': label['id'], + 'note_id': note['id'] + } + ) + return note diff --git a/pype/vendor/ftrack_api_old/event/expression.py b/pype/vendor/ftrack_api_old/event/expression.py index e10cd85844..8de4be0d71 100644 --- a/pype/vendor/ftrack_api_old/event/expression.py +++ b/pype/vendor/ftrack_api_old/event/expression.py @@ -3,14 +3,15 @@ from operator import eq, ne, ge, le, gt, lt -from pyparsing import (ParserElement, Group, Word, CaselessKeyword, Forward, +from pyparsing import (Group, Word, CaselessKeyword, Forward, FollowedBy, Suppress, oneOf, OneOrMore, Optional, 
alphanums, quotedString, removeQuotes) import ftrack_api_old.exception -# Optimise parsing using packrat memoisation feature. -ParserElement.enablePackrat() +# Do not enable packrat since it is not thread-safe and will result in parsing +# exceptions in a multi threaded environment. +# ParserElement.enablePackrat() class Parser(object): diff --git a/pype/vendor/ftrack_api_old/event/hub.py b/pype/vendor/ftrack_api_old/event/hub.py index 25410aa1e1..3ffbd38056 100644 --- a/pype/vendor/ftrack_api_old/event/hub.py +++ b/pype/vendor/ftrack_api_old/event/hub.py @@ -14,6 +14,7 @@ import operator import functools import json import socket +import warnings import requests import requests.exceptions @@ -40,9 +41,20 @@ ServerDetails = collections.namedtuple('ServerDetails', [ ]) + + class EventHub(object): '''Manage routing of events.''' + _future_signature_warning = ( + 'When constructing your Session object you did not explicitly define ' + 'auto_connect_event_hub as True even though you appear to be publishing ' + 'and / or subscribing to asynchronous events. In version version 2.0 of ' + 'the ftrack-python-api the default behavior will change from True ' + 'to False. Please make sure to update your tools. You can read more at ' + 'http://ftrack-python-api.rtd.ftrack.com/en/stable/release/migration.html' + ) + def __init__(self, server_url, api_user, api_key): '''Initialise hub, connecting to ftrack *server_url*. @@ -76,6 +88,8 @@ class EventHub(object): self._auto_reconnect_attempts = 30 self._auto_reconnect_delay = 10 + self._deprecation_warning_auto_connect = False + # Mapping of Socket.IO codes to meaning. self._code_name_mapping = { '0': 'disconnect', @@ -134,6 +148,9 @@ class EventHub(object): connected or connection fails. ''' + + self._deprecation_warning_auto_connect = False + if self.connected: raise ftrack_api_old.exception.EventHubConnectionError( 'Already connected.' 
@@ -164,17 +181,26 @@ class EventHub(object): # https://docs.python.org/2/library/socket.html#socket.socket.setblocking self._connection = websocket.create_connection(url, timeout=60) - except Exception: + except Exception as error: + error_message = ( + 'Failed to connect to event server at {server_url} with ' + 'error: "{error}".' + ) + + error_details = { + 'error': unicode(error), + 'server_url': self.get_server_url() + } + self.logger.debug( L( - 'Error connecting to event server at {0}.', - self.get_server_url() + error_message, **error_details ), exc_info=1 ) raise ftrack_api_old.exception.EventHubConnectionError( - 'Failed to connect to event server at {0}.' - .format(self.get_server_url()) + error_message, + details=error_details ) # Start background processing thread. @@ -543,6 +569,11 @@ class EventHub(object): event will be caught by this method and ignored. ''' + if self._deprecation_warning_auto_connect and not synchronous: + warnings.warn( + self._future_signature_warning, FutureWarning + ) + try: return self._publish( event, synchronous=synchronous, on_reply=on_reply @@ -700,18 +731,23 @@ class EventHub(object): # Automatically publish a non None response as a reply when not in # synchronous mode. - if not synchronous and response is not None: - - try: - self.publish_reply( - event, data=response, source=subscriber.metadata + if not synchronous: + if self._deprecation_warning_auto_connect: + warnings.warn( + self._future_signature_warning, FutureWarning ) - except Exception: - self.logger.exception(L( - 'Error publishing response {0} from subscriber {1} ' - 'for event {2}.', response, subscriber, event - )) + if response is not None: + try: + self.publish_reply( + event, data=response, source=subscriber.metadata + ) + + except Exception: + self.logger.exception(L( + 'Error publishing response {0} from subscriber {1} ' + 'for event {2}.', response, subscriber, event + )) # Check whether to continue processing topic event. 
if event.is_stopped(): @@ -881,6 +917,7 @@ class EventHub(object): if code_name == 'connect': self.logger.debug('Connected to event server.') event = ftrack_api_old.event.base.Event('ftrack.meta.connected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'disconnect': @@ -901,6 +938,7 @@ class EventHub(object): if not self.connected: event = ftrack_api_old.event.base.Event('ftrack.meta.disconnected') + self._prepare_event(event) self._event_queue.put(event) elif code_name == 'heartbeat': diff --git a/pype/vendor/ftrack_api_old/logging.py b/pype/vendor/ftrack_api_old/logging.py index 2b28ce900b..41969c5b2a 100644 --- a/pype/vendor/ftrack_api_old/logging.py +++ b/pype/vendor/ftrack_api_old/logging.py @@ -1,6 +1,23 @@ # :coding: utf-8 # :copyright: Copyright (c) 2016 ftrack +import functools +import warnings + + +def deprecation_warning(message): + def decorator(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + warnings.warn( + message, + PendingDeprecationWarning + ) + return function(*args, **kwargs) + return wrapper + + return decorator + class LazyLogMessage(object): '''A log message that can be evaluated lazily for improved performance. 
diff --git a/pype/vendor/ftrack_api_old/session.py b/pype/vendor/ftrack_api_old/session.py index c313203a0c..0986962ca4 100644 --- a/pype/vendor/ftrack_api_old/session.py +++ b/pype/vendor/ftrack_api_old/session.py @@ -16,6 +16,7 @@ import hashlib import tempfile import threading import atexit +import warnings import requests import requests.auth @@ -42,8 +43,14 @@ import ftrack_api_old.structure.origin import ftrack_api_old.structure.entity_id import ftrack_api_old.accessor.server import ftrack_api_old._centralized_storage_scenario +import ftrack_api_old.logging from ftrack_api_old.logging import LazyLogMessage as L +try: + from weakref import WeakMethod +except ImportError: + from ftrack_api_old._weakref import WeakMethod + class SessionAuthentication(requests.auth.AuthBase): '''Attach ftrack session authentication information to requests.''' @@ -69,7 +76,7 @@ class Session(object): def __init__( self, server_url=None, api_key=None, api_user=None, auto_populate=True, plugin_paths=None, cache=None, cache_key_maker=None, - auto_connect_event_hub=True, schema_cache_path=None, + auto_connect_event_hub=None, schema_cache_path=None, plugin_arguments=None ): '''Initialise session. @@ -233,7 +240,8 @@ class Session(object): self._api_key ) - if auto_connect_event_hub: + self._auto_connect_event_hub_thread = None + if auto_connect_event_hub in (None, True): # Connect to event hub in background thread so as not to block main # session usage waiting for event hub connection. self._auto_connect_event_hub_thread = threading.Thread( @@ -242,8 +250,14 @@ class Session(object): self._auto_connect_event_hub_thread.daemon = True self._auto_connect_event_hub_thread.start() + # To help with migration from auto_connect_event_hub default changing + # from True to False. + self._event_hub._deprecation_warning_auto_connect = ( + auto_connect_event_hub is None + ) + # Register to auto-close session on exit. 
- atexit.register(self.close) + atexit.register(WeakMethod(self.close)) self._plugin_paths = plugin_paths if self._plugin_paths is None: @@ -271,6 +285,15 @@ class Session(object): ftrack_api_old._centralized_storage_scenario.register(self) self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.ready', + data=dict( + session=self + ) + ), + synchronous=True + ) def __enter__(self): '''Return session as context manager.''' @@ -389,7 +412,8 @@ class Session(object): try: self.event_hub.disconnect() - self._auto_connect_event_hub_thread.join() + if self._auto_connect_event_hub_thread: + self._auto_connect_event_hub_thread.join() except ftrack_api_old.exception.EventHubConnectionError: pass @@ -428,6 +452,16 @@ class Session(object): # Re-configure certain session aspects that may be dependant on cache. self._configure_locations() + self.event_hub.publish( + ftrack_api_old.event.base.Event( + topic='ftrack.api.session.reset', + data=dict( + session=self + ) + ), + synchronous=True + ) + def auto_populating(self, auto_populate): '''Temporarily set auto populate to *auto_populate*. @@ -508,7 +542,7 @@ class Session(object): 'entity_key': entity.get('id') }) - result = self._call( + result = self.call( [payload] ) @@ -790,12 +824,13 @@ class Session(object): }] # TODO: When should this execute? How to handle background=True? - results = self._call(batch) + results = self.call(batch) # Merge entities into local cache and return merged entities. 
data = [] + merged = dict() for entity in results[0]['data']: - data.append(self.merge(entity)) + data.append(self._merge_recursive(entity, merged)) return data, results[0]['metadata'] @@ -856,6 +891,48 @@ class Session(object): else: return value + def _merge_recursive(self, entity, merged=None): + '''Merge *entity* and all its attributes recursivly.''' + log_debug = self.logger.isEnabledFor(logging.DEBUG) + + if merged is None: + merged = {} + + attached = self.merge(entity, merged) + + for attribute in entity.attributes: + # Remote attributes. + remote_value = attribute.get_remote_value(entity) + + if isinstance( + remote_value, + ( + ftrack_api_old.entity.base.Entity, + ftrack_api_old.collection.Collection, + ftrack_api_old.collection.MappedCollectionProxy + ) + ): + log_debug and self.logger.debug( + 'Merging remote value for attribute {0}.'.format(attribute) + ) + + if isinstance(remote_value, ftrack_api_old.entity.base.Entity): + self._merge_recursive(remote_value, merged=merged) + + elif isinstance( + remote_value, ftrack_api_old.collection.Collection + ): + for entry in remote_value: + self._merge_recursive(entry, merged=merged) + + elif isinstance( + remote_value, ftrack_api_old.collection.MappedCollectionProxy + ): + for entry in remote_value.collection: + self._merge_recursive(entry, merged=merged) + + return attached + def _merge_entity(self, entity, merged=None): '''Merge *entity* into session returning merged entity. @@ -1185,7 +1262,7 @@ class Session(object): # Process batch. if batch: - result = self._call(batch) + result = self.call(batch) # Clear recorded operations. 
self.recorded_operations.clear() @@ -1260,7 +1337,7 @@ class Session(object): def _fetch_server_information(self): '''Return server information.''' - result = self._call([{'action': 'query_server_information'}]) + result = self.call([{'action': 'query_server_information'}]) return result[0] def _discover_plugins(self, plugin_arguments=None): @@ -1362,7 +1439,7 @@ class Session(object): 'Loading schemas from server due to hash not matching.' 'Local: {0!r} != Server: {1!r}', local_schema_hash, server_hash )) - schemas = self._call([{'action': 'query_schemas'}])[0] + schemas = self.call([{'action': 'query_schemas'}])[0] if schema_cache_path: try: @@ -1525,8 +1602,24 @@ class Session(object): synchronous=True ) + @ftrack_api_old.logging.deprecation_warning( + 'Session._call is now available as public method Session.call. The ' + 'private method will be removed in version 2.0.' + ) def _call(self, data): - '''Make request to server with *data*.''' + '''Make request to server with *data* batch describing the actions. + + .. note:: + + This private method is now available as public method + :meth:`entity_reference`. This alias remains for backwards + compatibility, but will be removed in version 2.0. + + ''' + return self.call(data) + + def call(self, data): + '''Make request to server with *data* batch describing the actions.''' url = self._server_url + '/api' headers = { 'content-type': 'application/json', @@ -1553,7 +1646,7 @@ class Session(object): 'Server reported error in unexpected format. 
Raw error was: {0}' .format(response.text) ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) else: @@ -1562,7 +1655,7 @@ class Session(object): error_message = 'Server reported error: {0}({1})'.format( result['exception'], result['content'] ) - self.logger.error(error_message) + self.logger.exception(error_message) raise ftrack_api_old.exception.ServerError(error_message) return result @@ -1620,12 +1713,12 @@ class Session(object): if "entity_data" in data: for key, value in data["entity_data"].items(): if isinstance(value, ftrack_api_old.entity.base.Entity): - data["entity_data"][key] = self._entity_reference(value) + data["entity_data"][key] = self.entity_reference(value) return data if isinstance(item, ftrack_api_old.entity.base.Entity): - data = self._entity_reference(item) + data = self.entity_reference(item) with self.auto_populating(True): @@ -1646,14 +1739,15 @@ class Session(object): value = attribute.get_local_value(item) elif entity_attribute_strategy == 'persisted_only': - value = attribute.get_remote_value(item) + if not attribute.computed: + value = attribute.get_remote_value(item) if value is not ftrack_api_old.symbol.NOT_SET: if isinstance( attribute, ftrack_api_old.attribute.ReferenceAttribute ): if isinstance(value, ftrack_api_old.entity.base.Entity): - value = self._entity_reference(value) + value = self.entity_reference(value) data[attribute.name] = value @@ -1668,14 +1762,14 @@ class Session(object): if isinstance(item, ftrack_api_old.collection.Collection): data = [] for entity in item: - data.append(self._entity_reference(entity)) + data.append(self.entity_reference(entity)) return data raise TypeError('{0!r} is not JSON serializable'.format(item)) - def _entity_reference(self, entity): - '''Return reference to *entity*. + def entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. 
Return a mapping containing the __entity_type__ of the entity along with the key, value pairs that make up it's primary key. @@ -1689,6 +1783,26 @@ class Session(object): return reference + @ftrack_api_old.logging.deprecation_warning( + 'Session._entity_reference is now available as public method ' + 'Session.entity_reference. The private method will be removed ' + 'in version 2.0.' + ) + def _entity_reference(self, entity): + '''Return entity reference that uniquely identifies *entity*. + + Return a mapping containing the __entity_type__ of the entity along + with the key, value pairs that make up it's primary key. + + .. note:: + + This private method is now available as public method + :meth:`entity_reference`. This alias remains for backwards + compatibility, but will be removed in version 2.0. + + ''' + return self.entity_reference(entity) + def decode(self, string): '''Return decoded JSON *string* as Python object.''' with self.operation_recording(False): @@ -2016,6 +2130,10 @@ class Session(object): return availabilities + @ftrack_api_old.logging.deprecation_warning( + 'Session.delayed_job has been deprecated in favour of session.call. ' + 'Please refer to the release notes for more information.' + ) def delayed_job(self, job_type): '''Execute a delayed job on the server, a `ftrack.entity.job.Job` is returned. @@ -2033,7 +2151,7 @@ class Session(object): } try: - result = self._call( + result = self.call( [operation] )[0] @@ -2070,7 +2188,7 @@ class Session(object): ) try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2172,7 +2290,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. 
@@ -2212,7 +2330,7 @@ class Session(object): } try: - result = self._call([operation]) + result = self.call([operation]) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2258,7 +2376,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. @@ -2306,7 +2424,7 @@ class Session(object): ) try: - self._call(operations) + self.call(operations) except ftrack_api_old.exception.ServerError as error: # Raise informative error if the action is not supported. if 'Invalid action u\'send_review_session_invite\'' in error.message: diff --git a/pype/vendor/ftrack_api_old/symbol.py b/pype/vendor/ftrack_api_old/symbol.py index 10b3f55bd5..f46760f634 100644 --- a/pype/vendor/ftrack_api_old/symbol.py +++ b/pype/vendor/ftrack_api_old/symbol.py @@ -1,6 +1,8 @@ # :coding: utf-8 # :copyright: Copyright (c) 2014 ftrack +import os + class Symbol(object): '''A constant symbol.''' @@ -68,8 +70,8 @@ CONNECT_LOCATION_ID = '07b82a97-8cf9-11e3-9383-20c9d081909b' #: Identifier of builtin server location. SERVER_LOCATION_ID = '3a372bde-05bc-11e4-8908-20c9d081909b' -#: Chunk size used when working with data. -CHUNK_SIZE = 8192 +#: Chunk size used when working with data, default to 1Mb. 
+CHUNK_SIZE = int(os.getenv('FTRACK_API_FILE_CHUNK_SIZE', 0)) or 1024*1024 #: Symbol representing syncing users with ldap JOB_SYNC_USERS_LDAP = Symbol('SYNC_USERS_LDAP') diff --git a/res/app_icons/blender.png b/res/app_icons/blender.png new file mode 100644 index 0000000000..6070a51fae Binary files /dev/null and b/res/app_icons/blender.png differ diff --git a/res/ftrack/action_icons/Delivery.svg b/res/ftrack/action_icons/Delivery.svg new file mode 100644 index 0000000000..3380487c31 --- /dev/null +++ b/res/ftrack/action_icons/Delivery.svg @@ -0,0 +1,34 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/schema/application-1.0.json b/schema/application-1.0.json new file mode 100644 index 0000000000..e2418037c6 --- /dev/null +++ b/schema/application-1.0.json @@ -0,0 +1,68 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:application-1.0", + "description": "An application definition.", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "label", + "application_dir", + "executable" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "label": { + "description": "Nice name of application.", + "type": "string" + }, + "application_dir": { + "description": "Name of directory used for application resources.", + "type": "string" + }, + "executable": { + "description": "Name of callable executable, this is called to launch the application", + "type": "string" + }, + "description": { + "description": "Description of application.", + "type": "string" + }, + "environment": { + "description": "Key/value pairs for environment variables related to this application. 
Supports lists for paths, such as PYTHONPATH.", + "type": "object", + "items": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ] + } + }, + "default_dirs": { + "type": "array", + "items": { + "type": "string" + } + }, + "copy": { + "type": "object", + "patternProperties": { + "^.*$": { + "anyOf": [ + {"type": "string"}, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + } +} diff --git a/schema/asset-1.0.json b/schema/asset-1.0.json new file mode 100644 index 0000000000..6f3665c628 --- /dev/null +++ b/schema/asset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-1.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "subsets" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "subsets": { + "type": "array", + "items": { + "$ref": "subset.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/asset-2.0.json b/schema/asset-2.0.json new file mode 100644 index 0000000000..066cb33498 --- /dev/null +++ b/schema/asset-2.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-2.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "silo", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:asset-2.0"], + "example": "avalon-core:asset-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" 
+ }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/asset-3.0.json b/schema/asset-3.0.json new file mode 100644 index 0000000000..a3a22e917b --- /dev/null +++ b/schema/asset-3.0.json @@ -0,0 +1,55 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:asset-3.0", + "description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:asset-3.0", "pype:asset-3.0"], + "example": "avalon-core:asset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["asset"], + "example": "asset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "Bruce" + }, + "silo": { + "description": "Group or container of asset", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "assets" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": {"key": "value"} + } + }, + + "definitions": {} +} diff --git a/schema/config-1.0.json b/schema/config-1.0.json new file mode 100644 index 0000000000..b3c4362f41 --- /dev/null +++ b/schema/config-1.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": false, + "required": [ 
+ "template", + "tasks", + "apps" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "template": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^.*$": { + "type": "string" + } + } + }, + "tasks": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "apps": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "group": {"type": "string"}, + "label": {"type": "string"} + }, + "required": ["name"] + } + }, + "families": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "label": {"type": "string"}, + "hideFilter": {"type": "boolean"} + }, + "required": ["name"] + } + }, + "groups": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": {"type": "string"}, + "icon": {"type": "string"}, + "color": {"type": "string"}, + "order": {"type": ["integer", "number"]} + }, + "required": ["name"] + } + }, + "copy": { + "type": "object" + } + } +} diff --git a/schema/container-1.0.json b/schema/container-1.0.json new file mode 100644 index 0000000000..d9e4e39f7f --- /dev/null +++ b/schema/container-1.0.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:container-1.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "id", + "objectName", + "name", + "author", + "loader", + "families", + "time", + "subset", + "asset", + "representation", + "version", + "silo", + "path", + "source" + ], + "properties": { + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": 
["pyblish.mindbender.container"], + "example": "pyblish.mindbender.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "name": { + "description": "Full name of application object", + "type": "string", + "example": "modelDefault" + }, + "author": { + "description": "Name of the author of the published version", + "type": "string", + "example": "Marcus Ottosson" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "families": { + "description": "Families associated with the this subset", + "type": "string", + "example": "mindbender.model" + }, + "time": { + "description": "File-system safe, formatted time", + "type": "string", + "example": "20170329T131545Z" + }, + "subset": { + "description": "Name of source subset", + "type": "string", + "example": "modelDefault" + }, + "asset": { + "description": "Name of source asset", + "type": "string" , + "example": "Bruce" + }, + "representation": { + "description": "Name of source representation", + "type": "string" , + "example": ".ma" + }, + "version": { + "description": "Version number", + "type": "number", + "example": 12 + }, + "silo": { + "description": "Silo of parent asset", + "type": "string", + "example": "assets" + }, + "path": { + "description": "Absolute path on disk", + "type": "string", + "example": "{root}/assets/Bruce/publish/rigDefault/v002" + }, + "source": { + "description": "Absolute path to file from which this version was published", + "type": "string", + "example": "{root}/assets/Bruce/work/rigging/maya/scenes/rig_v001.ma" + } + } +} diff --git a/schema/container-2.0.json b/schema/container-2.0.json new file mode 100644 index 0000000000..7b84209ea0 --- /dev/null +++ b/schema/container-2.0.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": 
"pype:container-2.0", + "description": "A loaded asset", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "id", + "objectName", + "name", + "namespace", + "loader", + "representation" + ], + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:container-2.0", "pype:container-2.0"], + "example": "pype:container-2.0" + }, + "id": { + "description": "Identifier for finding object in host", + "type": "string", + "enum": ["pyblish.avalon.container"], + "example": "pyblish.avalon.container" + }, + "objectName": { + "description": "Name of internal object, such as the objectSet in Maya.", + "type": "string", + "example": "Bruce_:rigDefault_CON" + }, + "loader": { + "description": "Name of loader plug-in used to produce this container", + "type": "string", + "example": "ModelLoader" + }, + "name": { + "description": "Internal object name of container in application", + "type": "string", + "example": "modelDefault_01" + }, + "namespace": { + "description": "Internal namespace of container in application", + "type": "string", + "example": "Bruce_" + }, + "representation": { + "description": "Unique id of representation in database", + "type": "string", + "example": "59523f355f8c1b5f6c5e8348" + } + } +} \ No newline at end of file diff --git a/schema/inventory-1.0.json b/schema/inventory-1.0.json new file mode 100644 index 0000000000..888ba7945a --- /dev/null +++ b/schema/inventory-1.0.json @@ -0,0 +1,10 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:config-1.0", + "description": "A project configuration.", + + "type": "object", + + "additionalProperties": true +} diff --git a/schema/project-2.0.json b/schema/project-2.0.json new file mode 100644 index 0000000000..ad0e460f4d --- /dev/null +++ b/schema/project-2.0.json @@ -0,0 +1,86 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:project-2.0", + 
"description": "A unit of data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "name", + "data", + "config" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:project-2.0", "pype:project-2.0"], + "example": "avalon-core:project-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["project"], + "example": "project" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "hulk" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "fps": 24, + "width": 1920, + "height": 1080 + } + }, + "config": { + "type": "object", + "description": "Document metadata", + "example": { + "schema": "pype:config-1.0", + "apps": [ + { + "name": "maya2016", + "label": "Autodesk Maya 2016" + }, + { + "name": "nuke10", + "label": "The Foundry Nuke 10.0" + } + ], + "tasks": [ + {"name": "model"}, + {"name": "render"}, + {"name": "animate"}, + {"name": "rig"}, + {"name": "lookdev"}, + {"name": "layout"} + ], + "template": { + "work": + "{root}/{project}/{silo}/{asset}/work/{task}/{app}", + "publish": + "{root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/{subset}.{representation}" + } + }, + "$ref": "config-1.0.json" + } + }, + + "definitions": {} +} diff --git a/schema/representation-1.0.json b/schema/representation-1.0.json new file mode 100644 index 0000000000..10ae72928e --- /dev/null +++ b/schema/representation-1.0.json @@ -0,0 +1,28 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:representation-1.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "format", + "path" + 
], + + "properties": { + "schema": {"type": "string"}, + "format": { + "description": "File extension, including '.'", + "type": "string" + }, + "path": { + "description": "Unformatted path to version.", + "type": "string" + } + } +} diff --git a/schema/representation-2.0.json b/schema/representation-2.0.json new file mode 100644 index 0000000000..e12dea8564 --- /dev/null +++ b/schema/representation-2.0.json @@ -0,0 +1,78 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:representation-2.0", + "description": "The inverse of an instance", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string", + "enum": ["avalon-core:representation-2.0", "pype:representation-2.0"], + "example": "pype:representation-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["representation"], + "example": "representation" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of representation", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "abc" + }, + "data": { + "description": "Document metadata", + "type": "object", + "example": { + "label": "Alembic" + } + }, + "dependencies": { + "description": "Other representation that this representation depends on", + "type": "array", + "items": {"type": "string"}, + "example": [ + "592d547a5f8c1b388093c145" + ] + }, + "context": { + "description": "Summary of the context to which this representation belong.", + "type": "object", + "properties": { + "project": {"type": "object"}, + "asset": {"type": "string"}, + "silo": {"type": ["string", "null"]}, + "subset": {"type": "string"}, + "version": {"type": "number"}, + "representation": {"type": "string"} + }, + "example": { + 
"project": "hulk", + "asset": "Bruce", + "silo": "assets", + "subset": "rigDefault", + "version": 12, + "representation": "ma" + } + } + } +} diff --git a/schema/session-1.0.json b/schema/session-1.0.json new file mode 100644 index 0000000000..2b201f9c61 --- /dev/null +++ b/schema/session-1.0.json @@ -0,0 +1,143 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-1.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_SILO", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": "Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used 
in e.g. graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. 
extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} \ No newline at end of file diff --git a/schema/session-2.0.json b/schema/session-2.0.json new file mode 100644 index 0000000000..006a9e2dbf --- /dev/null +++ b/schema/session-2.0.json @@ -0,0 +1,142 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:session-2.0", + "description": "The Avalon environment", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "AVALON_PROJECTS", + "AVALON_PROJECT", + "AVALON_ASSET", + "AVALON_CONFIG" + ], + + "properties": { + "AVALON_PROJECTS": { + "description": "Absolute path to root of project directories", + "type": "string", + "example": "/nas/projects" + }, + "AVALON_PROJECT": { + "description": "Name of project", + "type": "string", + "pattern": "^\\w*$", + "example": "Hulk" + }, + "AVALON_ASSET": { + "description": "Name of asset", + "type": "string", + "pattern": "^\\w*$", + "example": "Bruce" + }, + "AVALON_SILO": { + "description": "Name of asset group or container", + "type": "string", + "pattern": "^\\w*$", + "example": "assets" + }, + "AVALON_TASK": { + "description": "Name of task", + "type": "string", + "pattern": "^\\w*$", + "example": "modeling" + }, + "AVALON_CONFIG": { + "description": "Name of Avalon configuration", + "type": "string", + "pattern": "^\\w*$", + "example": "polly" + }, + "AVALON_APP": { + "description": "Name of application", + "type": "string", + "pattern": "^\\w*$", + "example": "maya2016" + }, + "AVALON_MONGO": { + "description": "Address to the asset database", + "type": "string", + "pattern": "^mongodb://[\\w/@:.]*$", + "example": "mongodb://localhost:27017", + "default": "mongodb://localhost:27017" + }, + "AVALON_DB": { + "description": "Name of database", + "type": "string", + "pattern": "^\\w*$", + "example": "avalon", + "default": "avalon" + }, + "AVALON_LABEL": { + "description": "Nice name of Avalon, used in e.g. 
graphical user interfaces", + "type": "string", + "example": "Mindbender", + "default": "Avalon" + }, + "AVALON_SENTRY": { + "description": "Address to Sentry", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "https://5b872b280de742919b115bdc8da076a5:8d278266fe764361b8fa6024af004a9c@logs.mindbender.com/2", + "default": null + }, + "AVALON_DEADLINE": { + "description": "Address to Deadline", + "type": "string", + "pattern": "^http[\\w/@:.]*$", + "example": "http://192.168.99.101", + "default": null + }, + "AVALON_TIMEOUT": { + "description": "Wherever there is a need for a timeout, this is the default value.", + "type": "string", + "pattern": "^[0-9]*$", + "default": "1000", + "example": "1000" + }, + "AVALON_UPLOAD": { + "description": "Boolean of whether to upload published material to central asset repository", + "type": "string", + "default": null, + "example": "True" + }, + "AVALON_USERNAME": { + "description": "Generic username", + "type": "string", + "pattern": "^\\w*$", + "default": "avalon", + "example": "myself" + }, + "AVALON_PASSWORD": { + "description": "Generic password", + "type": "string", + "pattern": "^\\w*$", + "default": "secret", + "example": "abc123" + }, + "AVALON_INSTANCE_ID": { + "description": "Unique identifier for instances in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.instance", + "example": "avalon.instance" + }, + "AVALON_CONTAINER_ID": { + "description": "Unique identifier for a loaded representation in a working file", + "type": "string", + "pattern": "^[\\w.]*$", + "default": "avalon.container", + "example": "avalon.container" + }, + "AVALON_DEBUG": { + "description": "Enable debugging mode. Some applications may use this for e.g. 
extended verbosity or mock plug-ins.", + "type": "string", + "default": null, + "example": "True" + } + } +} diff --git a/schema/shaders-1.0.json b/schema/shaders-1.0.json new file mode 100644 index 0000000000..e66cc735e8 --- /dev/null +++ b/schema/shaders-1.0.json @@ -0,0 +1,32 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:shaders-1.0", + "description": "Relationships between shaders and Avalon IDs", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "shader" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "shader": { + "description": "Name of directory", + "type": "array", + "items": { + "type": "str", + "description": "Avalon ID and optional face indexes, e.g. 'f9520572-ac1d-11e6-b39e-3085a99791c9.f[5002:5185]'" + } + } + }, + + "definitions": {} +} diff --git a/schema/subset-1.0.json b/schema/subset-1.0.json new file mode 100644 index 0000000000..90ae0349fa --- /dev/null +++ b/schema/subset-1.0.json @@ -0,0 +1,35 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-1.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "name", + "versions" + ], + + "properties": { + "schema": { + "description": "Schema identifier for payload", + "type": "string" + }, + "name": { + "description": "Name of directory", + "type": "string" + }, + "versions": { + "type": "array", + "items": { + "$ref": "version.json" + } + } + }, + + "definitions": {} +} \ No newline at end of file diff --git a/schema/subset-2.0.json b/schema/subset-2.0.json new file mode 100644 index 0000000000..98f39c4f3e --- /dev/null +++ b/schema/subset-2.0.json @@ -0,0 +1,51 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-2.0", + "description": "A container of instances", + + "type": "object", + + 
"additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:subset-2.0"], + "example": "pype:subset-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "type": "object", + "description": "Document metadata", + "example": { + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/subset-3.0.json b/schema/subset-3.0.json new file mode 100644 index 0000000000..a0af9d340f --- /dev/null +++ b/schema/subset-3.0.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:subset-3.0", + "description": "A container of instances", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:subset-3.0", "pype:subset-3.0"], + "example": "pype:subset-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["subset"], + "example": "subset" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Name of directory", + "type": "string", + "pattern": "^[a-zA-Z0-9_.]*$", + "example": "shot01" + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families"], + "properties": { + "families": { + "type": "array", + "items": {"type": "string"}, + 
"description": "One or more families associated with this subset" + } + }, + "example": { + "families" : [ + "avalon.camera" + ], + "frameStart": 1000, + "frameEnd": 1201 + } + } + } +} diff --git a/schema/thumbnail-1.0.json b/schema/thumbnail-1.0.json new file mode 100644 index 0000000000..96b540ab7e --- /dev/null +++ b/schema/thumbnail-1.0.json @@ -0,0 +1,42 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:thumbnail-1.0", + "description": "Entity with thumbnail data", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:thumbnail-1.0"], + "example": "pype:thumbnail-1.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["thumbnail"], + "example": "thumbnail" + }, + "data": { + "description": "Thumbnail data", + "type": "object", + "example": { + "binary_data": "Binary({byte data of image})", + "template": "{thumbnail_root}/{project[name]}/{_id}{ext}}", + "template_data": { + "ext": ".jpg" + } + } + } + } +} diff --git a/schema/version-1.0.json b/schema/version-1.0.json new file mode 100644 index 0000000000..c784a25175 --- /dev/null +++ b/schema/version-1.0.json @@ -0,0 +1,50 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-1.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "version", + "path", + "time", + "author", + "source", + "representations" + ], + + "properties": { + "schema": {"type": "string"}, + "representations": { + "type": "array", + "items": { + "$ref": "representation.json" + } + }, + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": 
"string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + } +} diff --git a/schema/version-2.0.json b/schema/version-2.0.json new file mode 100644 index 0000000000..5bb4a56f96 --- /dev/null +++ b/schema/version-2.0.json @@ -0,0 +1,92 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-2.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["pype:version-2.0"], + "example": "pype:version-2.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["families", "author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + "description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + 
"description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + }, + "families": { + "type": "array", + "items": {"type": "string"}, + "description": "One or more families associated with this version" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "families" : [ + "avalon.model" + ], + "time" : "20170510T090203Z" + } + } + } +} diff --git a/schema/version-3.0.json b/schema/version-3.0.json new file mode 100644 index 0000000000..808650da0d --- /dev/null +++ b/schema/version-3.0.json @@ -0,0 +1,84 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + + "title": "pype:version-3.0", + "description": "An individual version", + + "type": "object", + + "additionalProperties": true, + + "required": [ + "schema", + "type", + "parent", + "name", + "data" + ], + + "properties": { + "schema": { + "description": "The schema associated with this document", + "type": "string", + "enum": ["avalon-core:version-3.0", "pype:version-3.0"], + "example": "pype:version-3.0" + }, + "type": { + "description": "The type of document", + "type": "string", + "enum": ["version"], + "example": "version" + }, + "parent": { + "description": "Unique identifier to parent document", + "example": "592c33475f8c1b064c4d1696" + }, + "name": { + "description": "Number of version", + "type": "number", + "example": 12 + }, + "locations": { + "description": "Where on the planet this version can be found.", + "type": "array", + "items": {"type": "string"}, + "example": ["data.avalon.com"] + }, + "data": { + "description": "Document metadata", + "type": "object", + "required": ["author", "source", "time"], + "properties": { + "time": { + "description": "ISO formatted, file-system compatible time", + "type": "string" + }, + "timeFormat": { + 
"description": "ISO format of time", + "type": "string" + }, + "author": { + "description": "User logged on to the machine at time of publish", + "type": "string" + }, + "version": { + "description": "Number of this version", + "type": "number" + }, + "path": { + "description": "Unformatted path, e.g. '{root}/assets/Bruce/publish/lookdevDefault/v001", + "type": "string" + }, + "source": { + "description": "Original file from which this version was made.", + "type": "string" + } + }, + "example": { + "source" : "{root}/f02_prod/assets/BubbleWitch/work/modeling/marcus/maya/scenes/model_v001.ma", + "author" : "marcus", + "time" : "20170510T090203Z" + } + } + } +} diff --git a/setup/blender/init.py b/setup/blender/init.py new file mode 100644 index 0000000000..05c15eaeb2 --- /dev/null +++ b/setup/blender/init.py @@ -0,0 +1,3 @@ +from pype import blender + +blender.install() diff --git a/setup/nuke/nuke_path/KnobScripter/__init__.py b/setup/nuke/nuke_path/KnobScripter/__init__.py new file mode 100644 index 0000000000..8fe91d63f5 --- /dev/null +++ b/setup/nuke/nuke_path/KnobScripter/__init__.py @@ -0,0 +1 @@ +import knob_scripter \ No newline at end of file diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_clearConsole.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_clearConsole.png new file mode 100644 index 0000000000..75ac04ef84 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_clearConsole.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_download.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_download.png new file mode 100644 index 0000000000..1e3e9b7631 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_download.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_exitnode.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_exitnode.png new file mode 100644 index 0000000000..7714cd2b92 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_exitnode.png 
differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_pick.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_pick.png new file mode 100644 index 0000000000..2395537550 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_pick.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs.png new file mode 100644 index 0000000000..efef5ffc92 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs2.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs2.png new file mode 100644 index 0000000000..5c3c941d59 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_prefs2.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_refresh.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_refresh.png new file mode 100644 index 0000000000..559bfd74ab Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_refresh.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_run.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_run.png new file mode 100644 index 0000000000..6b2e4ddc23 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_run.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_save.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_save.png new file mode 100644 index 0000000000..e29c667f34 Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_save.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_search.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_search.png new file mode 100644 index 0000000000..d4ed2e1a2b Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_search.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/icons/icon_snippets.png b/setup/nuke/nuke_path/KnobScripter/icons/icon_snippets.png 
new file mode 100644 index 0000000000..479c44f19e Binary files /dev/null and b/setup/nuke/nuke_path/KnobScripter/icons/icon_snippets.png differ diff --git a/setup/nuke/nuke_path/KnobScripter/knob_scripter.py b/setup/nuke/nuke_path/KnobScripter/knob_scripter.py new file mode 100644 index 0000000000..f03067aa4b --- /dev/null +++ b/setup/nuke/nuke_path/KnobScripter/knob_scripter.py @@ -0,0 +1,4196 @@ +# ------------------------------------------------- +# KnobScripter by Adrian Pueyo +# Complete python sript editor for Nuke +# adrianpueyo.com, 2016-2019 +import string +import traceback +from webbrowser import open as openUrl +from threading import Event, Thread +import platform +import subprocess +from functools import partial +import re +import sys +from nukescripts import panels +import json +import os +import nuke +version = "2.3 wip" +date = "Aug 12 2019" +# ------------------------------------------------- + + +# Symlinks on windows... +if os.name == "nt": + def symlink_ms(source, link_name): + import ctypes + csl = ctypes.windll.kernel32.CreateSymbolicLinkW + csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32) + csl.restype = ctypes.c_ubyte + flags = 1 if os.path.isdir(source) else 0 + try: + if csl(link_name, source.replace('/', '\\'), flags) == 0: + raise ctypes.WinError() + except: + pass + os.symlink = symlink_ms + +try: + if nuke.NUKE_VERSION_MAJOR < 11: + from PySide import QtCore, QtGui, QtGui as QtWidgets + from PySide.QtCore import Qt + else: + from PySide2 import QtWidgets, QtGui, QtCore + from PySide2.QtCore import Qt +except ImportError: + from Qt import QtCore, QtGui, QtWidgets + +KS_DIR = os.path.dirname(__file__) +icons_path = KS_DIR + "/icons/" +DebugMode = False +AllKnobScripters = [] # All open instances at a given time + +PrefsPanel = "" +SnippetEditPanel = "" + +nuke.tprint('KnobScripter v{}, built {}.\nCopyright (c) 2016-2019 Adrian Pueyo. 
All Rights Reserved.'.format(version, date)) + + +class KnobScripter(QtWidgets.QWidget): + + def __init__(self, node="", knob="knobChanged"): + super(KnobScripter, self).__init__() + + # Autosave the other knobscripters and add this one + for ks in AllKnobScripters: + try: + ks.autosave() + except: + pass + if self not in AllKnobScripters: + AllKnobScripters.append(self) + + self.nodeMode = (node != "") + if node == "": + self.node = nuke.toNode("root") + else: + self.node = node + + self.isPane = False + self.knob = knob + # For the option to also display the knob labels on the knob dropdown + self.show_labels = False + self.unsavedKnobs = {} + self.modifiedKnobs = set() + self.scrollPos = {} + self.cursorPos = {} + self.fontSize = 10 + self.font = "Monospace" + self.tabSpaces = 4 + self.windowDefaultSize = [500, 300] + self.color_scheme = "sublime" # Can be nuke or sublime + self.pinned = 1 + self.toLoadKnob = True + self.frw_open = False # Find replace widget closed by default + self.icon_size = 17 + self.btn_size = 24 + self.qt_icon_size = QtCore.QSize(self.icon_size, self.icon_size) + self.qt_btn_size = QtCore.QSize(self.btn_size, self.btn_size) + self.origConsoleText = "" + self.nukeSE = self.findSE() + self.nukeSEOutput = self.findSEOutput(self.nukeSE) + self.nukeSEInput = self.findSEInput(self.nukeSE) + self.nukeSERunBtn = self.findSERunBtn(self.nukeSE) + + self.scripts_dir = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_Scripts")) + self.current_folder = "scripts" + self.folder_index = 0 + self.current_script = "Untitled.py" + self.current_script_modified = False + self.script_index = 0 + self.toAutosave = False + + # Load prefs + self.prefs_txt = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_Prefs.txt")) + self.loadedPrefs = self.loadPrefs() + if self.loadedPrefs != []: + try: + if "font_size" in self.loadedPrefs: + self.fontSize = self.loadedPrefs['font_size'] + self.windowDefaultSize = [ + 
self.loadedPrefs['window_default_w'], self.loadedPrefs['window_default_h']] + self.tabSpaces = self.loadedPrefs['tab_spaces'] + self.pinned = self.loadedPrefs['pin_default'] + if "font" in self.loadedPrefs: + self.font = self.loadedPrefs['font'] + if "color_scheme" in self.loadedPrefs: + self.color_scheme = self.loadedPrefs['color_scheme'] + if "show_labels" in self.loadedPrefs: + self.show_labels = self.loadedPrefs['show_labels'] + except TypeError: + log("KnobScripter: Failed to load preferences.") + + # Load snippets + self.snippets_txt_path = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_Snippets.txt")) + self.snippets = self.loadSnippets(maxDepth=5) + + # Current state of script (loaded when exiting node mode) + self.state_txt_path = os.path.expandvars( + os.path.expanduser("~/.nuke/KnobScripter_State.txt")) + + # Init UI + self.initUI() + + # Talk to Nuke's Script Editor + self.setSEOutputEvent() # Make the output windowS listen! + self.clearConsole() + + def initUI(self): + ''' Initializes the tool UI''' + # ------------------- + # 1. MAIN WINDOW + # ------------------- + self.resize(self.windowDefaultSize[0], self.windowDefaultSize[1]) + self.setWindowTitle("KnobScripter - %s %s" % + (self.node.fullName(), self.knob)) + self.setObjectName("com.adrianpueyo.knobscripter") + self.move(QtGui.QCursor().pos() - QtCore.QPoint(32, 74)) + + # --------------------- + # 2. TOP BAR + # --------------------- + # --- + # 2.1. Left buttons + self.change_btn = QtWidgets.QToolButton() + # self.exit_node_btn.setIcon(QtGui.QIcon(KS_DIR+"/KnobScripter/icons/icons8-delete-26.png")) + self.change_btn.setIcon(QtGui.QIcon(icons_path + "icon_pick.png")) + self.change_btn.setIconSize(self.qt_icon_size) + self.change_btn.setFixedSize(self.qt_btn_size) + self.change_btn.setToolTip( + "Change to node if selected. Otherwise, change to Script Mode.") + self.change_btn.clicked.connect(self.changeClicked) + + # --- + # 2.2.A. 
Node mode UI + self.exit_node_btn = QtWidgets.QToolButton() + self.exit_node_btn.setIcon(QtGui.QIcon( + icons_path + "icon_exitnode.png")) + self.exit_node_btn.setIconSize(self.qt_icon_size) + self.exit_node_btn.setFixedSize(self.qt_btn_size) + self.exit_node_btn.setToolTip( + "Exit the node, and change to Script Mode.") + self.exit_node_btn.clicked.connect(self.exitNodeMode) + self.current_node_label_node = QtWidgets.QLabel(" Node:") + self.current_node_label_name = QtWidgets.QLabel(self.node.fullName()) + self.current_node_label_name.setStyleSheet("font-weight:bold;") + self.current_knob_label = QtWidgets.QLabel("Knob: ") + self.current_knob_dropdown = QtWidgets.QComboBox() + self.current_knob_dropdown.setSizeAdjustPolicy( + QtWidgets.QComboBox.AdjustToContents) + self.updateKnobDropdown() + self.current_knob_dropdown.currentIndexChanged.connect( + lambda: self.loadKnobValue(False, updateDict=True)) + + # Layout + self.node_mode_bar_layout = QtWidgets.QHBoxLayout() + self.node_mode_bar_layout.addWidget(self.exit_node_btn) + self.node_mode_bar_layout.addSpacing(2) + self.node_mode_bar_layout.addWidget(self.current_node_label_node) + self.node_mode_bar_layout.addWidget(self.current_node_label_name) + self.node_mode_bar_layout.addSpacing(2) + self.node_mode_bar_layout.addWidget(self.current_knob_dropdown) + self.node_mode_bar = QtWidgets.QWidget() + self.node_mode_bar.setLayout(self.node_mode_bar_layout) + + self.node_mode_bar_layout.setContentsMargins(0, 0, 0, 0) + + # --- + # 2.2.B. 
Script mode UI + self.script_label = QtWidgets.QLabel("Script: ") + + self.current_folder_dropdown = QtWidgets.QComboBox() + self.current_folder_dropdown.setSizeAdjustPolicy( + QtWidgets.QComboBox.AdjustToContents) + self.current_folder_dropdown.currentIndexChanged.connect( + self.folderDropdownChanged) + # self.current_folder_dropdown.setEditable(True) + # self.current_folder_dropdown.lineEdit().setReadOnly(True) + # self.current_folder_dropdown.lineEdit().setAlignment(Qt.AlignRight) + + self.current_script_dropdown = QtWidgets.QComboBox() + self.current_script_dropdown.setSizeAdjustPolicy( + QtWidgets.QComboBox.AdjustToContents) + self.updateFoldersDropdown() + self.updateScriptsDropdown() + self.current_script_dropdown.currentIndexChanged.connect( + self.scriptDropdownChanged) + + # Layout + self.script_mode_bar_layout = QtWidgets.QHBoxLayout() + self.script_mode_bar_layout.addWidget(self.script_label) + self.script_mode_bar_layout.addSpacing(2) + self.script_mode_bar_layout.addWidget(self.current_folder_dropdown) + self.script_mode_bar_layout.addWidget(self.current_script_dropdown) + self.script_mode_bar = QtWidgets.QWidget() + self.script_mode_bar.setLayout(self.script_mode_bar_layout) + + self.script_mode_bar_layout.setContentsMargins(0, 0, 0, 0) + + # --- + # 2.3. 
File-system buttons + # Refresh dropdowns + self.refresh_btn = QtWidgets.QToolButton() + self.refresh_btn.setIcon(QtGui.QIcon(icons_path + "icon_refresh.png")) + self.refresh_btn.setIconSize(QtCore.QSize(50, 50)) + self.refresh_btn.setIconSize(self.qt_icon_size) + self.refresh_btn.setFixedSize(self.qt_btn_size) + self.refresh_btn.setToolTip("Refresh the dropdowns.\nShortcut: F5") + self.refresh_btn.setShortcut('F5') + self.refresh_btn.clicked.connect(self.refreshClicked) + + # Reload script + self.reload_btn = QtWidgets.QToolButton() + self.reload_btn.setIcon(QtGui.QIcon(icons_path + "icon_download.png")) + self.reload_btn.setIconSize(QtCore.QSize(50, 50)) + self.reload_btn.setIconSize(self.qt_icon_size) + self.reload_btn.setFixedSize(self.qt_btn_size) + self.reload_btn.setToolTip( + "Reload the current script. Will overwrite any changes made to it.\nShortcut: Ctrl+R") + self.reload_btn.setShortcut('Ctrl+R') + self.reload_btn.clicked.connect(self.reloadClicked) + + # Save script + self.save_btn = QtWidgets.QToolButton() + self.save_btn.setIcon(QtGui.QIcon(icons_path + "icon_save.png")) + self.save_btn.setIconSize(QtCore.QSize(50, 50)) + self.save_btn.setIconSize(self.qt_icon_size) + self.save_btn.setFixedSize(self.qt_btn_size) + self.save_btn.setToolTip( + "Save the script into the selected knob or python file.\nShortcut: Ctrl+S") + self.save_btn.setShortcut('Ctrl+S') + self.save_btn.clicked.connect(self.saveClicked) + + # Layout + self.top_file_bar_layout = QtWidgets.QHBoxLayout() + self.top_file_bar_layout.addWidget(self.refresh_btn) + self.top_file_bar_layout.addWidget(self.reload_btn) + self.top_file_bar_layout.addWidget(self.save_btn) + + # --- + # 2.4. 
Right Side buttons + + # Run script + self.run_script_button = QtWidgets.QToolButton() + self.run_script_button.setIcon( + QtGui.QIcon(icons_path + "icon_run.png")) + self.run_script_button.setIconSize(self.qt_icon_size) + # self.run_script_button.setIconSize(self.qt_icon_size) + self.run_script_button.setFixedSize(self.qt_btn_size) + self.run_script_button.setToolTip( + "Execute the current selection on the KnobScripter, or the whole script if no selection.\nShortcut: Ctrl+Enter") + self.run_script_button.clicked.connect(self.runScript) + + # Clear console + self.clear_console_button = QtWidgets.QToolButton() + self.clear_console_button.setIcon( + QtGui.QIcon(icons_path + "icon_clearConsole.png")) + self.clear_console_button.setIconSize(QtCore.QSize(50, 50)) + self.clear_console_button.setIconSize(self.qt_icon_size) + self.clear_console_button.setFixedSize(self.qt_btn_size) + self.clear_console_button.setToolTip( + "Clear the text in the console window.\nShortcut: Click Backspace on the console.") + self.clear_console_button.clicked.connect(self.clearConsole) + + # FindReplace button + self.find_button = QtWidgets.QToolButton() + self.find_button.setIcon(QtGui.QIcon(icons_path + "icon_search.png")) + self.find_button.setIconSize(self.qt_icon_size) + self.find_button.setFixedSize(self.qt_btn_size) + self.find_button.setToolTip( + "Call the snippets by writing the shortcut and pressing Tab.\nShortcut: Ctrl+F") + self.find_button.setShortcut('Ctrl+F') + #self.find_button.setMaximumWidth(self.find_button.fontMetrics().boundingRect("Find").width() + 20) + self.find_button.setCheckable(True) + self.find_button.setFocusPolicy(QtCore.Qt.NoFocus) + self.find_button.clicked[bool].connect(self.toggleFRW) + if self.frw_open: + self.find_button.toggle() + + # Snippets + self.snippets_button = QtWidgets.QToolButton() + self.snippets_button.setIcon( + QtGui.QIcon(icons_path + "icon_snippets.png")) + self.snippets_button.setIconSize(QtCore.QSize(50, 50)) + 
self.snippets_button.setIconSize(self.qt_icon_size) + self.snippets_button.setFixedSize(self.qt_btn_size) + self.snippets_button.setToolTip( + "Call the snippets by writing the shortcut and pressing Tab.") + self.snippets_button.clicked.connect(self.openSnippets) + + # PIN + ''' + self.pin_button = QtWidgets.QPushButton("P") + self.pin_button.setCheckable(True) + if self.pinned: + self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint) + self.pin_button.toggle() + self.pin_button.setToolTip("Toggle 'Always On Top'. Keeps the KnobScripter on top of all other windows.") + self.pin_button.setFocusPolicy(QtCore.Qt.NoFocus) + self.pin_button.setFixedSize(self.qt_btn_size) + self.pin_button.clicked[bool].connect(self.pin) + ''' + + # Prefs + self.createPrefsMenu() + self.prefs_button = QtWidgets.QPushButton() + self.prefs_button.setIcon(QtGui.QIcon(icons_path + "icon_prefs.png")) + self.prefs_button.setIconSize(self.qt_icon_size) + self.prefs_button.setFixedSize( + QtCore.QSize(self.btn_size + 10, self.btn_size)) + # self.prefs_button.clicked.connect(self.openPrefs) + self.prefs_button.setMenu(self.prefsMenu) + self.prefs_button.setStyleSheet("text-align:left;padding-left:2px;") + #self.prefs_button.setMaximumWidth(self.prefs_button.fontMetrics().boundingRect("Prefs").width() + 12) + + # Layout + self.top_right_bar_layout = QtWidgets.QHBoxLayout() + self.top_right_bar_layout.addWidget(self.run_script_button) + self.top_right_bar_layout.addWidget(self.clear_console_button) + self.top_right_bar_layout.addWidget(self.find_button) + # self.top_right_bar_layout.addWidget(self.snippets_button) + # self.top_right_bar_layout.addWidget(self.pin_button) + # self.top_right_bar_layout.addSpacing(10) + self.top_right_bar_layout.addWidget(self.prefs_button) + + # --- + # Layout + self.top_layout = QtWidgets.QHBoxLayout() + self.top_layout.setContentsMargins(0, 0, 0, 0) + # self.top_layout.setSpacing(10) + self.top_layout.addWidget(self.change_btn) + 
self.top_layout.addWidget(self.node_mode_bar) + self.top_layout.addWidget(self.script_mode_bar) + self.node_mode_bar.setVisible(False) + # self.top_layout.addSpacing(10) + self.top_layout.addLayout(self.top_file_bar_layout) + self.top_layout.addStretch() + self.top_layout.addLayout(self.top_right_bar_layout) + + # ---------------------- + # 3. SCRIPTING SECTION + # ---------------------- + # Splitter + self.splitter = QtWidgets.QSplitter(Qt.Vertical) + + # Output widget + self.script_output = ScriptOutputWidget(parent=self) + self.script_output.setReadOnly(1) + self.script_output.setAcceptRichText(0) + self.script_output.setTabStopWidth( + self.script_output.tabStopWidth() / 4) + self.script_output.setFocusPolicy(Qt.ClickFocus) + self.script_output.setAutoFillBackground(0) + self.script_output.installEventFilter(self) + + # Script Editor + self.script_editor = KnobScripterTextEditMain(self, self.script_output) + self.script_editor.setMinimumHeight(30) + self.script_editor.setStyleSheet( + 'background:#282828;color:#EEE;') # Main Colors + self.script_editor.textChanged.connect(self.setModified) + self.highlighter = KSScriptEditorHighlighter( + self.script_editor.document(), self) + self.script_editor.cursorPositionChanged.connect(self.setTextSelection) + self.script_editor_font = QtGui.QFont() + self.script_editor_font.setFamily(self.font) + self.script_editor_font.setStyleHint(QtGui.QFont.Monospace) + self.script_editor_font.setFixedPitch(True) + self.script_editor_font.setPointSize(self.fontSize) + self.script_editor.setFont(self.script_editor_font) + self.script_editor.setTabStopWidth( + self.tabSpaces * QtGui.QFontMetrics(self.script_editor_font).width(' ')) + + # Add input and output to splitter + self.splitter.addWidget(self.script_output) + self.splitter.addWidget(self.script_editor) + self.splitter.setStretchFactor(0, 0) + + # FindReplace widget + self.frw = FindReplaceWidget(self) + self.frw.setVisible(self.frw_open) + + # --- + # Layout + 
self.scripting_layout = QtWidgets.QVBoxLayout() + self.scripting_layout.setContentsMargins(0, 0, 0, 0) + self.scripting_layout.setSpacing(0) + self.scripting_layout.addWidget(self.splitter) + self.scripting_layout.addWidget(self.frw) + + # --------------- + # MASTER LAYOUT + # --------------- + self.master_layout = QtWidgets.QVBoxLayout() + self.master_layout.setSpacing(5) + self.master_layout.setContentsMargins(8, 8, 8, 8) + self.master_layout.addLayout(self.top_layout) + self.master_layout.addLayout(self.scripting_layout) + # self.master_layout.addLayout(self.bottom_layout) + self.setLayout(self.master_layout) + + # ---------------- + # MAIN WINDOW UI + # ---------------- + size_policy = QtWidgets.QSizePolicy( + QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Minimum) + self.setSizePolicy(size_policy) + self.setMinimumWidth(160) + + if self.pinned: + self.setWindowFlags(self.windowFlags() | + QtCore.Qt.WindowStaysOnTopHint) + + # Set default values based on mode + if self.nodeMode: + self.current_knob_dropdown.blockSignals(True) + self.node_mode_bar.setVisible(True) + self.script_mode_bar.setVisible(False) + self.setCurrentKnob(self.knob) + self.loadKnobValue(check=False) + self.setKnobModified(False) + self.current_knob_dropdown.blockSignals(False) + self.splitter.setSizes([0, 1]) + else: + self.exitNodeMode() + self.script_editor.setFocus() + + # Preferences submenus + def createPrefsMenu(self): + + # Actions + self.echoAct = QtWidgets.QAction("Echo python commands", self, checkable=True, + statusTip="Toggle nuke's 'Echo all python commands to ScriptEditor'", triggered=self.toggleEcho) + if nuke.toNode("preferences").knob("echoAllCommands").value(): + self.echoAct.toggle() + self.pinAct = QtWidgets.QAction("Always on top", self, checkable=True, + statusTip="Keeps the KnobScripter window always on top or not.", triggered=self.togglePin) + if self.pinned: + self.setWindowFlags(self.windowFlags() | + QtCore.Qt.WindowStaysOnTopHint) + self.pinAct.toggle() + 
self.helpAct = QtWidgets.QAction( + "&Help", self, statusTip="Open the KnobScripter help in your browser.", shortcut="F1", triggered=self.showHelp) + self.nukepediaAct = QtWidgets.QAction( + "Show in Nukepedia", self, statusTip="Open the KnobScripter download page on Nukepedia.", triggered=self.showInNukepedia) + self.githubAct = QtWidgets.QAction( + "Show in GitHub", self, statusTip="Open the KnobScripter repo on GitHub.", triggered=self.showInGithub) + self.snippetsAct = QtWidgets.QAction( + "Snippets", self, statusTip="Open the Snippets editor.", triggered=self.openSnippets) + self.snippetsAct.setIcon(QtGui.QIcon(icons_path + "icon_snippets.png")) + # self.snippetsAct = QtWidgets.QAction("Keywords", self, statusTip="Add custom keywords.", triggered=self.openSnippets) #TODO THIS + self.prefsAct = QtWidgets.QAction( + "Preferences", self, statusTip="Open the Preferences panel.", triggered=self.openPrefs) + self.prefsAct.setIcon(QtGui.QIcon(icons_path + "icon_prefs.png")) + + # Menus + self.prefsMenu = QtWidgets.QMenu("Preferences") + self.prefsMenu.addAction(self.echoAct) + self.prefsMenu.addAction(self.pinAct) + self.prefsMenu.addSeparator() + self.prefsMenu.addAction(self.nukepediaAct) + self.prefsMenu.addAction(self.githubAct) + self.prefsMenu.addSeparator() + self.prefsMenu.addAction(self.helpAct) + self.prefsMenu.addSeparator() + self.prefsMenu.addAction(self.snippetsAct) + self.prefsMenu.addAction(self.prefsAct) + + def initEcho(self): + ''' Initializes the echo chechable QAction based on nuke's state ''' + echo_knob = nuke.toNode("preferences").knob("echoAllCommands") + self.echoAct.setChecked(echo_knob.value()) + + def toggleEcho(self): + ''' Toggle the "Echo python commands" from Nuke ''' + echo_knob = nuke.toNode("preferences").knob("echoAllCommands") + echo_knob.setValue(self.echoAct.isChecked()) + + def togglePin(self): + ''' Toggle "always on top" based on the submenu button ''' + self.pin(self.pinAct.isChecked()) + + def showInNukepedia(self): + 
openUrl("http://www.nukepedia.com/python/ui/knobscripter") + + def showInGithub(self): + openUrl("https://github.com/adrianpueyo/KnobScripter") + + def showHelp(self): + openUrl("https://vimeo.com/adrianpueyo/knobscripter2") + + # Node Mode + + def updateKnobDropdown(self): + ''' Populate knob dropdown list ''' + self.current_knob_dropdown.clear() # First remove all items + defaultKnobs = ["knobChanged", "onCreate", "onScriptLoad", "onScriptSave", "onScriptClose", "onDestroy", + "updateUI", "autolabel", "beforeRender", "beforeFrameRender", "afterFrameRender", "afterRender"] + permittedKnobClasses = ["PyScript_Knob", "PythonCustomKnob"] + counter = 0 + for i in self.node.knobs(): + if i not in defaultKnobs and self.node.knob(i).Class() in permittedKnobClasses: + if self.show_labels: + i_full = "{} ({})".format(self.node.knob(i).label(), i) + else: + i_full = i + + if i in self.unsavedKnobs.keys(): + self.current_knob_dropdown.addItem(i_full + "(*)", i) + else: + self.current_knob_dropdown.addItem(i_full, i) + + counter += 1 + if counter > 0: + self.current_knob_dropdown.insertSeparator(counter) + counter += 1 + self.current_knob_dropdown.insertSeparator(counter) + counter += 1 + for i in self.node.knobs(): + if i in defaultKnobs: + if i in self.unsavedKnobs.keys(): + self.current_knob_dropdown.addItem(i + "(*)", i) + else: + self.current_knob_dropdown.addItem(i, i) + counter += 1 + return + + def loadKnobValue(self, check=True, updateDict=False): + ''' Get the content of the knob value and populate the editor ''' + if self.toLoadKnob == False: + return + dropdown_value = self.current_knob_dropdown.itemData( + self.current_knob_dropdown.currentIndex()) # knobChanged... 
+ try: + obtained_knobValue = str(self.node[dropdown_value].value()) + obtained_scrollValue = 0 + edited_knobValue = self.script_editor.toPlainText() + except: + error_message = QtWidgets.QMessageBox.information( + None, "", "Unable to find %s.%s" % (self.node.name(), dropdown_value)) + error_message.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + error_message.exec_() + return + # If there were changes to the previous knob, update the dictionary + if updateDict == True: + self.unsavedKnobs[self.knob] = edited_knobValue + self.scrollPos[self.knob] = self.script_editor.verticalScrollBar( + ).value() + prev_knob = self.knob # knobChanged... + + self.knob = self.current_knob_dropdown.itemData( + self.current_knob_dropdown.currentIndex()) # knobChanged... + + if check and obtained_knobValue != edited_knobValue: + msgBox = QtWidgets.QMessageBox() + msgBox.setText("The Script Editor has been modified.") + msgBox.setInformativeText( + "Do you want to overwrite the current code on this editor?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + self.setCurrentKnob(prev_knob) + return + # If order comes from a dropdown update, update value from dictionary if possible, otherwise update normally + self.setWindowTitle("KnobScripter - %s %s" % + (self.node.name(), self.knob)) + if updateDict: + if self.knob in self.unsavedKnobs: + if self.unsavedKnobs[self.knob] == obtained_knobValue: + self.script_editor.setPlainText(obtained_knobValue) + self.setKnobModified(False) + else: + obtained_knobValue = self.unsavedKnobs[self.knob] + self.script_editor.setPlainText(obtained_knobValue) + self.setKnobModified(True) + else: + self.script_editor.setPlainText(obtained_knobValue) + self.setKnobModified(False) + + if self.knob in 
self.scrollPos: + obtained_scrollValue = self.scrollPos[self.knob] + else: + self.script_editor.setPlainText(obtained_knobValue) + + cursor = self.script_editor.textCursor() + self.script_editor.setTextCursor(cursor) + self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) + return + + def loadAllKnobValues(self): + ''' Load all knobs button's function ''' + if len(self.unsavedKnobs) >= 1: + msgBox = QtWidgets.QMessageBox() + msgBox.setText( + "Do you want to reload all python and callback knobs?") + msgBox.setInformativeText( + "Unsaved changes on this editor will be lost.") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + self.unsavedKnobs = {} + return + + def saveKnobValue(self, check=True): + ''' Save the text from the editor to the node's knobChanged knob ''' + dropdown_value = self.current_knob_dropdown.itemData( + self.current_knob_dropdown.currentIndex()) + try: + obtained_knobValue = str(self.node[dropdown_value].value()) + self.knob = dropdown_value + except: + error_message = QtWidgets.QMessageBox.information( + None, "", "Unable to find %s.%s" % (self.node.name(), dropdown_value)) + error_message.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + error_message.exec_() + return + edited_knobValue = self.script_editor.toPlainText() + if check and obtained_knobValue != edited_knobValue: + msgBox = QtWidgets.QMessageBox() + msgBox.setText("Do you want to overwrite %s.%s?" 
% + (self.node.name(), dropdown_value)) + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + self.node[dropdown_value].setValue(edited_knobValue) + self.setKnobModified( + modified=False, knob=dropdown_value, changeTitle=True) + nuke.tcl("modified 1") + if self.knob in self.unsavedKnobs: + del self.unsavedKnobs[self.knob] + return + + def saveAllKnobValues(self, check=True): + ''' Save all knobs button's function ''' + if self.updateUnsavedKnobs() > 0 and check: + msgBox = QtWidgets.QMessageBox() + msgBox.setText( + "Do you want to save all modified python and callback knobs?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + saveErrors = 0 + savedCount = 0 + for k in self.unsavedKnobs.copy(): + try: + self.node.knob(k).setValue(self.unsavedKnobs[k]) + del self.unsavedKnobs[k] + savedCount += 1 + nuke.tcl("modified 1") + except: + saveErrors += 1 + if saveErrors > 0: + errorBox = QtWidgets.QMessageBox() + errorBox.setText("Error saving %s knob%s." 
% + (str(saveErrors), int(saveErrors > 1) * "s")) + errorBox.setIcon(QtWidgets.QMessageBox.Warning) + errorBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + errorBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = errorBox.exec_() + else: + log("KnobScripter: %s knobs saved" % str(savedCount)) + return + + def setCurrentKnob(self, knobToSet): + ''' Set current knob ''' + KnobDropdownItems = [] + for i in range(self.current_knob_dropdown.count()): + if self.current_knob_dropdown.itemData(i) is not None: + KnobDropdownItems.append( + self.current_knob_dropdown.itemData(i)) + else: + KnobDropdownItems.append("---") + if knobToSet in KnobDropdownItems: + index = KnobDropdownItems.index(knobToSet) + self.current_knob_dropdown.setCurrentIndex(index) + return + + def updateUnsavedKnobs(self, first_time=False): + ''' Clear unchanged knobs from the dict and return the number of unsaved knobs ''' + if not self.node: + # Node has been deleted, so simply return 0. Who cares. + return 0 + edited_knobValue = self.script_editor.toPlainText() + self.unsavedKnobs[self.knob] = edited_knobValue + if len(self.unsavedKnobs) > 0: + for k in self.unsavedKnobs.copy(): + if self.node.knob(k): + if str(self.node.knob(k).value()) == str(self.unsavedKnobs[k]): + del self.unsavedKnobs[k] + else: + del self.unsavedKnobs[k] + # Set appropriate knobs modified... 
+ knobs_dropdown = self.current_knob_dropdown + all_knobs = [knobs_dropdown.itemData(i) + for i in range(knobs_dropdown.count())] + for key in all_knobs: + if key in self.unsavedKnobs.keys(): + self.setKnobModified( + modified=True, knob=key, changeTitle=False) + else: + self.setKnobModified( + modified=False, knob=key, changeTitle=False) + + return len(self.unsavedKnobs) + + def setKnobModified(self, modified=True, knob="", changeTitle=True): + ''' Sets the current knob modified, title and whatever else we need ''' + if knob == "": + knob = self.knob + if modified: + self.modifiedKnobs.add(knob) + else: + self.modifiedKnobs.discard(knob) + + if changeTitle: + title_modified_string = " [modified]" + windowTitle = self.windowTitle().split(title_modified_string)[0] + if modified == True: + windowTitle += title_modified_string + self.setWindowTitle(windowTitle) + + try: + knobs_dropdown = self.current_knob_dropdown + kd_index = knobs_dropdown.currentIndex() + kd_data = knobs_dropdown.itemData(kd_index) + if self.show_labels and i not in defaultKnobs: + kd_data = "{} ({})".format( + self.node.knob(kd_data).label(), kd_data) + if modified == False: + knobs_dropdown.setItemText(kd_index, kd_data) + else: + knobs_dropdown.setItemText(kd_index, kd_data + "(*)") + except: + pass + + # Script Mode + def updateFoldersDropdown(self): + ''' Populate folders dropdown list ''' + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.clear() # First remove all items + defaultFolders = ["scripts"] + scriptFolders = [] + counter = 0 + for f in defaultFolders: + self.makeScriptFolder(f) + self.current_folder_dropdown.addItem(f + "/", f) + counter += 1 + + try: + scriptFolders = sorted([f for f in os.listdir(self.scripts_dir) if os.path.isdir( + os.path.join(self.scripts_dir, f))]) # Accepts symlinks!!! 
+ except: + log("Couldn't read any script folders.") + + for f in scriptFolders: + fname = f.split("/")[-1] + if fname in defaultFolders: + continue + self.current_folder_dropdown.addItem(fname + "/", fname) + counter += 1 + + # print scriptFolders + if counter > 0: + self.current_folder_dropdown.insertSeparator(counter) + counter += 1 + # self.current_folder_dropdown.insertSeparator(counter) + #counter += 1 + self.current_folder_dropdown.addItem("New", "create new") + self.current_folder_dropdown.addItem("Open...", "open in browser") + self.current_folder_dropdown.addItem("Add custom", "add custom path") + self.folder_index = self.current_folder_dropdown.currentIndex() + self.current_folder = self.current_folder_dropdown.itemData( + self.folder_index) + self.current_folder_dropdown.blockSignals(False) + return + + def updateScriptsDropdown(self): + ''' Populate py scripts dropdown list ''' + self.current_script_dropdown.blockSignals(True) + self.current_script_dropdown.clear() # First remove all items + QtWidgets.QApplication.processEvents() + log("# Updating scripts dropdown...") + log("scripts dir:" + self.scripts_dir) + log("current folder:" + self.current_folder) + log("previous current script:" + self.current_script) + #current_folder = self.current_folder_dropdown.itemData(self.current_folder_dropdown.currentIndex()) + current_folder_path = os.path.join( + self.scripts_dir, self.current_folder) + defaultScripts = ["Untitled.py"] + found_scripts = [] + counter = 0 + # All files and folders inside of the folder + dir_list = os.listdir(current_folder_path) + try: + found_scripts = sorted([f for f in dir_list if f.endswith(".py")]) + found_temp_scripts = [ + f for f in dir_list if f.endswith(".py.autosave")] + except: + log("Couldn't find any scripts in the selected folder.") + if not len(found_scripts): + for s in defaultScripts: + if s + ".autosave" in found_temp_scripts: + self.current_script_dropdown.addItem(s + "(*)", s) + else: + 
self.current_script_dropdown.addItem(s, s) + counter += 1 + else: + for s in defaultScripts: + if s + ".autosave" in found_temp_scripts: + self.current_script_dropdown.addItem(s + "(*)", s) + elif s in found_scripts: + self.current_script_dropdown.addItem(s, s) + for s in found_scripts: + if s in defaultScripts: + continue + sname = s.split("/")[-1] + if s + ".autosave" in found_temp_scripts: + self.current_script_dropdown.addItem(sname + "(*)", sname) + else: + self.current_script_dropdown.addItem(sname, sname) + counter += 1 + # else: #Add the found scripts to the dropdown + if counter > 0: + counter += 1 + self.current_script_dropdown.insertSeparator(counter) + counter += 1 + self.current_script_dropdown.insertSeparator(counter) + self.current_script_dropdown.addItem("New", "create new") + self.current_script_dropdown.addItem("Duplicate", "create duplicate") + self.current_script_dropdown.addItem("Delete", "delete script") + self.current_script_dropdown.addItem("Open", "open in browser") + #self.script_index = self.current_script_dropdown.currentIndex() + self.script_index = 0 + self.current_script = self.current_script_dropdown.itemData( + self.script_index) + log("Finished updating scripts dropdown.") + log("current_script:" + self.current_script) + self.current_script_dropdown.blockSignals(False) + return + + def makeScriptFolder(self, name="scripts"): + folder_path = os.path.join(self.scripts_dir, name) + if not os.path.exists(folder_path): + try: + os.makedirs(folder_path) + return True + except: + print "Couldn't create the scripting folders.\nPlease check your OS write permissions." + return False + + def makeScriptFile(self, name="Untitled.py", folder="scripts", empty=True): + script_path = os.path.join(self.scripts_dir, self.current_folder, name) + if not os.path.isfile(script_path): + try: + self.current_script_file = open(script_path, 'w') + return True + except: + print "Couldn't create the scripting folders.\nPlease check your OS write permissions." 
+ return False + + def setCurrentFolder(self, folderName): + ''' Set current folder ON THE DROPDOWN ONLY''' + folderList = [self.current_folder_dropdown.itemData( + i) for i in range(self.current_folder_dropdown.count())] + if folderName in folderList: + index = folderList.index(folderName) + self.current_folder_dropdown.setCurrentIndex(index) + self.current_folder = folderName + self.folder_index = self.current_folder_dropdown.currentIndex() + self.current_folder = self.current_folder_dropdown.itemData( + self.folder_index) + return + + def setCurrentScript(self, scriptName): + ''' Set current script ON THE DROPDOWN ONLY ''' + scriptList = [self.current_script_dropdown.itemData( + i) for i in range(self.current_script_dropdown.count())] + if scriptName in scriptList: + index = scriptList.index(scriptName) + self.current_script_dropdown.setCurrentIndex(index) + self.current_script = scriptName + self.script_index = self.current_script_dropdown.currentIndex() + self.current_script = self.current_script_dropdown.itemData( + self.script_index) + return + + def loadScriptContents(self, check=False, pyOnly=False, folder=""): + ''' Get the contents of the selected script and populate the editor ''' + log("# About to load script contents now.") + obtained_scrollValue = 0 + obtained_cursorPosValue = [0, 0] # Position, anchor + if folder == "": + folder = self.current_folder + script_path = os.path.join( + self.scripts_dir, folder, self.current_script) + script_path_temp = script_path + ".autosave" + if (self.current_folder + "/" + self.current_script) in self.scrollPos: + obtained_scrollValue = self.scrollPos[self.current_folder + + "/" + self.current_script] + if (self.current_folder + "/" + self.current_script) in self.cursorPos: + obtained_cursorPosValue = self.cursorPos[self.current_folder + + "/" + self.current_script] + + # 1: If autosave exists and pyOnly is false, load it + if os.path.isfile(script_path_temp) and not pyOnly: + log("Loading .py.autosave file\n---") 
+ with open(script_path_temp, 'r') as script: + content = script.read() + self.script_editor.setPlainText(content) + self.setScriptModified(True) + self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) + + # 2: Try to load the .py as first priority, if it exists + elif os.path.isfile(script_path): + log("Loading .py file\n---") + with open(script_path, 'r') as script: + content = script.read() + current_text = self.script_editor.toPlainText().encode("utf8") + if check and current_text != content and current_text.strip() != "": + msgBox = QtWidgets.QMessageBox() + msgBox.setText("The script has been modified.") + msgBox.setInformativeText( + "Do you want to overwrite the current code on this editor?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + # Clear trash + if os.path.isfile(script_path_temp): + os.remove(script_path_temp) + log("Removed " + script_path_temp) + self.setScriptModified(False) + self.script_editor.setPlainText(content) + self.script_editor.verticalScrollBar().setValue(obtained_scrollValue) + self.setScriptModified(False) + self.loadScriptState() + self.setScriptState() + + # 3: If .py doesn't exist... 
only then stick to the autosave + elif os.path.isfile(script_path_temp): + with open(script_path_temp, 'r') as script: + content = script.read() + + msgBox = QtWidgets.QMessageBox() + msgBox.setText("The .py file hasn't been found.") + msgBox.setInformativeText( + "Do you want to clear the current code on this editor?") + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return + + # Clear trash + os.remove(script_path_temp) + log("Removed " + script_path_temp) + self.script_editor.setPlainText("") + self.updateScriptsDropdown() + self.loadScriptContents(check=False) + self.loadScriptState() + self.setScriptState() + + else: + content = "" + self.script_editor.setPlainText(content) + self.setScriptModified(False) + if self.current_folder + "/" + self.current_script in self.scrollPos: + del self.scrollPos[self.current_folder + + "/" + self.current_script] + if self.current_folder + "/" + self.current_script in self.cursorPos: + del self.cursorPos[self.current_folder + + "/" + self.current_script] + + self.setWindowTitle("KnobScripter - %s/%s" % + (self.current_folder, self.current_script)) + return + + def saveScriptContents(self, temp=True): + ''' Save the current contents of the editor into the python file. 
If temp == True, saves a .py.autosave file ''' + log("\n# About to save script contents now.") + log("Temp mode is: " + str(temp)) + log("self.current_folder: " + self.current_folder) + log("self.current_script: " + self.current_script) + script_path = os.path.join( + self.scripts_dir, self.current_folder, self.current_script) + script_path_temp = script_path + ".autosave" + orig_content = "" + content = self.script_editor.toPlainText().encode('utf8') + + if temp == True: + if os.path.isfile(script_path): + with open(script_path, 'r') as script: + orig_content = script.read() + # If script path doesn't exist and autosave does but the script is empty... + elif content == "" and os.path.isfile(script_path_temp): + os.remove(script_path_temp) + return + if content != orig_content: + with open(script_path_temp, 'w') as script: + script.write(content) + else: + if os.path.isfile(script_path_temp): + os.remove(script_path_temp) + log("Nothing to save") + return + else: + with open(script_path, 'w') as script: + script.write(self.script_editor.toPlainText().encode('utf8')) + # Clear trash + if os.path.isfile(script_path_temp): + os.remove(script_path_temp) + log("Removed " + script_path_temp) + self.setScriptModified(False) + self.saveScrollValue() + self.saveCursorPosValue() + log("Saved " + script_path + "\n---") + return + + def deleteScript(self, check=True, folder=""): + ''' Get the contents of the selected script and populate the editor ''' + log("# About to delete the .py and/or autosave script now.") + if folder == "": + folder = self.current_folder + script_path = os.path.join( + self.scripts_dir, folder, self.current_script) + script_path_temp = script_path + ".autosave" + if check: + msgBox = QtWidgets.QMessageBox() + msgBox.setText("You're about to delete this script.") + msgBox.setInformativeText( + "Are you sure you want to delete {}?".format(self.current_script)) + msgBox.setStandardButtons( + QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) + 
msgBox.setIcon(QtWidgets.QMessageBox.Question) + msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) + msgBox.setDefaultButton(QtWidgets.QMessageBox.No) + reply = msgBox.exec_() + if reply == QtWidgets.QMessageBox.No: + return False + + if os.path.isfile(script_path_temp): + os.remove(script_path_temp) + log("Removed " + script_path_temp) + + if os.path.isfile(script_path): + os.remove(script_path) + log("Removed " + script_path) + + return True + + def folderDropdownChanged(self): + '''Executed when the current folder dropdown is changed''' + self.saveScriptState() + log("# folder dropdown changed") + folders_dropdown = self.current_folder_dropdown + fd_value = folders_dropdown.currentText() + fd_index = folders_dropdown.currentIndex() + fd_data = folders_dropdown.itemData(fd_index) + if fd_data == "create new": + panel = FileNameDialog(self, mode="folder") + # panel.setWidth(260) + # panel.addSingleLineInput("Name:","") + if panel.exec_(): + # Accepted + folder_name = panel.text + if os.path.isdir(os.path.join(self.scripts_dir, folder_name)): + self.messageBox("Folder already exists.") + self.setCurrentFolder(self.current_folder) + if self.makeScriptFolder(name=folder_name): + self.saveScriptContents(temp=True) + # Success creating the folder + self.current_folder = folder_name + self.updateFoldersDropdown() + self.setCurrentFolder(folder_name) + self.updateScriptsDropdown() + self.loadScriptContents(check=False) + else: + self.messageBox("There was a problem creating the folder.") + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex( + self.folder_index) + self.current_folder_dropdown.blockSignals(False) + else: + # Canceled/rejected + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex(self.folder_index) + self.current_folder_dropdown.blockSignals(False) + return + + elif fd_data == "open in browser": + current_folder_path = os.path.join( + self.scripts_dir, 
self.current_folder) + self.openInFileBrowser(current_folder_path) + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex(self.folder_index) + self.current_folder_dropdown.blockSignals(False) + return + + elif fd_data == "add custom path": + folder_path = nuke.getFilename('Select custom folder.') + if folder_path is not None: + if folder_path.endswith("/"): + aliasName = folder_path.split("/")[-2] + else: + aliasName = folder_path.split("/")[-1] + if not os.path.isdir(folder_path): + self.messageBox( + "Folder not found. Please try again with the full path to a folder.") + elif not len(aliasName): + self.messageBox( + "Folder with the same name already exists. Please delete or rename it first.") + else: + # All good + os.symlink(folder_path, os.path.join( + self.scripts_dir, aliasName)) + self.saveScriptContents(temp=True) + self.current_folder = aliasName + self.updateFoldersDropdown() + self.setCurrentFolder(aliasName) + self.updateScriptsDropdown() + self.loadScriptContents(check=False) + self.script_editor.setFocus() + return + self.current_folder_dropdown.blockSignals(True) + self.current_folder_dropdown.setCurrentIndex(self.folder_index) + self.current_folder_dropdown.blockSignals(False) + else: + # 1: Save current script as temp if needed + self.saveScriptContents(temp=True) + # 2: Set the new folder in the variables + self.current_folder = fd_data + self.folder_index = fd_index + # 3: Update the scripts dropdown + self.updateScriptsDropdown() + # 4: Load the current script! + self.loadScriptContents() + self.script_editor.setFocus() + + self.loadScriptState() + self.setScriptState() + + return + + def scriptDropdownChanged(self): + '''Executed when the current script dropdown is changed. Should only be called by the manual dropdown change. 
Not by other functions.''' + self.saveScriptState() + scripts_dropdown = self.current_script_dropdown + sd_value = scripts_dropdown.currentText() + sd_index = scripts_dropdown.currentIndex() + sd_data = scripts_dropdown.itemData(sd_index) + if sd_data == "create new": + self.current_script_dropdown.blockSignals(True) + panel = FileNameDialog(self, mode="script") + if panel.exec_(): + # Accepted + script_name = panel.text + ".py" + script_path = os.path.join( + self.scripts_dir, self.current_folder, script_name) + log(script_name) + log(script_path) + if os.path.isfile(script_path): + self.messageBox("Script already exists.") + self.current_script_dropdown.setCurrentIndex( + self.script_index) + if self.makeScriptFile(name=script_name, folder=self.current_folder): + # Success creating the folder + self.saveScriptContents(temp=True) + self.updateScriptsDropdown() + if self.current_script != "Untitled.py": + self.script_editor.setPlainText("") + self.current_script = script_name + self.setCurrentScript(script_name) + self.saveScriptContents(temp=False) + # self.loadScriptContents() + else: + self.messageBox("There was a problem creating the script.") + self.current_script_dropdown.setCurrentIndex( + self.script_index) + else: + # Canceled/rejected + self.current_script_dropdown.setCurrentIndex(self.script_index) + return + self.current_script_dropdown.blockSignals(False) + + elif sd_data == "create duplicate": + self.current_script_dropdown.blockSignals(True) + current_folder_path = os.path.join( + self.scripts_dir, self.current_folder, self.current_script) + current_script_path = os.path.join( + self.scripts_dir, self.current_folder, self.current_script) + + current_name = self.current_script + if self.current_script.endswith(".py"): + current_name = current_name[:-3] + + test_name = current_name + while True: + test_name += "_copy" + new_script_path = os.path.join( + self.scripts_dir, self.current_folder, test_name + ".py") + if not os.path.isfile(new_script_path): 
                    break

            script_name = test_name + ".py"

            if self.makeScriptFile(name=script_name, folder=self.current_folder):
                # Success creating the duplicate script file on disk.
                self.saveScriptContents(temp=True)
                self.updateScriptsDropdown()
                # self.script_editor.setPlainText("")
                self.current_script = script_name
                self.setCurrentScript(script_name)
                self.script_editor.setFocus()
            else:
                self.messageBox("There was a problem duplicating the script.")
                self.current_script_dropdown.setCurrentIndex(self.script_index)

            self.current_script_dropdown.blockSignals(False)

        elif sd_data == "open in browser":
            # Reveal the current script in the OS file browser, then restore
            # the previously selected dropdown entry.
            current_script_path = os.path.join(
                self.scripts_dir, self.current_folder, self.current_script)
            self.openInFileBrowser(current_script_path)
            self.current_script_dropdown.blockSignals(True)
            self.current_script_dropdown.setCurrentIndex(self.script_index)
            self.current_script_dropdown.blockSignals(False)
            return

        elif sd_data == "delete script":
            if self.deleteScript():
                self.updateScriptsDropdown()
                self.loadScriptContents()
            else:
                # Deletion cancelled/failed: restore the previous selection.
                self.current_script_dropdown.blockSignals(True)
                self.current_script_dropdown.setCurrentIndex(self.script_index)
                self.current_script_dropdown.blockSignals(False)

        else:
            # A regular script entry was picked: save the outgoing script,
            # switch to the new one and restore its saved state.
            self.saveScriptContents()
            self.current_script = sd_data
            self.script_index = sd_index
            self.setCurrentScript(self.current_script)
            self.loadScriptContents()
            self.script_editor.setFocus()
            self.loadScriptState()
            self.setScriptState()
        return

    def setScriptModified(self, modified=True):
        ''' Sets self.current_script_modified, title and whatever else we need '''
        self.current_script_modified = modified
        title_modified_string = " [modified]"
        windowTitle = self.windowTitle().split(title_modified_string)[0]
        if modified == True:
            windowTitle += title_modified_string
        self.setWindowTitle(windowTitle)
        try:
            # Mirror the modified flag in the dropdown item text with "(*)".
            scripts_dropdown = self.current_script_dropdown
            sd_index = scripts_dropdown.currentIndex()
            sd_data = scripts_dropdown.itemData(sd_index)
            if modified == False:
                scripts_dropdown.setItemText(sd_index, sd_data)
            else:
                scripts_dropdown.setItemText(sd_index, sd_data + "(*)")
        except:
            # NOTE(review): bare except silently ignores any dropdown error;
            # consider narrowing to the expected Qt exceptions.
            pass

    def openInFileBrowser(self, path=""):
        ''' Reveal *path* in the OS file browser (falls back to KS_DIR). '''
        OS = platform.system()
        if not os.path.exists(path):
            path = KS_DIR
        if OS == "Windows":
            os.startfile(path)
        elif OS == "Darwin":
            subprocess.Popen(["open", path])
        else:
            # Assume a Linux-like desktop with xdg-open available.
            subprocess.Popen(["xdg-open", path])

    def loadScriptState(self):
        '''
        Loads the last state of the script from a file inside the SE directory's root.
        SAVES self.scroll_pos, self.cursor_pos, self.last_open_script
        '''
        self.state_dict = {}
        if not os.path.isfile(self.state_txt_path):
            return False
        else:
            with open(self.state_txt_path, "r") as f:
                self.state_dict = json.load(f)

        log("Loading script state into self.state_dict, self.scrollPos, self.cursorPos")
        log(self.state_dict)

        if "scroll_pos" in self.state_dict:
            self.scrollPos = self.state_dict["scroll_pos"]
        if "cursor_pos" in self.state_dict:
            self.cursorPos = self.state_dict["cursor_pos"]

    def setScriptState(self):
        '''
        Sets the already script state from self.state_dict into the current script if applicable
        '''
        # Keys in the state dict are "<folder>/<script>" strings.
        script_fullname = self.current_folder + "/" + self.current_script

        if "scroll_pos" in self.state_dict:
            if script_fullname in self.state_dict["scroll_pos"]:
                self.script_editor.verticalScrollBar().setValue(
                    int(self.state_dict["scroll_pos"][script_fullname]))

        if "cursor_pos" in self.state_dict:
            if script_fullname in self.state_dict["cursor_pos"]:
                # Stored as [position, anchor]; restore the selection by
                # moving to the anchor first, then keeping it while moving
                # to the cursor position.
                cursor = self.script_editor.textCursor()
                cursor.setPosition(int(
                    self.state_dict["cursor_pos"][script_fullname][1]), QtGui.QTextCursor.MoveAnchor)
                cursor.setPosition(int(
                    self.state_dict["cursor_pos"][script_fullname][0]), QtGui.QTextCursor.KeepAnchor)
                self.script_editor.setTextCursor(cursor)

        if 'splitter_sizes' in self.state_dict:
            self.splitter.setSizes(self.state_dict['splitter_sizes'])

    def setLastScript(self):
        ''' Re-open the folder/script recorded in the saved state, if any. '''
        if 'last_folder' in self.state_dict and 'last_script' in self.state_dict:
            self.updateFoldersDropdown()
            self.setCurrentFolder(self.state_dict['last_folder'])
            self.updateScriptsDropdown()
            self.setCurrentScript(self.state_dict['last_script'])
            self.loadScriptContents()
            self.script_editor.setFocus()

    def saveScriptState(self):
        ''' Stores the current state of the script into a file inside the SE directory's root '''
        log("About to save script state...")
        '''
        # self.state_dict = {}
        if os.path.isfile(self.state_txt_path):
            with open(self.state_txt_path, "r") as f:
                self.state_dict = json.load(f)

            if "scroll_pos" in self.state_dict:
                self.scrollPos = self.state_dict["scroll_pos"]
            if "cursor_pos" in self.state_dict:
                self.cursorPos = self.state_dict["cursor_pos"]

        '''
        # Re-read the state file first so values for other scripts survive.
        self.loadScriptState()

        # Overwrite current values into the scriptState
        self.saveScrollValue()
        self.saveCursorPosValue()

        self.state_dict['scroll_pos'] = self.scrollPos
        self.state_dict['cursor_pos'] = self.cursorPos
        self.state_dict['last_folder'] = self.current_folder
        self.state_dict['last_script'] = self.current_script
        self.state_dict['splitter_sizes'] = self.splitter.sizes()

        with open(self.state_txt_path, "w") as f:
            # NOTE(review): json.dump() returns None, so `state` (and this
            # method's return value) is always None — callers ignore it.
            state = json.dump(self.state_dict, f, sort_keys=True, indent=4)
        return state

    # Autosave background loop
    def autosave(self):
        ''' Flush pending edits to disk when the autosave flag is set. '''
        if self.toAutosave:
            # Save the script...
            self.saveScriptContents()
            self.toAutosave = False
            self.saveScriptState()
            log("autosaving...")
        return

    # Global stuff
    def setTextSelection(self):
        ''' Push the editor's current selection into the highlighter. '''
        self.highlighter.selected_text = self.script_editor.textCursor().selection().toPlainText()
        return

    def eventFilter(self, object, event):
        # NOTE(review): both branches call the same base implementation, so
        # this filter is currently a no-op pass-through; the KeyPress test
        # has no effect. Left as-is — possibly a hook for future handling.
        if event.type() == QtCore.QEvent.KeyPress:
            return QtWidgets.QWidget.eventFilter(self, object, event)
        else:
            return QtWidgets.QWidget.eventFilter(self, object, event)

    def resizeEvent(self, res_event):
        ''' Hide the node/script labels when the panel is narrower than 460px. '''
        w = self.frameGeometry().width()
        self.current_node_label_node.setVisible(w > 460)
        self.script_label.setVisible(w > 460)
        return super(KnobScripter, self).resizeEvent(res_event)

    def changeClicked(self, newNode=""):
        ''' Change node '''
        # Python 2 print statement; also doubles as a check that self.node
        # still exists (name() raises if the node was deleted).
        try:
            print "Changing from " + self.node.name()
        except:
            self.node = None
        if not len(nuke.selectedNodes()):
            self.exitNodeMode()
            return
        # Refresh the module-level selection via the Nuke menu command.
        nuke.menu("Nuke").findItem(
            "Edit/Node/Update KnobScripter Context").invoke()
        selection = knobScripterSelectedNodes
        if self.nodeMode:  # Only update the number of unsaved knobs if we were already in node mode
            if self.node is not None:
                updatedCount = self.updateUnsavedKnobs()
            else:
                updatedCount = 0
        else:
            updatedCount = 0
        self.autosave()
        if newNode != "" and nuke.exists(newNode):
            selection = [newNode]
        elif not len(selection):
            # Nothing selected: ask the user for a node by name.
            node_dialog = ChooseNodeDialog(self)
            if node_dialog.exec_():
                # Accepted
                selection = [nuke.toNode(node_dialog.name)]
            else:
                return

        # Change to node mode...
        self.node_mode_bar.setVisible(True)
        self.script_mode_bar.setVisible(False)
        if not self.nodeMode:
            # Leaving script mode: persist the script before switching.
            self.saveScriptContents()
            self.toAutosave = False
            self.saveScriptState()
            self.splitter.setSizes([0, 1])
        self.nodeMode = True

        # If already selected, pass
        if self.node is not None and selection[0].fullName() == self.node.fullName():
            self.messageBox("Please select a different node first!")
            return
        elif updatedCount > 0:
            # Unsaved knob edits on the outgoing node: ask before switching.
            msgBox = QtWidgets.QMessageBox()
            msgBox.setText(
                "Save changes to %s knob%s before changing the node?" % (str(updatedCount), int(updatedCount > 1) * "s"))
            msgBox.setStandardButtons(
                QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
            msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
            msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes)
            reply = msgBox.exec_()
            if reply == QtWidgets.QMessageBox.Yes:
                self.saveAllKnobValues(check=False)
            elif reply == QtWidgets.QMessageBox.Cancel:
                return
        if len(selection) > 1:
            self.messageBox(
                "More than one node selected.\nChanging knobChanged editor to %s" % selection[0].fullName())
        # Reinitialise everything, wooo!
        self.current_knob_dropdown.blockSignals(True)
        self.node = selection[0]

        # Reset the editor and per-node caches for the newly selected node.
        self.script_editor.setPlainText("")
        self.unsavedKnobs = {}
        self.scrollPos = {}
        self.setWindowTitle("KnobScripter - %s %s" %
                            (self.node.fullName(), self.knob))
        self.current_node_label_name.setText(self.node.fullName())

        # Guard flag so dropdown repopulation doesn't trigger knob loads.
        self.toLoadKnob = False
        self.updateKnobDropdown()  # onee
        # self.current_knob_dropdown.repaint()
        # self.current_knob_dropdown.setMinimumWidth(self.current_knob_dropdown.minimumSizeHint().width())
        self.toLoadKnob = True
        self.setCurrentKnob(self.knob)
        self.loadKnobValue(False)
        self.script_editor.setFocus()
        self.setKnobModified(False)
        self.current_knob_dropdown.blockSignals(False)
        # self.current_knob_dropdown.setMinimumContentsLength(80)
        return

    def exitNodeMode(self):
        ''' Leave node mode and restore the script-editing UI and state. '''
        self.nodeMode = False
        self.setWindowTitle("KnobScripter - Script Mode")
        self.node_mode_bar.setVisible(False)
        self.script_mode_bar.setVisible(True)
        self.node = nuke.toNode("root")
        # self.updateFoldersDropdown()
        # self.updateScriptsDropdown()
        self.splitter.setSizes([1, 1])
        self.loadScriptState()
        self.setLastScript()

        self.loadScriptContents(check=False)
        self.setScriptState()

    def clearConsole(self):
        ''' Clear the mirrored console by baselining the SE output text. '''
        self.origConsoleText = self.nukeSEOutput.document().toPlainText().encode("utf8")
        self.script_output.setPlainText("")

    def toggleFRW(self, frw_pressed):
        ''' Show/hide the Find-Replace widget and move focus accordingly. '''
        self.frw_open = frw_pressed
        self.frw.setVisible(self.frw_open)
        if self.frw_open:
            self.frw.find_lineEdit.setFocus()
            self.frw.find_lineEdit.selectAll()
        else:
            self.script_editor.setFocus()
        return

    def openSnippets(self):
        ''' Whenever the 'snippets' button is pressed... open the panel '''
        global SnippetEditPanel
        if SnippetEditPanel == "":
            SnippetEditPanel = SnippetsPanel(self)

        if not SnippetEditPanel.isVisible():
            SnippetEditPanel.reload()

        if SnippetEditPanel.show():
            # Panel accepted: reload snippets, then drop the global so the
            # next open creates a fresh panel.
            self.snippets = self.loadSnippets(maxDepth=5)
            SnippetEditPanel = ""

    def loadSnippets(self, path="", maxDepth=5, depth=0):
        '''
        Load prefs recursive. When maximum recursion depth, ignores paths.
        '''
        max_depth = maxDepth
        cur_depth = depth
        if path == "":
            path = self.snippets_txt_path
        if not os.path.isfile(path):
            return {}
        else:
            loaded_snippets = {}
            with open(path, "r") as f:
                file = json.load(f)
            for i, (key, val) in enumerate(file.items()):
                # Keys like "[custom-path-1]" point at another snippets file
                # to be merged in, up to max_depth levels deep.
                if re.match(r"\[custom-path-[0-9]+\]$", key):
                    if cur_depth < max_depth:
                        new_dict = self.loadSnippets(
                            path=val, maxDepth=max_depth, depth=cur_depth + 1)
                        loaded_snippets.update(new_dict)
                else:
                    loaded_snippets[key] = val
            return loaded_snippets

    def messageBox(self, the_text=""):
        ''' Just a simple message box '''
        if self.isPane:
            msgBox = QtWidgets.QMessageBox()
        else:
            msgBox = QtWidgets.QMessageBox(self)
        msgBox.setText(the_text)
        msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
        msgBox.exec_()

    def openPrefs(self):
        ''' Open the preferences panel '''
        global PrefsPanel
        if PrefsPanel == "":
            PrefsPanel = KnobScripterPrefs(self)

        if PrefsPanel.show():
            PrefsPanel = ""

    def loadPrefs(self):
        ''' Load prefs '''
        # NOTE(review): returns [] when the file is missing but a dict when
        # present (json.load of the prefs file) — inconsistent return type.
        if not os.path.isfile(self.prefs_txt):
            return []
        else:
            with open(self.prefs_txt, "r") as f:
                prefs = json.load(f)
            return prefs

    def runScript(self):
        ''' Run the current script... '''
        self.script_editor.runScript()

    def saveScrollValue(self):
        ''' Save scroll values '''
        # Keyed by knob name in node mode, "<folder>/<script>" otherwise.
        if self.nodeMode:
            self.scrollPos[self.knob] = self.script_editor.verticalScrollBar(
            ).value()
        else:
            self.scrollPos[self.current_folder + "/" +
                           self.current_script] = self.script_editor.verticalScrollBar().value()

    def saveCursorPosValue(self):
        ''' Save cursor pos and anchor values '''
        self.cursorPos[self.current_folder + "/" + self.current_script] = [
            self.script_editor.textCursor().position(), self.script_editor.textCursor().anchor()]

    def closeEvent(self, close_event):
        ''' Prompt to save unsaved knobs (node mode) or autosave (script mode). '''
        if self.nodeMode:
            updatedCount = self.updateUnsavedKnobs()
            if updatedCount > 0:
                msgBox = QtWidgets.QMessageBox()
                msgBox.setText("Save changes to %s knob%s before closing?" % (
                    str(updatedCount), int(updatedCount > 1) * "s"))
                msgBox.setStandardButtons(
                    QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
                msgBox.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
                msgBox.setDefaultButton(QtWidgets.QMessageBox.Yes)
                reply = msgBox.exec_()
                if reply == QtWidgets.QMessageBox.Yes:
                    self.saveAllKnobValues(check=False)
                    close_event.accept()
                    return
                elif reply == QtWidgets.QMessageBox.Cancel:
                    close_event.ignore()
                    return
            else:
                close_event.accept()
        else:
            self.autosave()
            # Deregister this instance from the module-level registry.
            if self in AllKnobScripters:
                AllKnobScripters.remove(self)
            close_event.accept()

    # Landing functions

    def refreshClicked(self):
        ''' Function to refresh the dropdowns '''
        if self.nodeMode:
            # Remember the current knob, rebuild the dropdown, reselect it
            # if it still exists.
            knob = self.current_knob_dropdown.itemData(
                self.current_knob_dropdown.currentIndex()).encode('UTF8')
            self.current_knob_dropdown.blockSignals(True)
            self.current_knob_dropdown.clear()  # First remove all items
            self.updateKnobDropdown()
            availableKnobs = []
            for i in range(self.current_knob_dropdown.count()):
                if self.current_knob_dropdown.itemData(i) is not None:
                    availableKnobs.append(
                        self.current_knob_dropdown.itemData(i).encode('UTF8'))
            if knob in availableKnobs:
                self.setCurrentKnob(knob)
            self.current_knob_dropdown.blockSignals(False)
        else:
            folder = self.current_folder
            script = self.current_script
            self.autosave()
            self.updateFoldersDropdown()
            self.setCurrentFolder(folder)
            self.updateScriptsDropdown()
            self.setCurrentScript(script)
            self.script_editor.setFocus()

    def reloadClicked(self):
        ''' Reload the knob value (node mode) or script file from disk. '''
        if self.nodeMode:
            self.loadKnobValue()
        else:
            log("Node mode is off")
            self.loadScriptContents(check=True, pyOnly=True)

    def saveClicked(self):
        ''' Save the current knob value or script, depending on mode. '''
        if self.nodeMode:
            self.saveKnobValue(False)
        else:
            self.saveScriptContents(temp=False)

    def setModified(self):
        ''' Mark the current knob/script as modified and arm autosave. '''
        if self.nodeMode:
            self.setKnobModified(True)
        elif not self.current_script_modified:
            self.setScriptModified(True)
        if not self.nodeMode:
            self.toAutosave = True

    def pin(self, pressed):
        ''' Toggle the always-on-top window flag. '''
        if pressed:
            self.setWindowFlags(self.windowFlags() |
                                QtCore.Qt.WindowStaysOnTopHint)
            self.pinned = True
            self.show()
        else:
            self.setWindowFlags(self.windowFlags() & ~
                                QtCore.Qt.WindowStaysOnTopHint)
            self.pinned = False
            self.show()

    def findSE(self):
        ''' Return the first widget whose title contains "Script Editor". '''
        for widget in QtWidgets.QApplication.allWidgets():
            if "Script Editor" in widget.windowTitle():
                return widget

    # Functions for Nuke's Script Editor
    def findScriptEditors(self):
        ''' Return all Script Editor panel widgets currently instantiated. '''
        script_editors = []
        for widget in QtWidgets.QApplication.allWidgets():
            # The children() check filters out title-bar proxies etc.
            if "Script Editor" in widget.windowTitle() and len(widget.children()) > 5:
                script_editors.append(widget)
        return script_editors

    def findSEInput(self, se):
        # Relies on Nuke's internal Script Editor widget layout — fragile.
        return se.children()[-1].children()[0]

    def findSEOutput(self, se):
        return se.children()[-1].children()[1]

    def findSERunBtn(self, se):
        ''' Find the SE "run" button by its tooltip; False if not found. '''
        for btn in se.children():
            try:
                if "Run the current script" in btn.toolTip():
                    return btn
            except:
                pass
        return False

    def setSEOutputEvent(self):
        ''' Mirror every Script Editor output pane into our console. '''
        nukeScriptEditors = self.findScriptEditors()
        # Take the console from the first script editor found...
        self.origConsoleText = self.nukeSEOutput.document().toPlainText().encode("utf8")
        for se in nukeScriptEditors:
            se_output = self.findSEOutput(se)
            se_output.textChanged.connect(
                partial(consoleChanged, se_output, self))
            consoleChanged(se_output, self)  # Initialise.


class KnobScripterPane(KnobScripter):
    ''' KnobScripter variant hosted inside a Nuke pane instead of a window. '''

    def __init__(self, node="", knob="knobChanged"):
        super(KnobScripterPane, self).__init__()
        self.isPane = True

    def showEvent(self, the_event):
        # Pane margins are cosmetic; ignore any failure to remove them.
        try:
            killPaneMargins(self)
        except:
            pass
        return KnobScripter.showEvent(self, the_event)

    def hideEvent(self, the_event):
        self.autosave()
        return KnobScripter.hideEvent(self, the_event)


def consoleChanged(self, ks):
    ''' This will be called every time the ScriptEditor Output text is changed '''
    # `self` here is the Script Editor output widget (bound via partial),
    # not a class instance; `ks` is the KnobScripter it mirrors into.
    try:
        if ks:  # KS exists
            ksOutput = ks.script_output  # The console TextEdit widget
            ksText = self.document().toPlainText().encode("utf8")
            # The text from the console that will be omitted
            origConsoleText = ks.origConsoleText
            if ksText.startswith(origConsoleText):
                ksText = ksText[len(origConsoleText):]
            else:
                ks.origConsoleText = ""
            ksOutput.setPlainText(ksText)
            # Keep the mirrored console scrolled to the bottom.
            ksOutput.verticalScrollBar().setValue(ksOutput.verticalScrollBar().maximum())
    except:
        pass


def killPaneMargins(widget_object):
    ''' Remove layout margins on the ancestor widgets that frame a pane. '''
    if widget_object:
        target_widgets = set()
        target_widgets.add(widget_object.parentWidget().parentWidget())
        target_widgets.add(widget_object.parentWidget(
        ).parentWidget().parentWidget().parentWidget())

        for widget_layout in target_widgets:
            try:
                widget_layout.layout().setContentsMargins(0, 0, 0, 0)
            except:
                pass


def debug(lev=0):
    ''' Convenience function to set the KnobScripter on debug mode'''
    # levels = [logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL]
    # for handler in logging.root.handlers[:]:
    #     logging.root.removeHandler(handler)
    # logging.basicConfig(level=levels[lev])
    # Simple global flag instead of the logging module for now; `lev` is
    # currently unused.
    global DebugMode
    DebugMode = True


def log(text):
    ''' Display a debug info message. Yes, in a stupid way. I know.'''
    global DebugMode
    if DebugMode:
        print(text)


# ---------------------------------------------------------------------
# Dialogs
# ---------------------------------------------------------------------
class FileNameDialog(QtWidgets.QDialog):
    '''
    Dialog for creating new... (mode = "folder", "script" or "knob").
    '''

    def __init__(self, parent=None, mode="folder", text=""):
        # Panes cannot parent a dialog, so fall back to an unparented one.
        if parent.isPane:
            super(FileNameDialog, self).__init__()
        else:
            super(FileNameDialog, self).__init__(parent)
        # self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
        self.mode = mode
        self.text = text

        title = "Create new {}.".format(self.mode)
        self.setWindowTitle(title)

        self.initUI()

    def initUI(self):
        # Widgets
        self.name_label = QtWidgets.QLabel("Name: ")
        self.name_label.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.name_lineEdit = QtWidgets.QLineEdit()
        self.name_lineEdit.setText(self.text)
        self.name_lineEdit.textChanged.connect(self.nameChanged)

        # Buttons
        self.button_box = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        self.button_box.button(
            QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "")
        self.button_box.accepted.connect(self.clickedOk)
        self.button_box.rejected.connect(self.clickedCancel)

        # Layout
        self.master_layout = QtWidgets.QVBoxLayout()
        self.name_layout = QtWidgets.QHBoxLayout()
        self.name_layout.addWidget(self.name_label)
        self.name_layout.addWidget(self.name_lineEdit)
        self.master_layout.addLayout(self.name_layout)
        self.master_layout.addWidget(self.button_box)
        self.setLayout(self.master_layout)

        self.name_lineEdit.setFocus()
        self.setMinimumWidth(250)

    def nameChanged(self):
        ''' Validate the typed name; revert the text if it is invalid. '''
        txt = self.name_lineEdit.text()
        m = r"[\w]*$"
        if self.mode == "knob":  # Knobs can't start with a number...
            m = r"[a-zA-Z_]+" + m

        if re.match(m, txt) or txt == "":
            self.text = txt
        else:
            self.name_lineEdit.setText(self.text)

        self.button_box.button(
            QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "")
        return

    def clickedOk(self):
        self.accept()
        return

    def clickedCancel(self):
        self.reject()
        return


class TextInputDialog(QtWidgets.QDialog):
    '''
    Simple dialog for a text input.
    '''

    def __init__(self, parent=None, name="", text="", title=""):
        if parent.isPane:
            super(TextInputDialog, self).__init__()
        else:
            super(TextInputDialog, self).__init__(parent)
        # self.setWindowFlags(self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)

        self.name = name  # title of textinput
        self.text = text  # default content of textinput

        self.setWindowTitle(title)

        self.initUI()

    def initUI(self):
        # Widgets
        self.name_label = QtWidgets.QLabel(self.name + ": ")
        self.name_label.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.name_lineEdit = QtWidgets.QLineEdit()
        self.name_lineEdit.setText(self.text)
        self.name_lineEdit.textChanged.connect(self.nameChanged)

        # Buttons
        self.button_box = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        # self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(self.text != "")
        self.button_box.accepted.connect(self.clickedOk)
        self.button_box.rejected.connect(self.clickedCancel)

        # Layout
        self.master_layout = QtWidgets.QVBoxLayout()
        self.name_layout = QtWidgets.QHBoxLayout()
        self.name_layout.addWidget(self.name_label)
        self.name_layout.addWidget(self.name_lineEdit)
        self.master_layout.addLayout(self.name_layout)
        self.master_layout.addWidget(self.button_box)
        self.setLayout(self.master_layout)

        self.name_lineEdit.setFocus()
        self.setMinimumWidth(250)

    def nameChanged(self):
        self.text = self.name_lineEdit.text()

    def clickedOk(self):
        self.accept()
        return

    def clickedCancel(self):
        self.reject()
        return


class ChooseNodeDialog(QtWidgets.QDialog):
    '''
    Dialog for selecting a node by its name. Only admits nodes that exist (including root, preferences...)
    '''

    def __init__(self, parent=None, name=""):
        if parent.isPane:
            super(ChooseNodeDialog, self).__init__()
        else:
            super(ChooseNodeDialog, self).__init__(parent)

        self.name = name  # Name of node (will be "" by default)
        self.allNodes = []

        self.setWindowTitle("Enter the node's name...")

        self.initUI()

    def initUI(self):
        # Widgets
        self.name_label = QtWidgets.QLabel("Name: ")
        self.name_label.setAlignment(
            QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
        self.name_lineEdit = QtWidgets.QLineEdit()
        self.name_lineEdit.setText(self.name)
        self.name_lineEdit.textChanged.connect(self.nameChanged)

        # Auto-complete against every node name in the script.
        self.allNodes = self.getAllNodes()
        completer = QtWidgets.QCompleter(self.allNodes, self)
        completer.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        self.name_lineEdit.setCompleter(completer)

        # Buttons
        self.button_box = QtWidgets.QDialogButtonBox(
            QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel)
        self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(
            nuke.exists(self.name))
        self.button_box.accepted.connect(self.clickedOk)
        self.button_box.rejected.connect(self.clickedCancel)

        # Layout
        self.master_layout = QtWidgets.QVBoxLayout()
        self.name_layout = QtWidgets.QHBoxLayout()
        self.name_layout.addWidget(self.name_label)
        self.name_layout.addWidget(self.name_lineEdit)
        self.master_layout.addLayout(self.name_layout)
        self.master_layout.addWidget(self.button_box)
        self.setLayout(self.master_layout)

        self.name_lineEdit.setFocus()
        self.setMinimumWidth(250)

    def getAllNodes(self):
        self.allNodes = [n.fullName() for n in nuke.allNodes(
            recurseGroups=True)]  # if parent is in current context??
        self.allNodes.extend(["root", "preferences"])
        return self.allNodes

    def nameChanged(self):
        ''' Enable OK only while the typed name matches an existing node. '''
        self.name = self.name_lineEdit.text()
        self.button_box.button(QtWidgets.QDialogButtonBox.Ok).setEnabled(
            self.name in self.allNodes)

    def clickedOk(self):
        self.accept()
        return

    def clickedCancel(self):
        self.reject()
        return


# ------------------------------------------------------------------------------------------------------
# Script Editor Widget
# Wouter Gilsing built an incredibly useful python script editor for his Hotbox Manager, so I had it
# really easy for this part!
# Starting from his script editor, I changed the style and added the sublime-like functionality.
# I think this bit of code has the potential to get used in many nuke tools.
# Credit to him: http://www.woutergilsing.com/
# Originally used on W_Hotbox v1.5: http://www.nukepedia.com/python/ui/w_hotbox
# ------------------------------------------------------------------------------------------------------
class KnobScripterTextEdit(QtWidgets.QPlainTextEdit):
    # Signal that will be emitted when the user has changed the text
    userChangedEvent = QtCore.Signal()

    def __init__(self, knobScripter=""):
        super(KnobScripterTextEdit, self).__init__()

        # Owning KnobScripter instance, or "" when used standalone.
        self.knobScripter = knobScripter
        self.selected_text = ""

        # Setup line numbers
        if self.knobScripter != "":
            self.tabSpaces = self.knobScripter.tabSpaces
        else:
            self.tabSpaces = 4
        self.lineNumberArea = KSLineNumberArea(self)
        self.blockCountChanged.connect(self.updateLineNumberAreaWidth)
        self.updateRequest.connect(self.updateLineNumberArea)
        self.updateLineNumberAreaWidth()

        # Highlight line
        self.cursorPositionChanged.connect(self.highlightCurrentLine)

    # --------------------------------------------------------------------------------------------------
    # This is adapted from an original version by Wouter Gilsing.
    # Extract from his original comments:
    # While researching the implementation of line number, I had a look at Nuke's Blinkscript node. [..]
    # thefoundry.co.uk/products/nuke/developers/100/pythonreference/nukescripts.blinkscripteditor-pysrc.html
    # I stripped and modified the useful bits of the line number related parts of the code [..]
    # Credits to theFoundry for writing the blinkscripteditor, best example code I could wish for.
    # --------------------------------------------------------------------------------------------------

    def lineNumberAreaWidth(self):
        ''' Pixel width needed to display the highest line number. '''
        digits = 1
        maxNum = max(1, self.blockCount())
        while (maxNum >= 10):
            maxNum /= 10
            digits += 1

        space = 7 + self.fontMetrics().width('9') * digits
        return space

    def updateLineNumberAreaWidth(self):
        self.setViewportMargins(self.lineNumberAreaWidth(), 0, 0, 0)

    def updateLineNumberArea(self, rect, dy):
        # Scroll or repaint the gutter to track the viewport.
        if (dy):
            self.lineNumberArea.scroll(0, dy)
        else:
            self.lineNumberArea.update(
                0, rect.y(), self.lineNumberArea.width(), rect.height())

        if (rect.contains(self.viewport().rect())):
            self.updateLineNumberAreaWidth()

    def resizeEvent(self, event):
        QtWidgets.QPlainTextEdit.resizeEvent(self, event)

        # Keep the gutter glued to the left edge of the text area.
        cr = self.contentsRect()
        self.lineNumberArea.setGeometry(QtCore.QRect(
            cr.left(), cr.top(), self.lineNumberAreaWidth(), cr.height()))

    def lineNumberAreaPaintEvent(self, event):
        ''' Paint the visible line numbers, highlighting the current line. '''
        if self.isReadOnly():
            return

        painter = QtGui.QPainter(self.lineNumberArea)
        painter.fillRect(event.rect(), QtGui.QColor(36, 36, 36))  # Number bg

        block = self.firstVisibleBlock()
        blockNumber = block.blockNumber()
        top = int(self.blockBoundingGeometry(
            block).translated(self.contentOffset()).top())
        bottom = top + int(self.blockBoundingRect(block).height())
        currentLine = self.document().findBlock(
            self.textCursor().position()).blockNumber()

        painter.setPen(self.palette().color(QtGui.QPalette.Text))

        painterFont = QtGui.QFont()
        painterFont.setFamily("Courier")
        painterFont.setStyleHint(QtGui.QFont.Monospace)
        painterFont.setFixedPitch(True)
        if self.knobScripter != "":
            painterFont.setPointSize(self.knobScripter.fontSize)
            painter.setFont(self.knobScripter.script_editor_font)

        # Walk the visible blocks and draw each number right-aligned.
        while (block.isValid() and top <= event.rect().bottom()):

            textColor = QtGui.QColor(110, 110, 110)  # Numbers

            if blockNumber == currentLine and self.hasFocus():
                textColor = QtGui.QColor(255, 170, 0)  # Number highlighted

            painter.setPen(textColor)

            number = "%s" % str(blockNumber + 1)
            painter.drawText(-3, top, self.lineNumberArea.width(),
                             self.fontMetrics().height(), QtCore.Qt.AlignRight, number)

            # Move to the next block
            block = block.next()
            top = bottom
            bottom = top + int(self.blockBoundingRect(block).height())
            blockNumber += 1

    def keyPressEvent(self, event):
        '''
        Custom actions for specific keystrokes
        '''
        key = event.key()
        ctrl = bool(event.modifiers() & Qt.ControlModifier)
        alt = bool(event.modifiers() & Qt.AltModifier)
        shift = bool(event.modifiers() & Qt.ShiftModifier)
        pre_scroll = self.verticalScrollBar().value()
        # modifiers = QtWidgets.QApplication.keyboardModifiers()
        # ctrl = (modifiers == Qt.ControlModifier)
        # shift = (modifiers == Qt.ShiftModifier)

        up_arrow = 16777235
        down_arrow = 16777237

        # if Tab convert to Space
        if key == 16777217:
            self.indentation('indent')

        # if Shift+Tab remove indent
        elif key == 16777218:
            self.indentation('unindent')

        # if BackSpace try to snap to previous indent level
        elif key == 16777219:
            if not self.unindentBackspace():
                QtWidgets.QPlainTextEdit.keyPressEvent(self, event)
        else:
            # COOL BEHAVIORS SIMILAR TO SUBLIME GO NEXT!
+ cursor = self.textCursor() + cpos = cursor.position() + apos = cursor.anchor() + text_before_cursor = self.toPlainText()[:min(cpos, apos)] + text_after_cursor = self.toPlainText()[max(cpos, apos):] + text_all = self.toPlainText() + to_line_start = text_before_cursor[::-1].find("\n") + if to_line_start == -1: + # Position of the start of the line that includes the cursor selection start + linestart_pos = 0 + else: + linestart_pos = len(text_before_cursor) - to_line_start + + to_line_end = text_after_cursor.find("\n") + if to_line_end == -1: + # Position of the end of the line that includes the cursor selection end + lineend_pos = len(text_all) + else: + lineend_pos = max(cpos, apos) + to_line_end + + text_before_lines = text_all[:linestart_pos] + text_after_lines = text_all[lineend_pos:] + if len(text_after_lines) and text_after_lines.startswith("\n"): + text_after_lines = text_after_lines[1:] + text_lines = text_all[linestart_pos:lineend_pos] + + if cursor.hasSelection(): + selection = cursor.selection().toPlainText() + else: + selection = "" + if key == Qt.Key_ParenLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # ( + cursor.insertText("(" + selection + ")") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + # ) + elif key == Qt.Key_ParenRight and text_after_cursor.startswith(")"): + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + self.setTextCursor(cursor) + elif key == Qt.Key_BracketLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # [ + cursor.insertText("[" + selection + "]") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + # ] + elif key in [Qt.Key_BracketRight, 43] and text_after_cursor.startswith("]"): + 
cursor.movePosition(QtGui.QTextCursor.NextCharacter) + self.setTextCursor(cursor) + elif key == Qt.Key_BraceLeft and (len(selection) > 0 or re.match(r"[\s)}\];]+", text_after_cursor) or not len(text_after_cursor)): # { + cursor.insertText("{" + selection + "}") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + # } + elif key in [199, Qt.Key_BraceRight] and text_after_cursor.startswith("}"): + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + self.setTextCursor(cursor) + elif key == 34: # " + if len(selection) > 0: + cursor.insertText('"' + selection + '"') + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + # and not re.search(r"(?:[\s)\]]+|$)",text_before_cursor): + elif text_after_cursor.startswith('"') and '"' in text_before_cursor.split("\n")[-1]: + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + # If chars after cursor, act normal + elif not re.match(r"(?:[\s)\]]+|$)", text_after_cursor): + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + # If chars before cursor, act normal + elif not re.search(r"[\s.({\[,]$", text_before_cursor) and text_before_cursor != "": + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + else: + cursor.insertText('"' + selection + '"') + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + elif key == 39: # ' + if len(selection) > 0: + cursor.insertText("'" + selection + "'") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + # and not re.search(r"(?:[\s)\]]+|$)",text_before_cursor): + elif text_after_cursor.startswith("'") and "'" in text_before_cursor.split("\n")[-1]: + cursor.movePosition(QtGui.QTextCursor.NextCharacter) + # If chars after cursor, act normal + elif not 
re.match(r"(?:[\s)\]]+|$)", text_after_cursor): + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + # If chars before cursor, act normal + elif not re.search(r"[\s.({\[,]$", text_before_cursor) and text_before_cursor != "": + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + else: + cursor.insertText("'" + selection + "'") + cursor.setPosition(apos + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + elif key == 35 and len(selection): # (yes, a hash) + # If there's a selection, insert a hash at the start of each line.. how the fuck? + if selection != "": + selection_split = selection.split("\n") + if all(i.startswith("#") for i in selection_split): + selection_commented = "\n".join( + [s[1:] for s in selection_split]) # Uncommented + else: + selection_commented = "#" + "\n#".join(selection_split) + cursor.insertText(selection_commented) + if apos > cpos: + cursor.setPosition( + apos + len(selection_commented) - len(selection), QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos, QtGui.QTextCursor.KeepAnchor) + else: + cursor.setPosition(apos, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition( + cpos + len(selection_commented) - len(selection), QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + elif key == 68 and ctrl and shift: # Ctrl+Shift+D, to duplicate text or line/s + + if not len(selection): + self.setPlainText( + text_before_lines + text_lines + "\n" + text_lines + "\n" + text_after_lines) + cursor.setPosition( + apos + len(text_lines) + 1, QtGui.QTextCursor.MoveAnchor) + cursor.setPosition( + cpos + len(text_lines) + 1, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + self.verticalScrollBar().setValue(pre_scroll) + self.scrollToCursor() + else: + if text_before_cursor.endswith("\n") and not selection.startswith("\n"): + cursor.insertText(selection + "\n" + selection) + cursor.setPosition( + apos + len(selection) + 1, QtGui.QTextCursor.MoveAnchor) + 
cursor.setPosition( + cpos + len(selection) + 1, QtGui.QTextCursor.KeepAnchor) + else: + cursor.insertText(selection + selection) + cursor.setPosition( + apos + len(selection), QtGui.QTextCursor.MoveAnchor) + cursor.setPosition( + cpos + len(selection), QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + # Ctrl+Shift+Up, to move the selected line/s up + elif key == up_arrow and ctrl and shift and len(text_before_lines): + prev_line_start_distance = text_before_lines[:-1][::-1].find( + "\n") + if prev_line_start_distance == -1: + prev_line_start_pos = 0 # Position of the start of the previous line + else: + prev_line_start_pos = len( + text_before_lines) - 1 - prev_line_start_distance + prev_line = text_before_lines[prev_line_start_pos:] + + text_before_prev_line = text_before_lines[:prev_line_start_pos] + + if prev_line.endswith("\n"): + prev_line = prev_line[:-1] + + if len(text_after_lines): + text_after_lines = "\n" + text_after_lines + + self.setPlainText( + text_before_prev_line + text_lines + "\n" + prev_line + text_after_lines) + cursor.setPosition(apos - len(prev_line) - 1, + QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos - len(prev_line) - 1, + QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + self.verticalScrollBar().setValue(pre_scroll) + self.scrollToCursor() + return + + elif key == down_arrow and ctrl and shift: # Ctrl+Shift+Up, to move the selected line/s up + if not len(text_after_lines): + text_after_lines = "" + next_line_end_distance = text_after_lines.find("\n") + if next_line_end_distance == -1: + next_line_end_pos = len(text_all) + else: + next_line_end_pos = next_line_end_distance + next_line = text_after_lines[:next_line_end_pos] + text_after_next_line = text_after_lines[next_line_end_pos:] + + self.setPlainText(text_before_lines + next_line + + "\n" + text_lines + text_after_next_line) + cursor.setPosition(apos + len(next_line) + 1, + QtGui.QTextCursor.MoveAnchor) + cursor.setPosition(cpos + len(next_line) + 
1, + QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + self.verticalScrollBar().setValue(pre_scroll) + self.scrollToCursor() + return + + # If up key and nothing happens, go to start + elif key == up_arrow and not len(text_before_lines): + if not shift: + cursor.setPosition(0, QtGui.QTextCursor.MoveAnchor) + self.setTextCursor(cursor) + else: + cursor.setPosition(0, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + # If up key and nothing happens, go to start + elif key == down_arrow and not len(text_after_lines): + if not shift: + cursor.setPosition( + len(text_all), QtGui.QTextCursor.MoveAnchor) + self.setTextCursor(cursor) + else: + cursor.setPosition( + len(text_all), QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(cursor) + + # if enter or return, match indent level + elif key in [16777220, 16777221]: + self.indentNewLine() + else: + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + + self.scrollToCursor() + + def scrollToCursor(self): + self.cursor = self.textCursor() + # Does nothing, but makes the scroll go to the right place... 
+ self.cursor.movePosition(QtGui.QTextCursor.NoMove) + self.setTextCursor(self.cursor) + + def getCursorInfo(self): + + self.cursor = self.textCursor() + + self.firstChar = self.cursor.selectionStart() + self.lastChar = self.cursor.selectionEnd() + + self.noSelection = False + if self.firstChar == self.lastChar: + self.noSelection = True + + self.originalPosition = self.cursor.position() + self.cursorBlockPos = self.cursor.positionInBlock() + + def unindentBackspace(self): + ''' + #snap to previous indent level + ''' + self.getCursorInfo() + + if not self.noSelection or self.cursorBlockPos == 0: + return False + + # check text in front of cursor + textInFront = self.document().findBlock( + self.firstChar).text()[:self.cursorBlockPos] + + # check whether solely spaces + if textInFront != ' ' * self.cursorBlockPos: + return False + + # snap to previous indent level + spaces = len(textInFront) + for space in range(spaces - ((spaces - 1) / self.tabSpaces) * self.tabSpaces - 1): + self.cursor.deletePreviousChar() + + def indentNewLine(self): + + # in case selection covers multiple line, make it one line first + self.insertPlainText('') + + self.getCursorInfo() + + # check how many spaces after cursor + text = self.document().findBlock(self.firstChar).text() + + textInFront = text[:self.cursorBlockPos] + + if len(textInFront) == 0: + self.insertPlainText('\n') + return + + indentLevel = 0 + for i in textInFront: + if i == ' ': + indentLevel += 1 + else: + break + + indentLevel /= self.tabSpaces + + # find out whether textInFront's last character was a ':' + # if that's the case add another indent. 
+ # ignore any spaces at the end, however also + # make sure textInFront is not just an indent + if textInFront.count(' ') != len(textInFront): + while textInFront[-1] == ' ': + textInFront = textInFront[:-1] + + if textInFront[-1] == ':': + indentLevel += 1 + + # new line + self.insertPlainText('\n') + # match indent + self.insertPlainText(' ' * (self.tabSpaces * indentLevel)) + + def indentation(self, mode): + + pre_scroll = self.verticalScrollBar().value() + self.getCursorInfo() + + # if nothing is selected and mode is set to indent, simply insert as many + # space as needed to reach the next indentation level. + if self.noSelection and mode == 'indent': + + remainingSpaces = self.tabSpaces - \ + (self.cursorBlockPos % self.tabSpaces) + self.insertPlainText(' ' * remainingSpaces) + return + + selectedBlocks = self.findBlocks(self.firstChar, self.lastChar) + beforeBlocks = self.findBlocks( + last=self.firstChar - 1, exclude=selectedBlocks) + afterBlocks = self.findBlocks( + first=self.lastChar + 1, exclude=selectedBlocks) + + beforeBlocksText = self.blocks2list(beforeBlocks) + selectedBlocksText = self.blocks2list(selectedBlocks, mode) + afterBlocksText = self.blocks2list(afterBlocks) + + combinedText = '\n'.join( + beforeBlocksText + selectedBlocksText + afterBlocksText) + + # make sure the line count stays the same + originalBlockCount = len(self.toPlainText().split('\n')) + combinedText = '\n'.join(combinedText.split('\n')[:originalBlockCount]) + + self.clear() + self.setPlainText(combinedText) + + if self.noSelection: + self.cursor.setPosition(self.lastChar) + + # check whether the the orignal selection was from top to bottom or vice versa + else: + if self.originalPosition == self.firstChar: + first = self.lastChar + last = self.firstChar + firstBlockSnap = QtGui.QTextCursor.EndOfBlock + lastBlockSnap = QtGui.QTextCursor.StartOfBlock + else: + first = self.firstChar + last = self.lastChar + firstBlockSnap = QtGui.QTextCursor.StartOfBlock + lastBlockSnap = 
QtGui.QTextCursor.EndOfBlock + + self.cursor.setPosition(first) + self.cursor.movePosition( + firstBlockSnap, QtGui.QTextCursor.MoveAnchor) + self.cursor.setPosition(last, QtGui.QTextCursor.KeepAnchor) + self.cursor.movePosition( + lastBlockSnap, QtGui.QTextCursor.KeepAnchor) + + self.setTextCursor(self.cursor) + self.verticalScrollBar().setValue(pre_scroll) + + def findBlocks(self, first=0, last=None, exclude=[]): + blocks = [] + if last == None: + last = self.document().characterCount() + for pos in range(first, last + 1): + block = self.document().findBlock(pos) + if block not in blocks and block not in exclude: + blocks.append(block) + return blocks + + def blocks2list(self, blocks, mode=None): + text = [] + for block in blocks: + blockText = block.text() + if mode == 'unindent': + if blockText.startswith(' ' * self.tabSpaces): + blockText = blockText[self.tabSpaces:] + self.lastChar -= self.tabSpaces + elif blockText.startswith('\t'): + blockText = blockText[1:] + self.lastChar -= 1 + + elif mode == 'indent': + blockText = ' ' * self.tabSpaces + blockText + self.lastChar += self.tabSpaces + + text.append(blockText) + + return text + + def highlightCurrentLine(self): + ''' + Highlight currently selected line + ''' + extraSelections = [] + + selection = QtWidgets.QTextEdit.ExtraSelection() + + lineColor = QtGui.QColor(62, 62, 62, 255) + + selection.format.setBackground(lineColor) + selection.format.setProperty( + QtGui.QTextFormat.FullWidthSelection, True) + selection.cursor = self.textCursor() + selection.cursor.clearSelection() + + extraSelections.append(selection) + + self.setExtraSelections(extraSelections) + self.scrollToCursor() + + def format(self, rgb, style=''): + ''' + Return a QtWidgets.QTextCharFormat with the given attributes. 
class KSLineNumberArea(QtWidgets.QWidget):
    '''
    Margin widget that shows line numbers next to the script editor.

    The widget holds no drawing logic of its own: paint events are
    forwarded to the owning editor, which knows the scroll position
    and block geometry needed to place the numbers.
    '''

    def __init__(self, scriptEditor):
        super(KSLineNumberArea, self).__init__(scriptEditor)

        # Keep a reference to the editor that owns this margin so paint
        # events can be delegated back to it.
        self.scriptEditor = scriptEditor
        self.setStyleSheet("text-align: center;")

    def paintEvent(self, event):
        # All rendering is done by the editor's own paint routine.
        self.scriptEditor.lineNumberAreaPaintEvent(event)
        return
+ ''' + + def __init__(self, document, parent=None): + + super(KSScriptEditorHighlighter, self).__init__(document) + self.knobScripter = parent + self.script_editor = self.knobScripter.script_editor + self.selected_text = "" + self.selected_text_prev = "" + self.rules_sublime = "" + + self.styles = { + 'keyword': self.format([238, 117, 181], 'bold'), + 'string': self.format([242, 136, 135]), + 'comment': self.format([143, 221, 144]), + 'numbers': self.format([174, 129, 255]), + 'custom': self.format([255, 170, 0], 'italic'), + 'selected': self.format([255, 255, 255], 'bold underline'), + 'underline': self.format([240, 240, 240], 'underline'), + } + + self.keywords = [ + 'and', 'assert', 'break', 'class', 'continue', 'def', + 'del', 'elif', 'else', 'except', 'exec', 'finally', + 'for', 'from', 'global', 'if', 'import', 'in', + 'is', 'lambda', 'not', 'or', 'pass', 'print', + 'raise', 'return', 'try', 'while', 'yield', 'with', 'as' + ] + + self.operatorKeywords = [ + '=', '==', '!=', '<', '<=', '>', '>=', + '\+', '-', '\*', '/', '//', '\%', '\*\*', + '\+=', '-=', '\*=', '/=', '\%=', + '\^', '\|', '\&', '\~', '>>', '<<' + ] + + self.variableKeywords = ['int', 'str', + 'float', 'bool', 'list', 'dict', 'set'] + + self.numbers = ['True', 'False', 'None'] + self.loadAltStyles() + + self.tri_single = (QtCore.QRegExp("'''"), 1, self.styles['comment']) + self.tri_double = (QtCore.QRegExp('"""'), 2, self.styles['comment']) + + # rules + rules = [] + + rules += [(r'\b%s\b' % i, 0, self.styles['keyword']) + for i in self.keywords] + rules += [(i, 0, self.styles['keyword']) + for i in self.operatorKeywords] + rules += [(r'\b%s\b' % i, 0, self.styles['numbers']) + for i in self.numbers] + + rules += [ + + # integers + (r'\b[0-9]+\b', 0, self.styles['numbers']), + # Double-quoted string, possibly containing escape sequences + (r'"[^"\\]*(\\.[^"\\]*)*"', 0, self.styles['string']), + # Single-quoted string, possibly containing escape sequences + (r"'[^'\\]*(\\.[^'\\]*)*'", 0, 
    def loadAltStyles(self):
        ''' Loads other color styles apart from Nuke's default.

        Builds the "sublime" palette (self.styles_sublime), its keyword
        tables, the triple-quote delimiters and the compiled rule list
        (self.rules_sublime) used by highlightBlock().
        '''
        # Text-format palette for the sublime scheme.
        self.styles_sublime = {
            'base': self.format([255, 255, 255]),
            'keyword': self.format([237, 36, 110]),
            'string': self.format([237, 229, 122]),
            'comment': self.format([125, 125, 125]),
            'numbers': self.format([165, 120, 255]),
            'functions': self.format([184, 237, 54]),
            'blue': self.format([130, 226, 255], 'italic'),
            'arguments': self.format([255, 170, 10], 'italic'),
            'custom': self.format([200, 200, 200], 'italic'),
            'underline': self.format([240, 240, 240], 'underline'),
            'selected': self.format([255, 255, 255], 'bold underline'),
        }

        self.keywords_sublime = [
            'and', 'assert', 'break', 'continue',
            'del', 'elif', 'else', 'except', 'exec', 'finally',
            'for', 'from', 'global', 'if', 'import', 'in',
            'is', 'lambda', 'not', 'or', 'pass', 'print',
            'raise', 'return', 'try', 'while', 'yield', 'with', 'as'
        ]
        # NOTE(review): these operator patterns use '\+' etc. inside
        # non-raw strings — deprecated escape sequences on modern
        # Python; raw strings would be safer. Left unchanged here.
        self.operatorKeywords_sublime = [
            '=', '==', '!=', '<', '<=', '>', '>=',
            '\+', '-', '\*', '/', '//', '\%', '\*\*',
            '\+=', '-=', '\*=', '/=', '\%=',
            '\^', '\|', '\&', '\~', '>>', '<<'
        ]

        self.baseKeywords_sublime = [
            ',',
        ]

        self.customKeywords_sublime = [
            'nuke',
        ]

        self.blueKeywords_sublime = [
            'def', 'class', 'int', 'str', 'float', 'bool', 'list', 'dict', 'set'
        ]

        self.argKeywords_sublime = [
            'self',
        ]

        # Triple-quote delimiters; the int is the block state used by
        # match_multiline() to carry "inside string" across blocks.
        self.tri_single_sublime = (QtCore.QRegExp(
            "'''"), 1, self.styles_sublime['comment'])
        self.tri_double_sublime = (QtCore.QRegExp(
            '"""'), 2, self.styles_sublime['comment'])
        self.numbers_sublime = ['True', 'False', 'None']

        # rules
        # Rule order matters: later rules overwrite earlier formatting
        # when they match the same span.
        rules = []
        # First turn everything inside parentheses orange
        rules += [(r"def [\w]+[\s]*\((.*)\)", 1,
                   self.styles_sublime['arguments'])]
        # Now restore unwanted stuff...
        rules += [(i, 0, self.styles_sublime['base'])
                  for i in self.baseKeywords_sublime]
        rules += [(r"[^\(\w),.][\s]*[\w]+", 0, self.styles_sublime['base'])]

        # Everything else
        rules += [(r'\b%s\b' % i, 0, self.styles_sublime['keyword'])
                  for i in self.keywords_sublime]
        rules += [(i, 0, self.styles_sublime['keyword'])
                  for i in self.operatorKeywords_sublime]
        rules += [(i, 0, self.styles_sublime['custom'])
                  for i in self.customKeywords_sublime]
        rules += [(r'\b%s\b' % i, 0, self.styles_sublime['blue'])
                  for i in self.blueKeywords_sublime]
        rules += [(i, 0, self.styles_sublime['arguments'])
                  for i in self.argKeywords_sublime]
        rules += [(r'\b%s\b' % i, 0, self.styles_sublime['numbers'])
                  for i in self.numbers_sublime]

        rules += [

            # integers
            (r'\b[0-9]+\b', 0, self.styles_sublime['numbers']),
            # Double-quoted string, possibly containing escape sequences
            (r'"[^"\\]*(\\.[^"\\]*)*"', 0, self.styles_sublime['string']),
            # Single-quoted string, possibly containing escape sequences
            (r"'[^'\\]*(\\.[^'\\]*)*'", 0, self.styles_sublime['string']),
            # From '#' until a newline
            (r'#[^\n]*', 0, self.styles_sublime['comment']),
            # Function definitions
            (r"def[\s]+([\w\.]+)", 1, self.styles_sublime['functions']),
            # Class definitions
            (r"class[\s]+([\w\.]+)", 1, self.styles_sublime['functions']),
            # Class argument (which is also a class so must be green)
            (r"class[\s]+[\w\.]+[\s]*\((.*)\)",
             1, self.styles_sublime['functions']),
            # Function arguments also pick their style...
            (r"def[\s]+[\w]+[\s]*\(([\w]+)", 1,
             self.styles_sublime['arguments']),
        ]

        # Build a QRegExp for each pattern
        self.rules_sublime = [(QtCore.QRegExp(pat), index, fmt)
                              for (pat, index, fmt) in rules]
    def highlightBlock(self, text):
        '''
        Apply syntax highlighting to the given block of text.

        Called by Qt for each text block. Picks the rule set that
        matches the KnobScripter's current color scheme, applies every
        single-line rule, then delegates triple-quoted strings to
        match_multiline().
        '''
        # Do other syntax formatting

        # Resolve the scheme each time so a preference change takes
        # effect on the next rehighlight without re-creating rules.
        if self.knobScripter.color_scheme:
            self.color_scheme = self.knobScripter.color_scheme
        else:
            self.color_scheme = "nuke"

        if self.color_scheme == "nuke":
            self.rules = self.rules_nuke
        elif self.color_scheme == "sublime":
            self.rules = self.rules_sublime

        # NOTE(review): the loop variable "format" shadows both the
        # builtin and this class's format() helper — rename candidate.
        for expression, nth, format in self.rules:
            index = expression.indexIn(text, 0)

            while index >= 0:
                # We actually want the index of the nth match
                index = expression.pos(nth)
                length = len(expression.cap(nth))
                self.setFormat(index, length, format)
                index = expression.indexIn(text, index + length)

        self.setCurrentBlockState(0)

        # Multi-line strings etc. based on selected scheme
        if self.color_scheme == "nuke":
            in_multiline = self.match_multiline(text, *self.tri_single)
            if not in_multiline:
                in_multiline = self.match_multiline(text, *self.tri_double)
        elif self.color_scheme == "sublime":
            in_multiline = self.match_multiline(text, *self.tri_single_sublime)
            if not in_multiline:
                in_multiline = self.match_multiline(
                    text, *self.tri_double_sublime)

        # TODO if there's a selection, highlight same occurrences in the full document. If no selection but something highlighted, unhighlight full document. (do it thru regex or sth)
    def match_multiline(self, text, delimiter, in_state, style):
        '''
        Check whether highlighting requires multiple lines.

        text -- the current block's text.
        delimiter -- QRegExp matching the triple-quote delimiter.
        in_state -- int block-state id that marks "inside this string";
            carried between blocks via set/previousBlockState().
        style -- QTextCharFormat applied to the string span.

        Returns True when the block ends still inside the multi-line
        string, False otherwise.
        '''
        # If inside triple-single quotes, start at 0
        if self.previousBlockState() == in_state:
            start = 0
            add = 0
        # Otherwise, look for the delimiter on this line
        else:
            start = delimiter.indexIn(text)
            # Move past this match
            add = delimiter.matchedLength()

        # As long as there's a delimiter match on this line...
        while start >= 0:
            # Look for the ending delimiter
            end = delimiter.indexIn(text, start + add)
            # Ending delimiter on this line?
            if end >= add:
                length = end - start + add + delimiter.matchedLength()
                self.setCurrentBlockState(0)
            # No; multi-line string
            else:
                self.setCurrentBlockState(in_state)
                length = len(text) - start + add
            # Apply formatting
            self.setFormat(start, length, style)
            # Look for the next match
            start = delimiter.indexIn(text, start + length)

        # Return True if still inside a multi-line string, False otherwise
        if self.currentBlockState() == in_state:
            return True
        else:
            return False
self.knobScripter.clearConsole() + return QtWidgets.QTextEdit.keyPressEvent(self, event) + + # def mousePressEvent(self, QMouseEvent): + # if QMouseEvent.button() == Qt.RightButton: + # self.knobScripter.clearConsole() + # QtWidgets.QTextEdit.mousePressEvent(self, QMouseEvent) + +# --------------------------------------------------------------------- +# Modified KnobScripterTextEdit to include snippets etc. +# --------------------------------------------------------------------- + + +class KnobScripterTextEditMain(KnobScripterTextEdit): + def __init__(self, knobScripter, output=None, parent=None): + super(KnobScripterTextEditMain, self).__init__(knobScripter) + self.knobScripter = knobScripter + self.script_output = output + self.nukeCompleter = None + self.currentNukeCompletion = None + + ######## + # FROM NUKE's SCRIPT EDITOR START + ######## + self.setSizePolicy(QtWidgets.QSizePolicy.Expanding, + QtWidgets.QSizePolicy.Expanding) + + # Setup completer + self.nukeCompleter = QtWidgets.QCompleter(self) + self.nukeCompleter.setWidget(self) + self.nukeCompleter.setCompletionMode( + QtWidgets.QCompleter.UnfilteredPopupCompletion) + self.nukeCompleter.setCaseSensitivity(Qt.CaseSensitive) + try: + self.nukeCompleter.setModel(QtGui.QStringListModel()) + except: + self.nukeCompleter.setModel(QtCore.QStringListModel()) + + self.nukeCompleter.activated.connect(self.insertNukeCompletion) + self.nukeCompleter.highlighted.connect(self.completerHighlightChanged) + ######## + # FROM NUKE's SCRIPT EDITOR END + ######## + + def findLongestEndingMatch(self, text, dic): + ''' + If the text ends with a key in the dictionary, it returns the key and value. + If there are several matches, returns the longest one. + False if no matches. 
+ ''' + longest = 0 # len of longest match + match_key = None + match_snippet = "" + for key, val in dic.items(): + #match = re.search(r"[\s\.({\[,;=+-]"+key+r"(?:[\s)\]\"]+|$)",text) + match = re.search(r"[\s\.({\[,;=+-]" + key + r"$", text) + if match or text == key: + if len(key) > longest: + longest = len(key) + match_key = key + match_snippet = val + if match_key is None: + return False + return match_key, match_snippet + + def placeholderToEnd(self, text, placeholder): + '''Returns distance (int) from the first ocurrence of the placeholder, to the end of the string with placeholders removed''' + search = re.search(placeholder, text) + if not search: + return -1 + from_start = search.start() + total = len(re.sub(placeholder, "", text)) + to_end = total - from_start + return to_end + + def addSnippetText(self, snippet_text): + ''' Adds the selected text as a snippet (taking care of $$, $name$ etc) to the script editor ''' + cursor_placeholder_find = r"(? 1: + cursor_len = positions[1] - positions[0] - 2 + + text = re.sub(cursor_placeholder_find, "", text) + self.cursor.insertText(text) + if placeholder_to_end >= 0: + for i in range(placeholder_to_end): + self.cursor.movePosition(QtGui.QTextCursor.PreviousCharacter) + for i in range(cursor_len): + self.cursor.movePosition( + QtGui.QTextCursor.NextCharacter, QtGui.QTextCursor.KeepAnchor) + self.setTextCursor(self.cursor) + + def keyPressEvent(self, event): + + ctrl = bool(event.modifiers() & Qt.ControlModifier) + alt = bool(event.modifiers() & Qt.AltModifier) + shift = bool(event.modifiers() & Qt.ShiftModifier) + key = event.key() + + # ADAPTED FROM NUKE's SCRIPT EDITOR: + # Get completer state + self.nukeCompleterShowing = self.nukeCompleter.popup().isVisible() + + # BEFORE ANYTHING ELSE, IF SPECIAL MODIFIERS SIMPLY IGNORE THE REST + if not self.nukeCompleterShowing and (ctrl or shift or alt): + # Bypassed! 
+ if key not in [Qt.Key_Return, Qt.Key_Enter, Qt.Key_Tab]: + KnobScripterTextEdit.keyPressEvent(self, event) + return + + # If the completer is showing + if self.nukeCompleterShowing: + tc = self.textCursor() + # If we're hitting enter, do completion + if key in [Qt.Key_Return, Qt.Key_Enter, Qt.Key_Tab]: + if not self.currentNukeCompletion: + self.nukeCompleter.setCurrentRow(0) + self.currentNukeCompletion = self.nukeCompleter.currentCompletion() + # print str(self.nukeCompleter.completionModel[0]) + self.insertNukeCompletion(self.currentNukeCompletion) + self.nukeCompleter.popup().hide() + self.nukeCompleterShowing = False + # If you're hitting right or escape, hide the popup + elif key == Qt.Key_Right or key == Qt.Key_Escape: + self.nukeCompleter.popup().hide() + self.nukeCompleterShowing = False + # If you hit tab, escape or ctrl-space, hide the completer + elif key == Qt.Key_Tab or key == Qt.Key_Escape or (ctrl and key == Qt.Key_Space): + self.currentNukeCompletion = "" + self.nukeCompleter.popup().hide() + self.nukeCompleterShowing = False + # If none of the above, update the completion model + else: + QtWidgets.QPlainTextEdit.keyPressEvent(self, event) + # Edit completion model + colNum = tc.columnNumber() + posNum = tc.position() + inputText = self.toPlainText() + inputTextSplit = inputText.splitlines() + runningLength = 0 + currentLine = None + for line in inputTextSplit: + length = len(line) + runningLength += length + if runningLength >= posNum: + currentLine = line + break + runningLength += 1 + if currentLine: + completionPart = currentLine.split(" ")[-1] + if "(" in completionPart: + completionPart = completionPart.split("(")[-1] + self.completeNukePartUnderCursor(completionPart) + return + + if type(event) == QtGui.QKeyEvent: + if key == Qt.Key_Escape: # Close the knobscripter... + self.knobScripter.close() + elif not ctrl and not alt and not shift and event.key() == Qt.Key_Tab: + self.placeholder = "$$" + # 1. 
Set the cursor + self.cursor = self.textCursor() + + # 2. Save text before and after + cpos = self.cursor.position() + text_before_cursor = self.toPlainText()[:cpos] + line_before_cursor = text_before_cursor.split('\n')[-1] + text_after_cursor = self.toPlainText()[cpos:] + + # 3. Check coincidences in snippets dicts + try: # Meaning snippet found + match_key, match_snippet = self.findLongestEndingMatch( + line_before_cursor, self.knobScripter.snippets) + for i in range(len(match_key)): + self.cursor.deletePreviousChar() + # This function takes care of adding the appropriate snippet and moving the cursor... + self.addSnippetText(match_snippet) + except: # Meaning snippet not found... + # ADAPTED FROM NUKE's SCRIPT EDITOR: + tc = self.textCursor() + allCode = self.toPlainText() + colNum = tc.columnNumber() + posNum = tc.position() + + # ...and if there's text in the editor + if len(allCode.split()) > 0: + # There is text in the editor + currentLine = tc.block().text() + + # If you're not at the end of the line just add a tab + if colNum < len(currentLine): + # If there isn't a ')' directly to the right of the cursor add a tab + if currentLine[colNum:colNum + 1] != ')': + KnobScripterTextEdit.keyPressEvent(self, event) + return + # Else show the completer + else: + completionPart = currentLine[:colNum].split( + " ")[-1] + if "(" in completionPart: + completionPart = completionPart.split( + "(")[-1] + + self.completeNukePartUnderCursor( + completionPart) + + return + + # If you are at the end of the line, + else: + # If there's nothing to the right of you add a tab + if currentLine[colNum - 1:] == "" or currentLine.endswith(" "): + KnobScripterTextEdit.keyPressEvent(self, event) + return + # Else update completionPart and show the completer + completionPart = currentLine.split(" ")[-1] + if "(" in completionPart: + completionPart = completionPart.split("(")[-1] + + self.completeNukePartUnderCursor(completionPart) + return + + KnobScripterTextEdit.keyPressEvent(self, 
event) + elif event.key() in [Qt.Key_Enter, Qt.Key_Return]: + modifiers = QtWidgets.QApplication.keyboardModifiers() + if modifiers == QtCore.Qt.ControlModifier: + self.runScript() + else: + KnobScripterTextEdit.keyPressEvent(self, event) + else: + KnobScripterTextEdit.keyPressEvent(self, event) + + def getPyObjects(self, text): + ''' Returns a list containing all the functions, classes and variables found within the selected python text (code) ''' + matches = [] + # 1: Remove text inside triple quotes (leaving the quotes) + text_clean = '""'.join(text.split('"""')[::2]) + text_clean = '""'.join(text_clean.split("'''")[::2]) + + # 2: Remove text inside of quotes (leaving the quotes) except if \" + lines = text_clean.split("\n") + text_clean = "" + for line in lines: + line_clean = '""'.join(line.split('"')[::2]) + line_clean = '""'.join(line_clean.split("'")[::2]) + line_clean = line_clean.split("#")[0] + text_clean += line_clean + "\n" + + # 3. Split into segments (lines plus ";") + segments = re.findall(r"[^\n;]+", text_clean) + + # 4. Go case by case. 
+ for s in segments: + # Declared vars + matches += re.findall(r"([\w\.]+)(?=[,\s\w]*=[^=]+$)", s) + # Def functions and arguments + function = re.findall(r"[\s]*def[\s]+([\w\.]+)[\s]*\([\s]*", s) + if len(function): + matches += function + args = re.split(r"[\s]*def[\s]+([\w\.]+)[\s]*\([\s]*", s) + if len(args) > 1: + args = args[-1] + matches += re.findall( + r"(?adrianpueyo.com, 2016-2019') + kspSignature.setOpenExternalLinks(True) + kspSignature.setStyleSheet('''color:#555;font-size:9px;''') + kspSignature.setAlignment(QtCore.Qt.AlignRight) + + fontLabel = QtWidgets.QLabel("Font:") + self.fontBox = QtWidgets.QFontComboBox() + self.fontBox.setCurrentFont(QtGui.QFont(self.font)) + self.fontBox.currentFontChanged.connect(self.fontChanged) + + fontSizeLabel = QtWidgets.QLabel("Font size:") + self.fontSizeBox = QtWidgets.QSpinBox() + self.fontSizeBox.setValue(self.oldFontSize) + self.fontSizeBox.setMinimum(6) + self.fontSizeBox.setMaximum(100) + self.fontSizeBox.valueChanged.connect(self.fontSizeChanged) + + windowWLabel = QtWidgets.QLabel("Width (px):") + windowWLabel.setToolTip("Default window width in pixels") + self.windowWBox = QtWidgets.QSpinBox() + self.windowWBox.setValue(self.knobScripter.windowDefaultSize[0]) + self.windowWBox.setMinimum(200) + self.windowWBox.setMaximum(4000) + self.windowWBox.setToolTip("Default window width in pixels") + + windowHLabel = QtWidgets.QLabel("Height (px):") + windowHLabel.setToolTip("Default window height in pixels") + self.windowHBox = QtWidgets.QSpinBox() + self.windowHBox.setValue(self.knobScripter.windowDefaultSize[1]) + self.windowHBox.setMinimum(100) + self.windowHBox.setMaximum(2000) + self.windowHBox.setToolTip("Default window height in pixels") + + # TODO: "Grab current dimensions" button + + tabSpaceLabel = QtWidgets.QLabel("Tab spaces:") + tabSpaceLabel.setToolTip("Number of spaces to add with the tab key.") + self.tabSpace2 = QtWidgets.QRadioButton("2") + self.tabSpace4 = QtWidgets.QRadioButton("4") + 
tabSpaceButtonGroup = QtWidgets.QButtonGroup(self) + tabSpaceButtonGroup.addButton(self.tabSpace2) + tabSpaceButtonGroup.addButton(self.tabSpace4) + self.tabSpace2.setChecked(self.knobScripter.tabSpaces == 2) + self.tabSpace4.setChecked(self.knobScripter.tabSpaces == 4) + + pinDefaultLabel = QtWidgets.QLabel("Always on top:") + pinDefaultLabel.setToolTip("Default mode of the PIN toggle.") + self.pinDefaultOn = QtWidgets.QRadioButton("On") + self.pinDefaultOff = QtWidgets.QRadioButton("Off") + pinDefaultButtonGroup = QtWidgets.QButtonGroup(self) + pinDefaultButtonGroup.addButton(self.pinDefaultOn) + pinDefaultButtonGroup.addButton(self.pinDefaultOff) + self.pinDefaultOn.setChecked(self.knobScripter.pinned == True) + self.pinDefaultOff.setChecked(self.knobScripter.pinned == False) + self.pinDefaultOn.clicked.connect(lambda: self.knobScripter.pin(True)) + self.pinDefaultOff.clicked.connect( + lambda: self.knobScripter.pin(False)) + + colorSchemeLabel = QtWidgets.QLabel("Color scheme:") + colorSchemeLabel.setToolTip("Syntax highlighting text style.") + self.colorSchemeSublime = QtWidgets.QRadioButton("subl") + self.colorSchemeNuke = QtWidgets.QRadioButton("nuke") + colorSchemeButtonGroup = QtWidgets.QButtonGroup(self) + colorSchemeButtonGroup.addButton(self.colorSchemeSublime) + colorSchemeButtonGroup.addButton(self.colorSchemeNuke) + colorSchemeButtonGroup.buttonClicked.connect(self.colorSchemeChanged) + self.colorSchemeSublime.setChecked( + self.knobScripter.color_scheme == "sublime") + self.colorSchemeNuke.setChecked( + self.knobScripter.color_scheme == "nuke") + + showLabelsLabel = QtWidgets.QLabel("Show labels:") + showLabelsLabel.setToolTip( + "Display knob labels on the knob dropdown\nOtherwise, shows the internal name only.") + self.showLabelsOn = QtWidgets.QRadioButton("On") + self.showLabelsOff = QtWidgets.QRadioButton("Off") + showLabelsButtonGroup = QtWidgets.QButtonGroup(self) + showLabelsButtonGroup.addButton(self.showLabelsOn) + 
showLabelsButtonGroup.addButton(self.showLabelsOff) + self.showLabelsOn.setChecked(self.knobScripter.pinned == True) + self.showLabelsOff.setChecked(self.knobScripter.pinned == False) + self.showLabelsOn.clicked.connect(lambda: self.knobScripter.pin(True)) + self.showLabelsOff.clicked.connect( + lambda: self.knobScripter.pin(False)) + + self.buttonBox = QtWidgets.QDialogButtonBox( + QtWidgets.QDialogButtonBox.Ok | QtWidgets.QDialogButtonBox.Cancel) + self.buttonBox.accepted.connect(self.savePrefs) + self.buttonBox.rejected.connect(self.cancelPrefs) + + # Loaded custom values + self.ksPrefs = self.knobScripter.loadPrefs() + if self.ksPrefs != []: + try: + self.fontSizeBox.setValue(self.ksPrefs['font_size']) + self.windowWBox.setValue(self.ksPrefs['window_default_w']) + self.windowHBox.setValue(self.ksPrefs['window_default_h']) + self.tabSpace2.setChecked(self.ksPrefs['tab_spaces'] == 2) + self.tabSpace4.setChecked(self.ksPrefs['tab_spaces'] == 4) + self.pinDefaultOn.setChecked(self.ksPrefs['pin_default'] == 1) + self.pinDefaultOff.setChecked(self.ksPrefs['pin_default'] == 0) + self.showLabelsOn.setChecked(self.ksPrefs['show_labels'] == 1) + self.showLabelsOff.setChecked(self.ksPrefs['show_labels'] == 0) + self.colorSchemeSublime.setChecked( + self.ksPrefs['color_scheme'] == "sublime") + self.colorSchemeNuke.setChecked( + self.ksPrefs['color_scheme'] == "nuke") + except: + pass + + # Layouts + font_layout = QtWidgets.QHBoxLayout() + font_layout.addWidget(fontLabel) + font_layout.addWidget(self.fontBox) + + fontSize_layout = QtWidgets.QHBoxLayout() + fontSize_layout.addWidget(fontSizeLabel) + fontSize_layout.addWidget(self.fontSizeBox) + + windowW_layout = QtWidgets.QHBoxLayout() + windowW_layout.addWidget(windowWLabel) + windowW_layout.addWidget(self.windowWBox) + + windowH_layout = QtWidgets.QHBoxLayout() + windowH_layout.addWidget(windowHLabel) + windowH_layout.addWidget(self.windowHBox) + + tabSpacesButtons_layout = QtWidgets.QHBoxLayout() + 
tabSpacesButtons_layout.addWidget(self.tabSpace2) + tabSpacesButtons_layout.addWidget(self.tabSpace4) + tabSpaces_layout = QtWidgets.QHBoxLayout() + tabSpaces_layout.addWidget(tabSpaceLabel) + tabSpaces_layout.addLayout(tabSpacesButtons_layout) + + pinDefaultButtons_layout = QtWidgets.QHBoxLayout() + pinDefaultButtons_layout.addWidget(self.pinDefaultOn) + pinDefaultButtons_layout.addWidget(self.pinDefaultOff) + pinDefault_layout = QtWidgets.QHBoxLayout() + pinDefault_layout.addWidget(pinDefaultLabel) + pinDefault_layout.addLayout(pinDefaultButtons_layout) + + showLabelsButtons_layout = QtWidgets.QHBoxLayout() + showLabelsButtons_layout.addWidget(self.showLabelsOn) + showLabelsButtons_layout.addWidget(self.showLabelsOff) + showLabels_layout = QtWidgets.QHBoxLayout() + showLabels_layout.addWidget(showLabelsLabel) + showLabels_layout.addLayout(showLabelsButtons_layout) + + colorSchemeButtons_layout = QtWidgets.QHBoxLayout() + colorSchemeButtons_layout.addWidget(self.colorSchemeSublime) + colorSchemeButtons_layout.addWidget(self.colorSchemeNuke) + colorScheme_layout = QtWidgets.QHBoxLayout() + colorScheme_layout.addWidget(colorSchemeLabel) + colorScheme_layout.addLayout(colorSchemeButtons_layout) + + self.master_layout = QtWidgets.QVBoxLayout() + self.master_layout.addWidget(kspTitle) + self.master_layout.addWidget(kspSignature) + self.master_layout.addWidget(kspLine) + self.master_layout.addLayout(font_layout) + self.master_layout.addLayout(fontSize_layout) + self.master_layout.addLayout(windowW_layout) + self.master_layout.addLayout(windowH_layout) + self.master_layout.addLayout(tabSpaces_layout) + self.master_layout.addLayout(pinDefault_layout) + self.master_layout.addLayout(showLabels_layout) + self.master_layout.addLayout(colorScheme_layout) + self.master_layout.addWidget(self.buttonBox) + self.setLayout(self.master_layout) + self.setFixedSize(self.minimumSize()) + + def savePrefs(self): + self.font = self.fontBox.currentFont().family() + ks_prefs = { + 
'font_size': self.fontSizeBox.value(), + 'window_default_w': self.windowWBox.value(), + 'window_default_h': self.windowHBox.value(), + 'tab_spaces': self.tabSpaceValue(), + 'pin_default': self.pinDefaultValue(), + 'show_labels': self.showLabelsValue(), + 'font': self.font, + 'color_scheme': self.colorSchemeValue(), + } + self.knobScripter.script_editor_font.setFamily(self.font) + self.knobScripter.script_editor.setFont( + self.knobScripter.script_editor_font) + self.knobScripter.font = self.font + self.knobScripter.color_scheme = self.colorSchemeValue() + self.knobScripter.tabSpaces = self.tabSpaceValue() + self.knobScripter.script_editor.tabSpaces = self.tabSpaceValue() + with open(self.prefs_txt, "w") as f: + prefs = json.dump(ks_prefs, f, sort_keys=True, indent=4) + self.accept() + self.knobScripter.highlighter.rehighlight() + self.knobScripter.show_labels = self.showLabelsValue() + if self.knobScripter.nodeMode: + self.knobScripter.refreshClicked() + return prefs + + def cancelPrefs(self): + self.knobScripter.script_editor_font.setPointSize(self.oldFontSize) + self.knobScripter.script_editor.setFont( + self.knobScripter.script_editor_font) + self.knobScripter.color_scheme = self.oldScheme + self.knobScripter.highlighter.rehighlight() + self.reject() + + def fontSizeChanged(self): + self.knobScripter.script_editor_font.setPointSize( + self.fontSizeBox.value()) + self.knobScripter.script_editor.setFont( + self.knobScripter.script_editor_font) + return + + def fontChanged(self): + self.font = self.fontBox.currentFont().family() + self.knobScripter.script_editor_font.setFamily(self.font) + self.knobScripter.script_editor.setFont( + self.knobScripter.script_editor_font) + return + + def colorSchemeChanged(self): + self.knobScripter.color_scheme = self.colorSchemeValue() + self.knobScripter.highlighter.rehighlight() + return + + def tabSpaceValue(self): + return 2 if self.tabSpace2.isChecked() else 4 + + def pinDefaultValue(self): + return 1 if 
self.pinDefaultOn.isChecked() else 0 + + def showLabelsValue(self): + return 1 if self.showLabelsOn.isChecked() else 0 + + def colorSchemeValue(self): + return "nuke" if self.colorSchemeNuke.isChecked() else "sublime" + + def closeEvent(self, event): + self.cancelPrefs() + self.close() + + +def updateContext(): + ''' + Get the current selection of nodes with their appropiate context + Doing this outside the KnobScripter -> forces context update inside groups when needed + ''' + global knobScripterSelectedNodes + knobScripterSelectedNodes = nuke.selectedNodes() + return + +# -------------------------------- +# FindReplace +# -------------------------------- + + +class FindReplaceWidget(QtWidgets.QWidget): + ''' SearchReplace Widget for the knobscripter. FindReplaceWidget(editor = QPlainTextEdit) ''' + + def __init__(self, parent): + super(FindReplaceWidget, self).__init__(parent) + + self.editor = parent.script_editor + + self.initUI() + + def initUI(self): + + # -------------- + # Find Row + # -------------- + + # Widgets + self.find_label = QtWidgets.QLabel("Find:") + # self.find_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed) + self.find_label.setFixedWidth(50) + self.find_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + self.find_lineEdit = QtWidgets.QLineEdit() + self.find_next_button = QtWidgets.QPushButton("Next") + self.find_next_button.clicked.connect(self.find) + self.find_prev_button = QtWidgets.QPushButton("Previous") + self.find_prev_button.clicked.connect(self.findBack) + self.find_lineEdit.returnPressed.connect(self.find_next_button.click) + + # Layout + self.find_layout = QtWidgets.QHBoxLayout() + self.find_layout.addWidget(self.find_label) + self.find_layout.addWidget(self.find_lineEdit, stretch=1) + self.find_layout.addWidget(self.find_next_button) + self.find_layout.addWidget(self.find_prev_button) + + # -------------- + # Replace Row + # -------------- + + # Widgets + self.replace_label = 
QtWidgets.QLabel("Replace:") + # self.replace_label.setSizePolicy(QtWidgets.QSizePolicy.Fixed,QtWidgets.QSizePolicy.Fixed) + self.replace_label.setFixedWidth(50) + self.replace_label.setAlignment( + QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter) + self.replace_lineEdit = QtWidgets.QLineEdit() + self.replace_button = QtWidgets.QPushButton("Replace") + self.replace_button.clicked.connect(self.replace) + self.replace_all_button = QtWidgets.QPushButton("Replace All") + self.replace_all_button.clicked.connect( + lambda: self.replace(rep_all=True)) + self.replace_lineEdit.returnPressed.connect(self.replace_button.click) + + # Layout + self.replace_layout = QtWidgets.QHBoxLayout() + self.replace_layout.addWidget(self.replace_label) + self.replace_layout.addWidget(self.replace_lineEdit, stretch=1) + self.replace_layout.addWidget(self.replace_button) + self.replace_layout.addWidget(self.replace_all_button) + + # Info text + self.info_text = QtWidgets.QLabel("") + self.info_text.setVisible(False) + self.info_text.mousePressEvent = lambda x: self.info_text.setVisible( + False) + #f = self.info_text.font() + # f.setItalic(True) + # self.info_text.setFont(f) + # self.info_text.clicked.connect(lambda:self.info_text.setVisible(False)) + + # Divider line + line = QtWidgets.QFrame() + line.setFrameShape(QtWidgets.QFrame.HLine) + line.setFrameShadow(QtWidgets.QFrame.Sunken) + line.setLineWidth(0) + line.setMidLineWidth(1) + line.setFrameShadow(QtWidgets.QFrame.Sunken) + + # -------------- + # Main Layout + # -------------- + + self.layout = QtWidgets.QVBoxLayout() + self.layout.addSpacing(4) + self.layout.addWidget(self.info_text) + self.layout.addLayout(self.find_layout) + self.layout.addLayout(self.replace_layout) + self.layout.setSpacing(4) + try: # >n11 + self.layout.setMargin(2) + except: # 0: # If not found but there are matches, start over + cursor.movePosition(QtGui.QTextCursor.Start) + self.editor.setTextCursor(cursor) + self.editor.find(find_str, flags) + else: + 
                # NOTE(review): tail of FindReplaceWidget.replace(); the
                # while/rep_all control flow above is outside this view, so
                # the exact nesting of this else-branch is taken as-is.
                cursor.insertText(rep_str)
                self.editor.find(
                    rep_str, flags | QtGui.QTextDocument.FindBackward)

        cursor.endEditBlock()
        self.replace_lineEdit.setFocus()
        return


# --------------------------------
# Snippets
# --------------------------------
class SnippetsPanel(QtWidgets.QDialog):
    """Editor dialog for the snippet shortcuts stored in the snippets json
    file (plus optional paths to external snippet files)."""

    def __init__(self, parent):
        super(SnippetsPanel, self).__init__(parent)

        self.knobScripter = parent

        self.setWindowFlags(self.windowFlags() |
                            QtCore.Qt.WindowStaysOnTopHint)
        self.setWindowTitle("Snippet editor")

        # Snippets are read from the same file the KnobScripter itself uses.
        self.snippets_txt_path = self.knobScripter.snippets_txt_path
        self.snippets_dict = self.loadSnippetsDict(path=self.snippets_txt_path)
        #self.snippets_dict = snippets_dic

        # self.saveSnippets(snippets_dic)

        self.initUI()
        self.resize(500, 300)

    def initUI(self):
        """Build the panel: title row, scrollable snippet list, buttons."""
        self.layout = QtWidgets.QVBoxLayout()

        # First Area (Titles)
        title_layout = QtWidgets.QHBoxLayout()
        shortcuts_label = QtWidgets.QLabel("Shortcut")
        code_label = QtWidgets.QLabel("Code snippet")
        title_layout.addWidget(shortcuts_label, stretch=1)
        title_layout.addWidget(code_label, stretch=2)
        self.layout.addLayout(title_layout)

        # Main Scroll area: one row widget per snippet / custom path.
        self.scroll_content = QtWidgets.QWidget()
        self.scroll_layout = QtWidgets.QVBoxLayout()

        self.buildSnippetWidgets()

        self.scroll_content.setLayout(self.scroll_layout)

        # Scroll Area Properties
        self.scroll = QtWidgets.QScrollArea()
        self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scroll.setWidgetResizable(True)
        self.scroll.setWidget(self.scroll_content)

        self.layout.addWidget(self.scroll)

        # File knob test
        #self.filePath_lineEdit = SnippetFilePath(self)
        # self.filePath_lineEdit
        # self.layout.addWidget(self.filePath_lineEdit)

        # Lower buttons
        self.bottom_layout = QtWidgets.QHBoxLayout()

        self.add_btn = QtWidgets.QPushButton("Add snippet")
        self.add_btn.setToolTip("Create empty fields for an extra snippet.")
        self.add_btn.clicked.connect(self.addSnippet)
        self.bottom_layout.addWidget(self.add_btn)

        self.addPath_btn = QtWidgets.QPushButton("Add custom path")
        self.addPath_btn.setToolTip(
            "Add a custom path to an external snippets .txt file.")
        self.addPath_btn.clicked.connect(self.addCustomPath)
        self.bottom_layout.addWidget(self.addPath_btn)

        self.bottom_layout.addStretch()

        self.save_btn = QtWidgets.QPushButton('OK')
        self.save_btn.setToolTip(
            "Save the snippets into a json file and close the panel.")
        self.save_btn.clicked.connect(self.okPressed)
        self.bottom_layout.addWidget(self.save_btn)

        self.cancel_btn = QtWidgets.QPushButton("Cancel")
        self.cancel_btn.setToolTip("Cancel any new snippets or modifications.")
        self.cancel_btn.clicked.connect(self.close)
        self.bottom_layout.addWidget(self.cancel_btn)

        self.apply_btn = QtWidgets.QPushButton('Apply')
        self.apply_btn.setToolTip("Save the snippets into a json file.")
        self.apply_btn.setShortcut('Ctrl+S')
        self.apply_btn.clicked.connect(self.applySnippets)
        self.bottom_layout.addWidget(self.apply_btn)

        self.help_btn = QtWidgets.QPushButton('Help')
        self.help_btn.setShortcut('F1')
        self.help_btn.clicked.connect(self.showHelp)
        self.bottom_layout.addWidget(self.help_btn)

        self.layout.addLayout(self.bottom_layout)

        self.setLayout(self.layout)

    def reload(self):
        '''
        Clears everything without saving and redoes the widgets etc.
        Only to be called if the panel isn't shown meaning it's closed.
+ ''' + for i in reversed(range(self.scroll_layout.count())): + self.scroll_layout.itemAt(i).widget().deleteLater() + + self.snippets_dict = self.loadSnippetsDict(path=self.snippets_txt_path) + + self.buildSnippetWidgets() + + def buildSnippetWidgets(self): + for i, (key, val) in enumerate(self.snippets_dict.items()): + if re.match(r"\[custom-path-[0-9]+\]$", key): + file_edit = SnippetFilePath(val) + self.scroll_layout.insertWidget(-1, file_edit) + else: + snippet_edit = SnippetEdit(key, val, parent=self) + self.scroll_layout.insertWidget(-1, snippet_edit) + + def loadSnippetsDict(self, path=""): + ''' Load prefs. TO REMOVE ''' + if path == "": + path = self.knobScripter.snippets_txt_path + if not os.path.isfile(self.snippets_txt_path): + return {} + else: + with open(self.snippets_txt_path, "r") as f: + self.snippets = json.load(f) + return self.snippets + + def getSnippetsAsDict(self): + dic = {} + num_snippets = self.scroll_layout.count() + path_i = 1 + for s in range(num_snippets): + se = self.scroll_layout.itemAt(s).widget() + if se.__class__.__name__ == "SnippetEdit": + key = se.shortcut_editor.text() + val = se.script_editor.toPlainText() + if key != "": + dic[key] = val + else: + path = se.filepath_lineEdit.text() + if path != "": + dic["[custom-path-{}]".format(str(path_i))] = path + path_i += 1 + return dic + + def saveSnippets(self, snippets=""): + if snippets == "": + snippets = self.getSnippetsAsDict() + with open(self.snippets_txt_path, "w") as f: + prefs = json.dump(snippets, f, sort_keys=True, indent=4) + return prefs + + def applySnippets(self): + self.saveSnippets() + self.knobScripter.snippets = self.knobScripter.loadSnippets(maxDepth=5) + self.knobScripter.loadSnippets() + + def okPressed(self): + self.applySnippets() + self.accept() + + def addSnippet(self, key="", val=""): + se = SnippetEdit(key, val, parent=self) + self.scroll_layout.insertWidget(0, se) + self.show() + return se + + def addCustomPath(self, path=""): + cpe = 
SnippetFilePath(path) + self.scroll_layout.insertWidget(0, cpe) + self.show() + cpe.browseSnippets() + return cpe + + def showHelp(self): + ''' Create a new snippet, auto-completed with the help ''' + help_key = "help" + help_val = """Snippets are a convenient way to have code blocks that you can call through a shortcut.\n\n1. Simply write a shortcut on the text input field on the left. You can see this one is set to "test".\n\n2. Then, write a code or whatever in this script editor. You can include $$ as the placeholder for where you'll want the mouse cursor to appear.\n\n3. Finally, click OK or Apply to save the snippets. On the main script editor, you'll be able to call any snippet by writing the shortcut (in this example: help) and pressing the Tab key.\n\nIn order to remove a snippet, simply leave the shortcut and contents blank, and save the snippets.""" + help_se = self.addSnippet(help_key, help_val) + help_se.script_editor.resize(160, 160) + + +class SnippetEdit(QtWidgets.QWidget): + ''' Simple widget containing two fields, for the snippet shortcut and content ''' + + def __init__(self, key="", val="", parent=None): + super(SnippetEdit, self).__init__(parent) + + self.knobScripter = parent.knobScripter + self.color_scheme = self.knobScripter.color_scheme + self.layout = QtWidgets.QHBoxLayout() + + self.shortcut_editor = QtWidgets.QLineEdit(self) + f = self.shortcut_editor.font() + f.setWeight(QtGui.QFont.Bold) + self.shortcut_editor.setFont(f) + self.shortcut_editor.setText(str(key)) + #self.script_editor = QtWidgets.QTextEdit(self) + self.script_editor = KnobScripterTextEdit() + self.script_editor.setMinimumHeight(100) + self.script_editor.setStyleSheet( + 'background:#282828;color:#EEE;') # Main Colors + self.highlighter = KSScriptEditorHighlighter( + self.script_editor.document(), self) + self.script_editor_font = self.knobScripter.script_editor_font + self.script_editor.setFont(self.script_editor_font) + self.script_editor.resize(90, 90) + 
self.script_editor.setPlainText(str(val)) + self.layout.addWidget(self.shortcut_editor, + stretch=1, alignment=Qt.AlignTop) + self.layout.addWidget(self.script_editor, stretch=2) + self.layout.setContentsMargins(0, 0, 0, 0) + + self.setLayout(self.layout) + + +class SnippetFilePath(QtWidgets.QWidget): + ''' Simple widget containing a filepath lineEdit and a button to open the file browser ''' + + def __init__(self, path="", parent=None): + super(SnippetFilePath, self).__init__(parent) + + self.layout = QtWidgets.QHBoxLayout() + + self.custompath_label = QtWidgets.QLabel(self) + self.custompath_label.setText("Custom path: ") + + self.filepath_lineEdit = QtWidgets.QLineEdit(self) + self.filepath_lineEdit.setText(str(path)) + #self.script_editor = QtWidgets.QTextEdit(self) + self.filepath_lineEdit.setStyleSheet( + 'background:#282828;color:#EEE;') # Main Colors + self.script_editor_font = QtGui.QFont() + self.script_editor_font.setFamily("Courier") + self.script_editor_font.setStyleHint(QtGui.QFont.Monospace) + self.script_editor_font.setFixedPitch(True) + self.script_editor_font.setPointSize(11) + self.filepath_lineEdit.setFont(self.script_editor_font) + + self.file_button = QtWidgets.QPushButton(self) + self.file_button.setText("Browse...") + self.file_button.clicked.connect(self.browseSnippets) + + self.layout.addWidget(self.custompath_label) + self.layout.addWidget(self.filepath_lineEdit) + self.layout.addWidget(self.file_button) + self.layout.setContentsMargins(0, 10, 0, 10) + + self.setLayout(self.layout) + + def browseSnippets(self): + ''' Opens file panel for ...snippets.txt ''' + browseLocation = nuke.getFilename('Select snippets file', '*.txt') + + if not browseLocation: + return + + self.filepath_lineEdit.setText(browseLocation) + return + + +# -------------------------------- +# Implementation +# -------------------------------- + +def showKnobScripter(knob="knobChanged"): + selection = nuke.selectedNodes() + if not len(selection): + pan = KnobScripter() + 
else: + pan = KnobScripter(selection[0], knob) + pan.show() + + +def addKnobScripterPanel(): + global knobScripterPanel + try: + knobScripterPanel = panels.registerWidgetAsPanel('nuke.KnobScripterPane', 'Knob Scripter', + 'com.adrianpueyo.KnobScripterPane') + knobScripterPanel.addToPane(nuke.getPaneFor('Properties.1')) + + except: + knobScripterPanel = panels.registerWidgetAsPanel( + 'nuke.KnobScripterPane', 'Knob Scripter', 'com.adrianpueyo.KnobScripterPane') + + +nuke.KnobScripterPane = KnobScripterPane +log("KS LOADED") +ksShortcut = "alt+z" +addKnobScripterPanel() +nuke.menu('Nuke').addCommand( + 'Edit/Node/Open Floating Knob Scripter', showKnobScripter, ksShortcut) +nuke.menu('Nuke').addCommand('Edit/Node/Update KnobScripter Context', + updateContext).setVisible(False) diff --git a/setup/nuke/nuke_path/init.py b/setup/nuke/nuke_path/init.py new file mode 100644 index 0000000000..0ea5d1ad7d --- /dev/null +++ b/setup/nuke/nuke_path/init.py @@ -0,0 +1,2 @@ +# default write mov +nuke.knobDefault('Write.mov.colorspace', 'sRGB') diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py index fd87c98246..7f5de6013d 100644 --- a/setup/nuke/nuke_path/menu.py +++ b/setup/nuke/nuke_path/menu.py @@ -1,4 +1,7 @@ +import os +import sys import atom_server +import KnobScripter from pype.nuke.lib import ( writes_version_sync, @@ -16,6 +19,6 @@ log = Logger().get_logger(__name__, "nuke") nuke.addOnScriptSave(onScriptLoad) nuke.addOnScriptLoad(checkInventoryVersions) nuke.addOnScriptSave(checkInventoryVersions) -nuke.addOnScriptSave(writes_version_sync) +# nuke.addOnScriptSave(writes_version_sync) log.info('Automatic syncing of write file knob to script version')