diff --git a/pype/blender/__init__.py b/pype/blender/__init__.py
new file mode 100644
index 0000000000..8a29917e40
--- /dev/null
+++ b/pype/blender/__init__.py
@@ -0,0 +1,34 @@
+import logging
+from pathlib import Path
+import os
+
+import bpy
+
+from avalon import api as avalon
+from pyblish import api as pyblish
+
+from .plugin import AssetLoader
+
+logger = logging.getLogger("pype.blender")
+
+PARENT_DIR = os.path.dirname(__file__)
+PACKAGE_DIR = os.path.dirname(PARENT_DIR)
+PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
+
+PUBLISH_PATH = os.path.join(PLUGINS_DIR, "blender", "publish")
+LOAD_PATH = os.path.join(PLUGINS_DIR, "blender", "load")
+CREATE_PATH = os.path.join(PLUGINS_DIR, "blender", "create")
+
+
+def install():
+ """Install Blender configuration for Avalon."""
+ pyblish.register_plugin_path(str(PUBLISH_PATH))
+ avalon.register_plugin_path(avalon.Loader, str(LOAD_PATH))
+ avalon.register_plugin_path(avalon.Creator, str(CREATE_PATH))
+
+
+def uninstall():
+ """Uninstall Blender configuration for Avalon."""
+ pyblish.deregister_plugin_path(str(PUBLISH_PATH))
+ avalon.deregister_plugin_path(avalon.Loader, str(LOAD_PATH))
+ avalon.deregister_plugin_path(avalon.Creator, str(CREATE_PATH))
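+
+
+# Usage sketch (illustrative; assumes this package is registered as the
+# Avalon config, e.g. AVALON_CONFIG="pype"):
+#
+#     from avalon import api, blender
+#     api.install(blender)    # Avalon then calls install() above
+#     ...
+#     api.uninstall()         # and uninstall() on teardown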
diff --git a/pype/blender/action.py b/pype/blender/action.py
new file mode 100644
index 0000000000..4bd7e303fc
--- /dev/null
+++ b/pype/blender/action.py
@@ -0,0 +1,47 @@
+import bpy
+
+import pyblish.api
+
+from ..action import get_errored_instances_from_context
+
+
+class SelectInvalidAction(pyblish.api.Action):
+ """Select invalid objects in Blender when a publish plug-in failed."""
+ label = "Select Invalid"
+ on = "failed"
+ icon = "search"
+
+ def process(self, context, plugin):
+ errored_instances = get_errored_instances_from_context(context)
+ instances = pyblish.api.instances_by_plugin(errored_instances, plugin)
+
+ # Get the invalid nodes for the plug-ins
+ self.log.info("Finding invalid nodes...")
+ invalid = list()
+ for instance in instances:
+ invalid_nodes = plugin.get_invalid(instance)
+ if invalid_nodes:
+ if isinstance(invalid_nodes, (list, tuple)):
+ invalid.extend(invalid_nodes)
+ else:
+ self.log.warning(
+                    "Failed plug-in doesn't have any selectable objects."
+ )
+
+ bpy.ops.object.select_all(action='DESELECT')
+
+ # Make sure every node is only processed once
+ invalid = list(set(invalid))
+ if not invalid:
+ self.log.info("No invalid nodes found.")
+ return
+
+ invalid_names = [obj.name for obj in invalid]
+ self.log.info(
+ "Selecting invalid objects: %s", ", ".join(invalid_names)
+ )
+ # Select the objects and also make the last one the active object.
+ for obj in invalid:
+ obj.select_set(True)
+
+ bpy.context.view_layer.objects.active = invalid[-1]
diff --git a/pype/blender/plugin.py b/pype/blender/plugin.py
new file mode 100644
index 0000000000..ad5a259785
--- /dev/null
+++ b/pype/blender/plugin.py
@@ -0,0 +1,135 @@
+"""Shared functionality for pipeline plugins for Blender."""
+
+from pathlib import Path
+from typing import Dict, List, Optional
+
+import bpy
+
+from avalon import api
+
+VALID_EXTENSIONS = [".blend"]
+
+
+def model_name(asset: str, subset: str, namespace: Optional[str] = None) -> str:
+ """Return a consistent name for a model asset."""
+ name = f"{asset}_{subset}"
+ if namespace:
+ name = f"{namespace}:{name}"
+ return name
+
+
+class AssetLoader(api.Loader):
+ """A basic AssetLoader for Blender
+
+ This will implement the basic logic for linking/appending assets
+ into another Blender scene.
+
+ The `update` method should be implemented by a sub-class, because
+ it's different for different types (e.g. model, rig, animation,
+ etc.).
+ """
+
+ @staticmethod
+ def _get_instance_empty(instance_name: str, nodes: List) -> Optional[bpy.types.Object]:
+ """Get the 'instance empty' that holds the collection instance."""
+ for node in nodes:
+ if not isinstance(node, bpy.types.Object):
+ continue
+ if (node.type == 'EMPTY' and node.instance_type == 'COLLECTION'
+ and node.instance_collection and node.name == instance_name):
+ return node
+ return None
+
+ @staticmethod
+ def _get_instance_collection(instance_name: str, nodes: List) -> Optional[bpy.types.Collection]:
+ """Get the 'instance collection' (container) for this asset."""
+ for node in nodes:
+ if not isinstance(node, bpy.types.Collection):
+ continue
+ if node.name == instance_name:
+ return node
+ return None
+
+ @staticmethod
+ def _get_library_from_container(container: bpy.types.Collection) -> bpy.types.Library:
+ """Find the library file from the container.
+
+        It traverses the objects in this collection, checks that they all
+        come from a single library and returns that library.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ assert not container.children, "Nested collections are not supported."
+ assert container.objects, "The collection doesn't contain any objects."
+ libraries = set()
+ for obj in container.objects:
+ assert obj.library, f"'{obj.name}' is not linked."
+ libraries.add(obj.library)
+
+        assert len(libraries) == 1, f"'{container.name}' contains objects from more than 1 library."
+
+ return list(libraries)[0]
+
+ def process_asset(self,
+ context: dict,
+ name: str,
+ namespace: Optional[str] = None,
+ options: Optional[Dict] = None):
+ """Must be implemented by a sub-class"""
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def load(self,
+ context: dict,
+ name: Optional[str] = None,
+ namespace: Optional[str] = None,
+ options: Optional[Dict] = None) -> Optional[bpy.types.Collection]:
+ """Load asset via database
+
+ Arguments:
+ context: Full parenthood of representation to load
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ options: Additional settings dictionary
+ """
+ # TODO (jasper): make it possible to add the asset several times by
+ # just re-using the collection
+ assert Path(self.fname).exists(), f"{self.fname} doesn't exist."
+
+ self.process_asset(
+ context=context,
+ name=name,
+ namespace=namespace,
+ options=options,
+ )
+
+ # Only containerise if anything was loaded by the Loader.
+ nodes = self[:]
+ if not nodes:
+ return None
+
+ # Only containerise if it's not already a collection from a .blend file.
+ representation = context["representation"]["name"]
+ if representation != "blend":
+ from avalon.blender.pipeline import containerise
+ return containerise(
+ name=name,
+ namespace=namespace,
+ nodes=nodes,
+ context=context,
+ loader=self.__class__.__name__,
+ )
+
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+ instance_name = model_name(asset, subset, namespace)
+
+ return self._get_instance_collection(instance_name, nodes)
+
+ def update(self, container: Dict, representation: Dict):
+ """Must be implemented by a sub-class"""
+ raise NotImplementedError("Must be implemented by a sub-class")
+
+ def remove(self, container: Dict) -> bool:
+ """Must be implemented by a sub-class"""
+ raise NotImplementedError("Must be implemented by a sub-class")
diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py
index df760f7c21..7eb9126fca 100644
--- a/pype/ftrack/actions/action_delete_asset.py
+++ b/pype/ftrack/actions/action_delete_asset.py
@@ -1,354 +1,606 @@
import os
-import sys
-import logging
+import collections
+import uuid
+from datetime import datetime
+from queue import Queue
+
from bson.objectid import ObjectId
-import argparse
-import ftrack_api
from pype.ftrack import BaseAction
from pype.ftrack.lib.io_nonsingleton import DbConnector
-class DeleteAsset(BaseAction):
+class DeleteAssetSubset(BaseAction):
'''Edit meta data action.'''
#: Action identifier.
- identifier = 'delete.asset'
+ identifier = "delete.asset.subset"
#: Action label.
- label = 'Delete Asset/Subsets'
+ label = "Delete Asset/Subsets"
#: Action description.
- description = 'Removes from Avalon with all childs and asset from Ftrack'
- icon = '{}/ftrack/action_icons/DeleteAsset.svg'.format(
- os.environ.get('PYPE_STATICS_SERVER', '')
+ description = "Removes from Avalon with all childs and asset from Ftrack"
+ icon = "{}/ftrack/action_icons/DeleteAsset.svg".format(
+ os.environ.get("PYPE_STATICS_SERVER", "")
)
#: roles that are allowed to register this action
- role_list = ['Pypeclub', 'Administrator']
- #: Db
- db = DbConnector()
+ role_list = ["Pypeclub", "Administrator", "Project Manager"]
+ #: Db connection
+ dbcon = DbConnector()
- value = None
+ splitter = {"type": "label", "value": "---"}
+ action_data_by_id = {}
+ asset_prefix = "asset:"
+ subset_prefix = "subset:"
def discover(self, session, entities, event):
- ''' Validation '''
- if len(entities) != 1:
- return False
+ """ Validation """
+ task_ids = []
+ for ent_info in event["data"]["selection"]:
+ entType = ent_info.get("entityType", "")
+ if entType == "task":
+ task_ids.append(ent_info["entityId"])
- valid = ["task"]
- entityType = event["data"]["selection"][0].get("entityType", "")
- if entityType.lower() not in valid:
- return False
-
- return True
+ for entity in entities:
+ ftrack_id = entity["id"]
+ if ftrack_id not in task_ids:
+ continue
+ if entity.entity_type.lower() != "task":
+ return True
+ return False
def _launch(self, event):
- self.reset_session()
try:
- self.db.install()
args = self._translate_event(
self.session, event
)
+ if "values" not in event["data"]:
+ self.dbcon.install()
+ return self._interface(self.session, *args)
- interface = self._interface(
- self.session, *args
- )
-
- confirmation = self.confirm_delete(
- True, *args
- )
-
- if interface:
- return interface
-
+ confirmation = self.confirm_delete(*args)
if confirmation:
return confirmation
+ self.dbcon.install()
response = self.launch(
self.session, *args
)
finally:
- self.db.uninstall()
+ self.dbcon.uninstall()
return self._handle_result(
self.session, response, *args
)
def interface(self, session, entities, event):
- if not event['data'].get('values', {}):
- self.attempt = 1
- items = []
- entity = entities[0]
- title = 'Choose items to delete from "{}"'.format(entity['name'])
- project = entity['project']
+ self.show_message(event, "Preparing data...", True)
+ items = []
+ title = "Choose items to delete"
- self.db.Session['AVALON_PROJECT'] = project["full_name"]
+ # Filter selection and get ftrack ids
+ selection = event["data"].get("selection") or []
+ ftrack_ids = []
+ project_in_selection = False
+ for entity in selection:
+ entity_type = (entity.get("entityType") or "").lower()
+ if entity_type != "task":
+ if entity_type == "show":
+ project_in_selection = True
+ continue
- av_entity = self.db.find_one({
- 'type': 'asset',
- 'name': entity['name']
+ ftrack_id = entity.get("entityId")
+ if not ftrack_id:
+ continue
+
+ ftrack_ids.append(ftrack_id)
+
+ if project_in_selection:
+ msg = "It is not possible to use this action on project entity."
+ self.show_message(event, msg, True)
+
+ # Filter event even more (skip task entities)
+ # - task entities are not relevant for avalon
+ for entity in entities:
+ ftrack_id = entity["id"]
+ if ftrack_id not in ftrack_ids:
+ continue
+
+ if entity.entity_type.lower() == "task":
+ ftrack_ids.remove(ftrack_id)
+
+ if not ftrack_ids:
+            # It is a bug if this happens!
+ return {
+ "success": False,
+ "message": "Invalid selection for this action (Bug)"
+ }
+
+ if entities[0].entity_type.lower() == "project":
+ project = entities[0]
+ else:
+ project = entities[0]["project"]
+
+ project_name = project["full_name"]
+ self.dbcon.Session["AVALON_PROJECT"] = project_name
+
+ selected_av_entities = self.dbcon.find({
+ "type": "asset",
+ "data.ftrackId": {"$in": ftrack_ids}
+ })
+ selected_av_entities = [ent for ent in selected_av_entities]
+ if not selected_av_entities:
+ return {
+ "success": False,
+                "message": "Didn't find entities in Avalon"
+ }
+
+ # Remove cached action older than 2 minutes
+ old_action_ids = []
+ for id, data in self.action_data_by_id.items():
+ created_at = data.get("created_at")
+ if not created_at:
+ old_action_ids.append(id)
+ continue
+ cur_time = datetime.now()
+            existing_in_sec = (cur_time - created_at).total_seconds()
+ if existing_in_sec > 60 * 2:
+ old_action_ids.append(id)
+
+ for id in old_action_ids:
+ self.action_data_by_id.pop(id, None)
+
+ # Store data for action id
+ action_id = str(uuid.uuid1())
+ self.action_data_by_id[action_id] = {
+ "attempt": 1,
+ "created_at": datetime.now(),
+ "project_name": project_name,
+ "subset_ids_by_name": {},
+ "subset_ids_by_parent": {}
+ }
+
+ id_item = {
+ "type": "hidden",
+ "name": "action_id",
+ "value": action_id
+ }
+
+ items.append(id_item)
+ asset_ids = [ent["_id"] for ent in selected_av_entities]
+ subsets_for_selection = self.dbcon.find({
+ "type": "subset",
+ "parent": {"$in": asset_ids}
+ })
+
+ asset_ending = ""
+ if len(selected_av_entities) > 1:
+ asset_ending = "s"
+
+ asset_title = {
+ "type": "label",
+ "value": "# Delete asset{}:".format(asset_ending)
+ }
+ asset_note = {
+ "type": "label",
+ "value": (
+            "<br/>NOTE: Action will delete checked entities"
+            " in Ftrack and Avalon with all children entities and"
+            " published content.<br/>"
+ )
+ }
+
+ items.append(asset_title)
+ items.append(asset_note)
+
+ asset_items = collections.defaultdict(list)
+ for asset in selected_av_entities:
+ ent_path_items = [project_name]
+ ent_path_items.extend(asset.get("data", {}).get("parents") or [])
+ ent_path_to_parent = "/".join(ent_path_items) + "/"
+ asset_items[ent_path_to_parent].append(asset)
+
+ for asset_parent_path, assets in sorted(asset_items.items()):
+ items.append({
+ "type": "label",
+ "value": "## - {}".format(asset_parent_path)
})
-
- if av_entity is None:
- return {
- 'success': False,
- 'message': 'Didn\'t found assets in avalon'
- }
-
- asset_label = {
- 'type': 'label',
- 'value': '## Delete whole asset: ##'
- }
- asset_item = {
- 'label': av_entity['name'],
- 'name': 'whole_asset',
- 'type': 'boolean',
- 'value': False
- }
- splitter = {
- 'type': 'label',
- 'value': '{}'.format(200*"-")
- }
- subset_label = {
- 'type': 'label',
- 'value': '## Subsets: ##'
- }
- if av_entity is not None:
- items.append(asset_label)
- items.append(asset_item)
- items.append(splitter)
-
- all_subsets = self.db.find({
- 'type': 'subset',
- 'parent': av_entity['_id']
+ for asset in assets:
+ items.append({
+ "label": asset["name"],
+ "name": "{}{}".format(
+ self.asset_prefix, str(asset["_id"])
+ ),
+ "type": 'boolean',
+ "value": False
})
- subset_items = []
- for subset in all_subsets:
- item = {
- 'label': subset['name'],
- 'name': str(subset['_id']),
- 'type': 'boolean',
- 'value': False
- }
- subset_items.append(item)
- if len(subset_items) > 0:
- items.append(subset_label)
- items.extend(subset_items)
- else:
- return {
- 'success': False,
- 'message': 'Didn\'t found assets in avalon'
- }
+ subset_ids_by_name = collections.defaultdict(list)
+ subset_ids_by_parent = collections.defaultdict(list)
+ for subset in subsets_for_selection:
+ subset_id = subset["_id"]
+ name = subset["name"]
+ parent_id = subset["parent"]
+ subset_ids_by_name[name].append(subset_id)
+ subset_ids_by_parent[parent_id].append(subset_id)
+ if not subset_ids_by_name:
return {
- 'items': items,
- 'title': title
+ "items": items,
+ "title": title
}
- def confirm_delete(self, first_attempt, entities, event):
- if first_attempt is True:
- if 'values' not in event['data']:
- return
+ subset_ending = ""
+ if len(subset_ids_by_name.keys()) > 1:
+ subset_ending = "s"
- values = event['data']['values']
+ subset_title = {
+ "type": "label",
+ "value": "# Subset{} to delete:".format(subset_ending)
+ }
+ subset_note = {
+ "type": "label",
+ "value": (
+ "WARNING: Subset{} will be removed"
+                " for all selected entities.<br/>"
+ ).format(subset_ending)
+ }
- if len(values) <= 0:
- return
- if 'whole_asset' not in values:
- return
- else:
- values = self.values
+ items.append(self.splitter)
+ items.append(subset_title)
+ items.append(subset_note)
- title = 'Confirmation of deleting {}'
- if values['whole_asset'] is True:
- title = title.format(
- 'whole asset {}'.format(
- entities[0]['name']
- )
- )
- else:
- subsets = []
- for key, value in values.items():
- if value is True:
- subsets.append(key)
- len_subsets = len(subsets)
- if len_subsets == 0:
+ for name in subset_ids_by_name:
+ items.append({
+ "label": "{}".format(name),
+ "name": "{}{}".format(self.subset_prefix, name),
+ "type": "boolean",
+ "value": False
+ })
+
+ self.action_data_by_id[action_id]["subset_ids_by_parent"] = (
+ subset_ids_by_parent
+ )
+ self.action_data_by_id[action_id]["subset_ids_by_name"] = (
+ subset_ids_by_name
+ )
+
+ return {
+ "items": items,
+ "title": title
+ }
+
+ def confirm_delete(self, entities, event):
+ values = event["data"]["values"]
+ action_id = values.get("action_id")
+ spec_data = self.action_data_by_id.get(action_id)
+ if not spec_data:
+ # it is a bug if this happens!
+ return {
+ "success": False,
+ "message": "Something bad has happened. Please try again."
+ }
+
+ # Process Delete confirmation
+ delete_key = values.get("delete_key")
+ if delete_key:
+ delete_key = delete_key.lower().strip()
+ # Go to launch part if user entered `delete`
+ if delete_key == "delete":
+ return
+ # Skip whole process if user didn't enter any text
+ elif delete_key == "":
+ self.action_data_by_id.pop(action_id, None)
return {
- 'success': True,
- 'message': 'Nothing was selected to delete'
+ "success": True,
+ "message": "Deleting cancelled (delete entry was empty)"
}
- elif len_subsets == 1:
- title = title.format(
- '{} subset'.format(len_subsets)
- )
- else:
- title = title.format(
- '{} subsets'.format(len_subsets)
- )
+ # Get data to show again
+ to_delete = spec_data["to_delete"]
+
+ else:
+ to_delete = collections.defaultdict(list)
+ for key, value in values.items():
+ if not value:
+ continue
+ if key.startswith(self.asset_prefix):
+ _key = key.replace(self.asset_prefix, "")
+ to_delete["assets"].append(_key)
+
+ elif key.startswith(self.subset_prefix):
+ _key = key.replace(self.subset_prefix, "")
+ to_delete["subsets"].append(_key)
+
+ self.action_data_by_id[action_id]["to_delete"] = to_delete
+
+ asset_to_delete = len(to_delete.get("assets") or []) > 0
+ subset_to_delete = len(to_delete.get("subsets") or []) > 0
+
+ if not asset_to_delete and not subset_to_delete:
+ self.action_data_by_id.pop(action_id, None)
+ return {
+ "success": True,
+ "message": "Nothing was selected to delete"
+ }
+
+ attempt = spec_data["attempt"]
+ if attempt > 3:
+ self.action_data_by_id.pop(action_id, None)
+ return {
+ "success": False,
+ "message": "You didn't enter \"DELETE\" properly 3 times!"
+ }
+
+ self.action_data_by_id[action_id]["attempt"] += 1
+
+ title = "Confirmation of deleting"
+
+ if asset_to_delete:
+ asset_len = len(to_delete["assets"])
+ asset_ending = ""
+ if asset_len > 1:
+ asset_ending = "s"
+ title += " {} Asset{}".format(asset_len, asset_ending)
+ if subset_to_delete:
+ title += " and"
+
+ if subset_to_delete:
+ sub_len = len(to_delete["subsets"])
+ type_ending = ""
+ sub_ending = ""
+ if sub_len == 1:
+ subset_ids_by_name = spec_data["subset_ids_by_name"]
+ if len(subset_ids_by_name[to_delete["subsets"][0]]) > 1:
+ sub_ending = "s"
+
+ elif sub_len > 1:
+ type_ending = "s"
+ sub_ending = "s"
+
+ title += " {} type{} of subset{}".format(
+ sub_len, type_ending, sub_ending
+ )
- self.values = values
items = []
+ id_item = {"type": "hidden", "name": "action_id", "value": action_id}
delete_label = {
'type': 'label',
'value': '# Please enter "DELETE" to confirm #'
}
-
delete_item = {
- 'name': 'delete_key',
- 'type': 'text',
- 'value': '',
- 'empty_text': 'Type Delete here...'
+ "name": "delete_key",
+ "type": "text",
+ "value": "",
+ "empty_text": "Type Delete here..."
}
+
+ items.append(id_item)
items.append(delete_label)
items.append(delete_item)
return {
- 'items': items,
- 'title': title
+ "items": items,
+ "title": title
}
def launch(self, session, entities, event):
- if 'values' not in event['data']:
- return
-
- values = event['data']['values']
- if len(values) <= 0:
- return
- if 'delete_key' not in values:
- return
-
- if values['delete_key'].lower() != 'delete':
- if values['delete_key'].lower() == '':
- return {
- 'success': False,
- 'message': 'Deleting cancelled'
- }
- if self.attempt < 3:
- self.attempt += 1
- return_dict = self.confirm_delete(False, entities, event)
- return_dict['title'] = '{} ({} attempt)'.format(
- return_dict['title'], self.attempt
- )
- return return_dict
+ self.show_message(event, "Processing...", True)
+ values = event["data"]["values"]
+ action_id = values.get("action_id")
+ spec_data = self.action_data_by_id.get(action_id)
+ if not spec_data:
+ # it is a bug if this happens!
return {
- 'success': False,
- 'message': 'You didn\'t enter "DELETE" properly 3 times!'
+ "success": False,
+ "message": "Something bad has happened. Please try again."
}
- entity = entities[0]
- project = entity['project']
+ report_messages = collections.defaultdict(list)
- self.db.Session['AVALON_PROJECT'] = project["full_name"]
+ project_name = spec_data["project_name"]
+ to_delete = spec_data["to_delete"]
+ self.dbcon.Session["AVALON_PROJECT"] = project_name
- all_ids = []
- if self.values.get('whole_asset', False) is True:
- av_entity = self.db.find_one({
- 'type': 'asset',
- 'name': entity['name']
+ assets_to_delete = to_delete.get("assets") or []
+ subsets_to_delete = to_delete.get("subsets") or []
+
+ # Convert asset ids to ObjectId obj
+ assets_to_delete = [ObjectId(id) for id in assets_to_delete if id]
+
+ subset_ids_by_parent = spec_data["subset_ids_by_parent"]
+ subset_ids_by_name = spec_data["subset_ids_by_name"]
+
+ subset_ids_to_archive = []
+ asset_ids_to_archive = []
+ ftrack_ids_to_delete = []
+ if len(assets_to_delete) > 0:
+ # Prepare data when deleting whole avalon asset
+ avalon_assets = self.dbcon.find({"type": "asset"})
+ avalon_assets_by_parent = collections.defaultdict(list)
+ for asset in avalon_assets:
+ parent_id = asset["data"]["visualParent"]
+ avalon_assets_by_parent[parent_id].append(asset)
+ if asset["_id"] in assets_to_delete:
+ ftrack_id = asset["data"]["ftrackId"]
+ ftrack_ids_to_delete.append(ftrack_id)
+
+ children_queue = Queue()
+ for mongo_id in assets_to_delete:
+ children_queue.put(mongo_id)
+
+ while not children_queue.empty():
+ mongo_id = children_queue.get()
+ if mongo_id in asset_ids_to_archive:
+ continue
+
+ asset_ids_to_archive.append(mongo_id)
+ for subset_id in subset_ids_by_parent.get(mongo_id, []):
+ if subset_id not in subset_ids_to_archive:
+ subset_ids_to_archive.append(subset_id)
+
+ children = avalon_assets_by_parent.get(mongo_id)
+ if not children:
+ continue
+
+ for child in children:
+ child_id = child["_id"]
+ if child_id not in asset_ids_to_archive:
+ children_queue.put(child_id)
+
+ # Prepare names of assets in ftrack and ids of subsets in mongo
+ asset_names_to_delete = []
+ if len(subsets_to_delete) > 0:
+ for name in subsets_to_delete:
+ asset_names_to_delete.append(name)
+ for subset_id in subset_ids_by_name[name]:
+ if subset_id in subset_ids_to_archive:
+ continue
+ subset_ids_to_archive.append(subset_id)
+
+        # Get ftrack ids of entities where only the ftrack asset will be deleted
+ not_deleted_entities_id = []
+ ftrack_id_name_map = {}
+ if asset_names_to_delete:
+ for entity in entities:
+ ftrack_id = entity["id"]
+ ftrack_id_name_map[ftrack_id] = entity["name"]
+ if ftrack_id in ftrack_ids_to_delete:
+ continue
+ not_deleted_entities_id.append(ftrack_id)
+
+ mongo_proc_txt = "MongoProcessing: "
+ ftrack_proc_txt = "Ftrack processing: "
+ if asset_ids_to_archive:
+            self.log.debug("{}Archiving assets <{}>".format(
+ mongo_proc_txt,
+ ", ".join([str(id) for id in asset_ids_to_archive])
+ ))
+ self.dbcon.update_many(
+ {
+ "_id": {"$in": asset_ids_to_archive},
+ "type": "asset"
+ },
+ {"$set": {"type": "archived_asset"}}
+ )
+
+ if subset_ids_to_archive:
+            self.log.debug("{}Archiving subsets <{}>".format(
+ mongo_proc_txt,
+ ", ".join([str(id) for id in subset_ids_to_archive])
+ ))
+ self.dbcon.update_many(
+ {
+ "_id": {"$in": subset_ids_to_archive},
+ "type": "subset"
+ },
+ {"$set": {"type": "archived_subset"}}
+ )
+
+ if ftrack_ids_to_delete:
+ self.log.debug("{}Deleting Ftrack Entities <{}>".format(
+ ftrack_proc_txt, ", ".join(ftrack_ids_to_delete)
+ ))
+
+ joined_ids_to_delete = ", ".join(
+ ["\"{}\"".format(id) for id in ftrack_ids_to_delete]
+ )
+ ftrack_ents_to_delete = self.session.query(
+ "select id, link from TypedContext where id in ({})".format(
+ joined_ids_to_delete
+ )
+ ).all()
+ for entity in ftrack_ents_to_delete:
+ self.session.delete(entity)
+ try:
+ self.session.commit()
+ except Exception:
+ ent_path = "/".join(
+ [ent["name"] for ent in entity["link"]]
+ )
+ msg = "Failed to delete entity"
+ report_messages[msg].append(ent_path)
+ self.session.rollback()
+ self.log.warning(
+ "{} <{}>".format(msg, ent_path),
+ exc_info=True
+ )
+
+ if not_deleted_entities_id:
+ joined_not_deleted = ", ".join([
+ "\"{}\"".format(ftrack_id)
+ for ftrack_id in not_deleted_entities_id
+ ])
+ joined_asset_names = ", ".join([
+ "\"{}\"".format(name)
+ for name in asset_names_to_delete
+ ])
+ # Find assets of selected entities with names of checked subsets
+ assets = self.session.query((
+ "select id from Asset where"
+ " context_id in ({}) and name in ({})"
+ ).format(joined_not_deleted, joined_asset_names)).all()
+
+ self.log.debug("{}Deleting Ftrack Assets <{}>".format(
+ ftrack_proc_txt,
+ ", ".join([asset["id"] for asset in assets])
+ ))
+ for asset in assets:
+ self.session.delete(asset)
+ try:
+ self.session.commit()
+ except Exception:
+ self.session.rollback()
+ msg = "Failed to delete asset"
+ report_messages[msg].append(asset["id"])
+ self.log.warning(
+                    "{} <{}>".format(msg, asset["id"]),
+ exc_info=True
+ )
+
+ return self.report_handle(report_messages, project_name, event)
+
+ def report_handle(self, report_messages, project_name, event):
+ if not report_messages:
+ return {
+ "success": True,
+ "message": "Deletion was successful!"
+ }
+
+ title = "Delete report ({}):".format(project_name)
+ items = []
+ items.append({
+ "type": "label",
+ "value": "# Deleting was not completely successful"
+ })
+ items.append({
+ "type": "label",
+        "value": "Check logs for more information<br/>"
+ })
+ for msg, _items in report_messages.items():
+ if not _items or not msg:
+ continue
+
+ items.append({
+ "type": "label",
+ "value": "# {}".format(msg)
})
- if av_entity is not None:
- all_ids.append(av_entity['_id'])
- all_ids.extend(self.find_child(av_entity))
+ if isinstance(_items, str):
+ _items = [_items]
+ items.append({
+ "type": "label",
+                "value": '{}<br/>'.format("<br/>".join(_items))
+ })
+ items.append(self.splitter)
- session.delete(entity)
- session.commit()
- else:
- subset_names = []
- for key, value in self.values.items():
- if key == 'delete_key' or value is False:
- continue
-
- entity_id = ObjectId(key)
- av_entity = self.db.find_one({'_id': entity_id})
- subset_names.append(av_entity['name'])
- if av_entity is None:
- continue
- all_ids.append(entity_id)
- all_ids.extend(self.find_child(av_entity))
-
- for ft_asset in entity['assets']:
- if ft_asset['name'] in subset_names:
- session.delete(ft_asset)
- session.commit()
-
- if len(all_ids) == 0:
- return {
- 'success': True,
- 'message': 'No entities to delete in avalon'
- }
-
- delete_query = {'_id': {'$in': all_ids}}
- self.db.delete_many(delete_query)
+ self.show_interface(items, title, event)
return {
- 'success': True,
- 'message': 'All assets were deleted!'
+ "success": False,
+ "message": "Deleting finished. Read report messages."
}
- def find_child(self, entity):
- output = []
- id = entity['_id']
- visuals = [x for x in self.db.find({'data.visualParent': id})]
- assert len(visuals) == 0, 'This asset has another asset as child'
- childs = self.db.find({'parent': id})
- for child in childs:
- output.append(child['_id'])
- output.extend(self.find_child(child))
- return output
-
- def find_assets(self, asset_names):
- assets = []
- for name in asset_names:
- entity = self.db.find_one({
- 'type': 'asset',
- 'name': name
- })
- if entity is not None and entity not in assets:
- assets.append(entity)
- return assets
-
def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
- DeleteAsset(session, plugins_presets).register()
-
-
-def main(arguments=None):
- '''Set up logging and register action.'''
- if arguments is None:
- arguments = []
-
- parser = argparse.ArgumentParser()
- # Allow setting of logging level from arguments.
- loggingLevels = {}
- for level in (
- logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
- logging.ERROR, logging.CRITICAL
- ):
- loggingLevels[logging.getLevelName(level).lower()] = level
-
- parser.add_argument(
- '-v', '--verbosity',
- help='Set the logging output verbosity.',
- choices=loggingLevels.keys(),
- default='info'
- )
- namespace = parser.parse_args(arguments)
-
- # Set up basic logging
- logging.basicConfig(level=loggingLevels[namespace.verbosity])
-
- session = ftrack_api.Session()
-
- register(session)
-
- # Wait for events
- logging.info(
- 'Registered actions and listening for events. Use Ctrl-C to abort.'
- )
- session.event_hub.wait()
-
-
-if __name__ == '__main__':
- raise SystemExit(main(sys.argv[1:]))
+ DeleteAssetSubset(session, plugins_presets).register()
diff --git a/pype/ftrack/actions/action_delete_asset_byname.py b/pype/ftrack/actions/action_delete_asset_byname.py
deleted file mode 100644
index c05c135991..0000000000
--- a/pype/ftrack/actions/action_delete_asset_byname.py
+++ /dev/null
@@ -1,175 +0,0 @@
-import os
-import sys
-import logging
-import argparse
-import ftrack_api
-from pype.ftrack import BaseAction
-from pype.ftrack.lib.io_nonsingleton import DbConnector
-
-
-class AssetsRemover(BaseAction):
- '''Edit meta data action.'''
-
- #: Action identifier.
- identifier = 'remove.assets'
- #: Action label.
- label = "Pype Admin"
- variant = '- Delete Assets by Name'
- #: Action description.
- description = 'Removes assets from Ftrack and Avalon db with all childs'
- #: roles that are allowed to register this action
- role_list = ['Pypeclub', 'Administrator']
- icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
- os.environ.get('PYPE_STATICS_SERVER', '')
- )
- #: Db
- db = DbConnector()
-
- def discover(self, session, entities, event):
- ''' Validation '''
- if len(entities) != 1:
- return False
-
- valid = ["show", "task"]
- entityType = event["data"]["selection"][0].get("entityType", "")
- if entityType.lower() not in valid:
- return False
-
- return True
-
- def interface(self, session, entities, event):
- if not event['data'].get('values', {}):
- title = 'Enter Asset names to delete'
-
- items = []
- for i in range(15):
-
- item = {
- 'label': 'Asset {}'.format(i+1),
- 'name': 'asset_{}'.format(i+1),
- 'type': 'text',
- 'value': ''
- }
- items.append(item)
-
- return {
- 'items': items,
- 'title': title
- }
-
- def launch(self, session, entities, event):
- entity = entities[0]
- if entity.entity_type.lower() != 'Project':
- project = entity['project']
- else:
- project = entity
-
- if 'values' not in event['data']:
- return
-
- values = event['data']['values']
- if len(values) <= 0:
- return {
- 'success': True,
- 'message': 'No Assets to delete!'
- }
-
- asset_names = []
-
- for k, v in values.items():
- if v.replace(' ', '') != '':
- asset_names.append(v)
-
- self.db.install()
- self.db.Session['AVALON_PROJECT'] = project["full_name"]
-
- assets = self.find_assets(asset_names)
-
- all_ids = []
- for asset in assets:
- all_ids.append(asset['_id'])
- all_ids.extend(self.find_child(asset))
-
- if len(all_ids) == 0:
- self.db.uninstall()
- return {
- 'success': True,
- 'message': 'None of assets'
- }
-
- delete_query = {'_id': {'$in': all_ids}}
- self.db.delete_many(delete_query)
-
- self.db.uninstall()
- return {
- 'success': True,
- 'message': 'All assets were deleted!'
- }
-
- def find_child(self, entity):
- output = []
- id = entity['_id']
- visuals = [x for x in self.db.find({'data.visualParent': id})]
- assert len(visuals) == 0, 'This asset has another asset as child'
- childs = self.db.find({'parent': id})
- for child in childs:
- output.append(child['_id'])
- output.extend(self.find_child(child))
- return output
-
- def find_assets(self, asset_names):
- assets = []
- for name in asset_names:
- entity = self.db.find_one({
- 'type': 'asset',
- 'name': name
- })
- if entity is not None and entity not in assets:
- assets.append(entity)
- return assets
-
-
-def register(session, plugins_presets={}):
- '''Register plugin. Called when used as an plugin.'''
-
- AssetsRemover(session, plugins_presets).register()
-
-
-def main(arguments=None):
- '''Set up logging and register action.'''
- if arguments is None:
- arguments = []
-
- parser = argparse.ArgumentParser()
- # Allow setting of logging level from arguments.
- loggingLevels = {}
- for level in (
- logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
- logging.ERROR, logging.CRITICAL
- ):
- loggingLevels[logging.getLevelName(level).lower()] = level
-
- parser.add_argument(
- '-v', '--verbosity',
- help='Set the logging output verbosity.',
- choices=loggingLevels.keys(),
- default='info'
- )
- namespace = parser.parse_args(arguments)
-
- # Set up basic logging
- logging.basicConfig(level=loggingLevels[namespace.verbosity])
-
- session = ftrack_api.Session()
-
- register(session)
-
- # Wait for events
- logging.info(
- 'Registered actions and listening for events. Use Ctrl-C to abort.'
- )
- session.event_hub.wait()
-
-
-if __name__ == '__main__':
- raise SystemExit(main(sys.argv[1:]))
diff --git a/pype/ftrack/actions/action_delivery.py b/pype/ftrack/actions/action_delivery.py
new file mode 100644
index 0000000000..afd20d12d1
--- /dev/null
+++ b/pype/ftrack/actions/action_delivery.py
@@ -0,0 +1,538 @@
+import os
+import copy
+import shutil
+import collections
+import string
+
+import clique
+from bson.objectid import ObjectId
+
+from avalon import pipeline
+from avalon.vendor import filelink
+from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+
+from pypeapp import Anatomy
+from pype.ftrack import BaseAction
+from pype.ftrack.lib.avalon_sync import CustAttrIdKey
+
+
+class Delivery(BaseAction):
+    '''Deliver selected AssetVersion components to a client location.'''
+
+ #: Action identifier.
+ identifier = "delivery.action"
+ #: Action label.
+ label = "Delivery"
+ #: Action description.
+ description = "Deliver data to client"
+ #: roles that are allowed to register this action
+ role_list = ["Pypeclub", "Administrator", "Project manager"]
+ icon = '{}/ftrack/action_icons/Delivery.svg'.format(
+ os.environ.get('PYPE_STATICS_SERVER', '')
+ )
+
+ db_con = DbConnector()
+
+ def discover(self, session, entities, event):
+ ''' Validation '''
+ for entity in entities:
+ if entity.entity_type.lower() == "assetversion":
+ return True
+
+ return False
+
+ def interface(self, session, entities, event):
+ if event["data"].get("values", {}):
+ return
+
+ title = "Delivery data to Client"
+
+ items = []
+ item_splitter = {"type": "label", "value": "---"}
+
+ # Prepare component names for processing
+ components = None
+ project = None
+ for entity in entities:
+ if project is None:
+ project_id = None
+ for ent_info in entity["link"]:
+ if ent_info["type"].lower() == "project":
+ project_id = ent_info["id"]
+ break
+
+ if project_id is None:
+ project = entity["asset"]["parent"]["project"]
+ else:
+ project = session.query((
+ "select id, full_name from Project where id is \"{}\""
+ ).format(project_id)).one()
+
+ _components = set(
+ [component["name"] for component in entity["components"]]
+ )
+ if components is None:
+ components = _components
+ continue
+
+ components = components.intersection(_components)
+ if not components:
+ break
+
+ project_name = project["full_name"]
+ items.append({
+ "type": "hidden",
+ "name": "__project_name__",
+ "value": project_name
+ })
+
+        # Prepare anatomy data
+ anatomy = Anatomy(project_name)
+ new_anatomies = []
+ first = None
+ for key in (anatomy.templates.get("delivery") or {}):
+ new_anatomies.append({
+ "label": key,
+ "value": key
+ })
+ if first is None:
+ first = key
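+
+        # Illustrative Anatomy.yaml shape (template names and keys are
+        # assumptions; every template under "delivery" becomes an option):
+        #
+        #     delivery:
+        #         client_files: "{root}/{project[name]}/delivery/..."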
+
+ skipped = False
+ # Add message if there are any common components
+ if not components or not new_anatomies:
+ skipped = True
+ items.append({
+ "type": "label",
+                "value": "Something went wrong:<br/>"
+ })
+
+ items.append({
+ "type": "hidden",
+ "name": "__skipped__",
+ "value": skipped
+ })
+
+ if not components:
+ if len(entities) == 1:
+ items.append({
+ "type": "label",
+ "value": (
+ "- Selected entity doesn't have components to deliver."
+ )
+ })
+ else:
+ items.append({
+ "type": "label",
+ "value": (
+ "- Selected entities don't have common components."
+ )
+ })
+
+ # Add message if delivery anatomies are not set
+ if not new_anatomies:
+ items.append({
+ "type": "label",
+ "value": (
+ "- `\"delivery\"` anatomy key is not set in config."
+ )
+ })
+
+ # Skip if there are any data shortcomings
+ if skipped:
+ return {
+ "items": items,
+ "title": title
+ }
+
+ items.append({
+            "value": "Choose Components to deliver<br/>",
+ "type": "label"
+ })
+
+ for component in components:
+ items.append({
+ "type": "boolean",
+ "value": False,
+ "label": component,
+ "name": component
+ })
+
+ items.append(item_splitter)
+
+ items.append({
+            "value": "Location for delivery<br/>",
+ "type": "label"
+ })
+
+ items.append({
+ "type": "label",
+ "value": (
+ "NOTE: It is possible to replace `root` key in anatomy."
+ )
+ })
+
+ items.append({
+ "type": "text",
+ "name": "__location_path__",
+ "empty_text": "Type location path here...(Optional)"
+ })
+
+ items.append(item_splitter)
+
+ items.append({
+            "value": "Anatomy of delivery files<br/>",
+ "type": "label"
+ })
+
+ items.append({
+ "type": "label",
+ "value": (
+ "NOTE: These can be set in Anatomy.yaml"
+                " within `delivery` key.<br/>"
+ )
+ })
+
+ items.append({
+ "type": "enumerator",
+ "name": "__new_anatomies__",
+ "data": new_anatomies,
+ "value": first
+ })
+
+ return {
+ "items": items,
+ "title": title
+ }
+
+ def launch(self, session, entities, event):
+ if "values" not in event["data"]:
+ return
+
+ self.report_items = collections.defaultdict(list)
+
+ values = event["data"]["values"]
+ skipped = values.pop("__skipped__")
+ if skipped:
+ return None
+
+ component_names = []
+ location_path = values.pop("__location_path__")
+ anatomy_name = values.pop("__new_anatomies__")
+ project_name = values.pop("__project_name__")
+
+ for key, value in values.items():
+ if value is True:
+ component_names.append(key)
+
+ if not component_names:
+ return {
+ "success": True,
+                "message": "No components were selected to deliver."
+ }
+
+ location_path = location_path.strip()
+ if location_path:
+ location_path = os.path.normpath(location_path)
+ if not os.path.exists(location_path):
+ return {
+ "success": False,
+ "message": (
+                        "Entered location path does not exist. \"{}\""
+ ).format(location_path)
+ }
+
+ self.db_con.install()
+ self.db_con.Session["AVALON_PROJECT"] = project_name
+
+ repres_to_deliver = []
+ for entity in entities:
+ asset = entity["asset"]
+ subset_name = asset["name"]
+ version = entity["version"]
+
+ parent = asset["parent"]
+ parent_mongo_id = parent["custom_attributes"].get(CustAttrIdKey)
+ if parent_mongo_id:
+ parent_mongo_id = ObjectId(parent_mongo_id)
+ else:
+ asset_ent = self.db_con.find_one({
+ "type": "asset",
+ "data.ftrackId": parent["id"]
+ })
+ if not asset_ent:
+ ent_path = "/".join(
+ [ent["name"] for ent in parent["link"]]
+ )
+                    msg = "Entities not synchronized to Avalon"
+ self.report_items[msg].append(ent_path)
+ self.log.warning("{} <{}>".format(msg, ent_path))
+ continue
+
+ parent_mongo_id = asset_ent["_id"]
+
+ subset_ent = self.db_con.find_one({
+ "type": "subset",
+ "parent": parent_mongo_id,
+ "name": subset_name
+ })
+
+ version_ent = self.db_con.find_one({
+ "type": "version",
+ "name": version,
+ "parent": subset_ent["_id"]
+ })
+
+ repre_ents = self.db_con.find({
+ "type": "representation",
+ "parent": version_ent["_id"]
+ })
+
+ repres_by_name = {}
+ for repre in repre_ents:
+ repre_name = repre["name"]
+ repres_by_name[repre_name] = repre
+
+ for component in entity["components"]:
+ comp_name = component["name"]
+ if comp_name not in component_names:
+ continue
+
+ repre = repres_by_name.get(comp_name)
+ repres_to_deliver.append(repre)
+
+ if not location_path:
+ location_path = os.environ.get("AVALON_PROJECTS") or ""
+
+        self.log.debug(location_path)
+
+ anatomy = Anatomy(project_name)
+ for repre in repres_to_deliver:
+ # Get destination repre path
+ anatomy_data = copy.deepcopy(repre["context"])
+ anatomy_data["root"] = location_path
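+            # The representation context typically carries the publish
+            # template keys (project, asset, subset, version, ...); only the
+            # root is swapped for the chosen delivery location.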
+
+ anatomy_filled = anatomy.format(anatomy_data)
+ test_path = (
+ anatomy_filled
+ .get("delivery", {})
+ .get(anatomy_name)
+ )
+
+ if not test_path:
+ msg = (
+ "Missing keys in Representation's context"
+ " for anatomy template \"{}\"."
+ ).format(anatomy_name)
+
+ all_anatomies = anatomy.format_all(anatomy_data)
+ result = None
+ for anatomies in all_anatomies.values():
+ for key, temp in anatomies.get("delivery", {}).items():
+ if key != anatomy_name:
+ continue
+
+ result = temp
+ break
+
+ # TODO log error! - missing keys in anatomy
+ if result:
+ missing_keys = [
+ key[1] for key in string.Formatter().parse(result)
+ if key[1] is not None
+ ]
+ else:
+ missing_keys = ["unknown"]
+
+ keys = ", ".join(missing_keys)
+ sub_msg = (
+                    "Representation: {}<br/>- Missing keys: \"{}\"<br/>"
+ ).format(str(repre["_id"]), keys)
+ self.report_items[msg].append(sub_msg)
+ self.log.warning(
+ "{} Representation: \"{}\" Filled: <{}>".format(
+ msg, str(repre["_id"]), str(result)
+ )
+ )
+ continue
+
+ # Get source repre path
+ frame = repre['context'].get('frame')
+
+ if frame:
+ repre["context"]["frame"] = len(str(frame)) * "#"
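+                # e.g. frame "1001" becomes "####" so the formatted path keeps
+                # a padded placeholder instead of one specific frame number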
+
+            repre_path = self.path_from_representation(repre)
+            # TODO add backup solution where root of path from component
+            # is replaced with AVALON_PROJECTS root
+ if not frame:
+ self.process_single_file(
+ repre_path, anatomy, anatomy_name, anatomy_data
+ )
+
+ else:
+ self.process_sequence(
+ repre_path, anatomy, anatomy_name, anatomy_data
+ )
+
+ self.db_con.uninstall()
+
+ return self.report()
+
+ def process_single_file(
+ self, repre_path, anatomy, anatomy_name, anatomy_data
+ ):
+ anatomy_filled = anatomy.format(anatomy_data)
+ delivery_path = anatomy_filled["delivery"][anatomy_name]
+ delivery_folder = os.path.dirname(delivery_path)
+ if not os.path.exists(delivery_folder):
+ os.makedirs(delivery_folder)
+
+ self.copy_file(repre_path, delivery_path)
+
+ def process_sequence(
+ self, repre_path, anatomy, anatomy_name, anatomy_data
+ ):
+ dir_path, file_name = os.path.split(str(repre_path))
+
+ base_name, ext = os.path.splitext(file_name)
+ file_name_items = None
+ if "#" in base_name:
+ file_name_items = [part for part in base_name.split("#") if part]
+
+ elif "%" in base_name:
+ file_name_items = base_name.split("%")
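+        # e.g. "render.####" -> ["render."] and "render.%04d" -> ["render.", "04d"]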
+
+ if not file_name_items:
+ msg = "Source file was not found"
+ self.report_items[msg].append(repre_path)
+ self.log.warning("{} <{}>".format(msg, repre_path))
+ return
+
+ src_collections, remainder = clique.assemble(os.listdir(dir_path))
+ src_collection = None
+ for col in src_collections:
+ if col.tail != ext:
+ continue
+
+            # skip if collection doesn't have the same basename
+ if not col.head.startswith(file_name_items[0]):
+ continue
+
+ src_collection = col
+ break
+
+ if src_collection is None:
+ # TODO log error!
+ msg = "Source collection of files was not found"
+ self.report_items[msg].append(repre_path)
+ self.log.warning("{} <{}>".format(msg, repre_path))
+ return
+
+ frame_indicator = "@####@"
+
+ anatomy_data["frame"] = frame_indicator
+ anatomy_filled = anatomy.format(anatomy_data)
+
+ delivery_path = anatomy_filled["delivery"][anatomy_name]
+        self.log.debug(delivery_path)
+ delivery_folder = os.path.dirname(delivery_path)
+ dst_head, dst_tail = delivery_path.split(frame_indicator)
+ dst_padding = src_collection.padding
+ dst_collection = clique.Collection(
+ head=dst_head,
+ tail=dst_tail,
+ padding=dst_padding
+ )
+
+ if not os.path.exists(delivery_folder):
+ os.makedirs(delivery_folder)
+
+ src_head = src_collection.head
+ src_tail = src_collection.tail
+ for index in src_collection.indexes:
+ src_padding = src_collection.format("{padding}") % index
+ src_file_name = "{}{}{}".format(src_head, src_padding, src_tail)
+ src = os.path.normpath(
+ os.path.join(dir_path, src_file_name)
+ )
+
+ dst_padding = dst_collection.format("{padding}") % index
+ dst = "{}{}{}".format(dst_head, dst_padding, dst_tail)
+
+ self.copy_file(src, dst)
+
+    def path_from_representation(self, representation):
+ try:
+ template = representation["data"]["template"]
+
+ except KeyError:
+ return None
+
+ try:
+ context = representation["context"]
+ context["root"] = os.environ.get("AVALON_PROJECTS") or ""
+ path = pipeline.format_template_with_optional_keys(
+ context, template
+ )
+
+ except KeyError:
+ # Template references unavailable data
+ return None
+
+ return os.path.normpath(path)
+
+ def copy_file(self, src_path, dst_path):
+ if os.path.exists(dst_path):
+ return
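+        # Prefer a hardlink (fast, no extra disk space); fall back to a real
+        # copy when linking is not possible (e.g. across filesystems).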
+ try:
+ filelink.create(
+ src_path,
+ dst_path,
+ filelink.HARDLINK
+ )
+ except OSError:
+ shutil.copyfile(src_path, dst_path)
+
+ def report(self):
+ items = []
+ title = "Delivery report"
+ for msg, _items in self.report_items.items():
+ if not _items:
+ continue
+
+ if items:
+ items.append({"type": "label", "value": "---"})
+
+ items.append({
+ "type": "label",
+ "value": "# {}".format(msg)
+ })
+ if not isinstance(_items, (list, tuple)):
+ _items = [_items]
+ __items = []
+ for item in _items:
+ __items.append(str(item))
+
+ items.append({
+ "type": "label",
+                "value": '{}<br/>'.format("<br/>".join(__items))
+ })
+
+ if not items:
+ return {
+ "success": True,
+ "message": "Delivery Finished"
+ }
+
+ return {
+ "items": items,
+ "title": title,
+ "success": False,
+ "message": "Delivery Finished"
+ }
+
+def register(session, plugins_presets={}):
+ '''Register plugin. Called when used as an plugin.'''
+
+ Delivery(session, plugins_presets).register()
diff --git a/pype/ftrack/actions/action_seed.py b/pype/ftrack/actions/action_seed.py
index cf0a4b0445..1238e73e72 100644
--- a/pype/ftrack/actions/action_seed.py
+++ b/pype/ftrack/actions/action_seed.py
@@ -9,7 +9,7 @@ class SeedDebugProject(BaseAction):
#: Action identifier.
identifier = "seed.debug.project"
#: Action label.
- label = "SeedDebugProject"
+ label = "Seed Debug Project"
#: Action description.
description = "Description"
#: priority
@@ -265,6 +265,15 @@ class SeedDebugProject(BaseAction):
def create_assets(self, project, asset_count):
self.log.debug("*** Creating assets:")
+ try:
+ asset_count = int(asset_count)
+ except ValueError:
+ asset_count = 0
+
+ if asset_count <= 0:
+ self.log.debug("No assets to create")
+ return
+
main_entity = self.session.create("Folder", {
"name": "Assets",
"parent": project
@@ -305,6 +314,31 @@ class SeedDebugProject(BaseAction):
def create_shots(self, project, seq_count, shots_count):
self.log.debug("*** Creating shots:")
+
+ # Convert counts to integers
+ try:
+ seq_count = int(seq_count)
+ except ValueError:
+ seq_count = 0
+
+ try:
+ shots_count = int(shots_count)
+ except ValueError:
+ shots_count = 0
+
+ # Check if both are higher than 0
+ missing = []
+ if seq_count <= 0:
+ missing.append("sequences")
+
+ if shots_count <= 0:
+ missing.append("shots")
+
+ if missing:
+ self.log.debug("No {} to create".format(" and ".join(missing)))
+ return
+
+ # Create Folder "Shots"
main_entity = self.session.create("Folder", {
"name": "Shots",
"parent": project
diff --git a/pype/ftrack/actions/action_sync_to_avalon.py b/pype/ftrack/actions/action_sync_to_avalon.py
index 01d0b866bf..d2fcfb372f 100644
--- a/pype/ftrack/actions/action_sync_to_avalon.py
+++ b/pype/ftrack/actions/action_sync_to_avalon.py
@@ -70,7 +70,10 @@ class SyncToAvalonLocal(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
- self.entities_factory.launch_setup(ft_project_name)
+ output = self.entities_factory.launch_setup(ft_project_name)
+ if output is not None:
+ return output
+
time_1 = time.time()
self.entities_factory.set_cutom_attributes()
diff --git a/pype/ftrack/actions/action_update_from_v2-2-0.py b/pype/ftrack/actions/action_update_from_v2-2-0.py
index 80b920207a..dd0f1e6ea2 100644
--- a/pype/ftrack/actions/action_update_from_v2-2-0.py
+++ b/pype/ftrack/actions/action_update_from_v2-2-0.py
@@ -1,14 +1,6 @@
import os
-import sys
-import argparse
-import logging
-import collections
-import json
-import re
-import ftrack_api
from pype.ftrack import BaseAction
-from avalon import io, inventory, schema
from pype.ftrack.lib.io_nonsingleton import DbConnector
@@ -134,7 +126,6 @@ class PypeUpdateFromV2_2_0(BaseAction):
"title": title
}
-
def launch(self, session, entities, event):
if 'values' not in event['data']:
return
@@ -182,7 +173,7 @@ class PypeUpdateFromV2_2_0(BaseAction):
{"type": "asset"},
{"$unset": {"silo": ""}}
)
-
+
self.log.debug("- setting schema of assets to v.3")
self.db_con.update_many(
{"type": "asset"},
@@ -191,10 +182,8 @@ class PypeUpdateFromV2_2_0(BaseAction):
return True
+
def register(session, plugins_presets={}):
"""Register plugin. Called when used as an plugin."""
- if not isinstance(session, ftrack_api.session.Session):
- return
-
PypeUpdateFromV2_2_0(session, plugins_presets).register()
diff --git a/pype/ftrack/events/action_sync_to_avalon.py b/pype/ftrack/events/action_sync_to_avalon.py
index 9f9deeab95..79ab1b5f7a 100644
--- a/pype/ftrack/events/action_sync_to_avalon.py
+++ b/pype/ftrack/events/action_sync_to_avalon.py
@@ -105,7 +105,10 @@ class SyncToAvalonServer(BaseAction):
ft_project_name = in_entities[0]["project"]["full_name"]
try:
- self.entities_factory.launch_setup(ft_project_name)
+ output = self.entities_factory.launch_setup(ft_project_name)
+ if output is not None:
+ return output
+
time_1 = time.time()
self.entities_factory.set_cutom_attributes()
diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index 606866aba2..23284a2ae6 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -28,7 +28,7 @@ class SyncToAvalonEvent(BaseEvent):
ignore_entTypes = [
"socialfeed", "socialnotification", "note",
"assetversion", "job", "user", "reviewsessionobject", "timer",
- "timelog", "auth_userrole"
+ "timelog", "auth_userrole", "appointment"
]
ignore_ent_types = ["Milestone"]
ignore_keys = ["statusid"]
@@ -131,7 +131,9 @@ class SyncToAvalonEvent(BaseEvent):
ftrack_id = proj["data"]["ftrackId"]
self._avalon_ents_by_ftrack_id[ftrack_id] = proj
for ent in ents:
- ftrack_id = ent["data"]["ftrackId"]
+ ftrack_id = ent["data"].get("ftrackId")
+ if ftrack_id is None:
+ continue
self._avalon_ents_by_ftrack_id[ftrack_id] = ent
return self._avalon_ents_by_ftrack_id
@@ -1427,6 +1429,93 @@ class SyncToAvalonEvent(BaseEvent):
parent_id = ent_info["parentId"]
new_tasks_by_parent[parent_id].append(ent_info)
pop_out_ents.append(ftrack_id)
+ continue
+
+ name = (
+ ent_info
+ .get("changes", {})
+ .get("name", {})
+ .get("new")
+ )
+ avalon_ent_by_name = self.avalon_ents_by_name.get(name)
+            avalon_ent_by_name_ftrack_id = (
+                (avalon_ent_by_name or {})
+                .get("data", {})
+                .get("ftrackId")
+            )
+ if avalon_ent_by_name and avalon_ent_by_name_ftrack_id is None:
+ ftrack_ent = self.ftrack_ents_by_id.get(ftrack_id)
+ if not ftrack_ent:
+ ftrack_ent = self.process_session.query(
+ self.entities_query_by_id.format(
+ self.cur_project["id"], ftrack_id
+ )
+ ).one()
+ self.ftrack_ents_by_id[ftrack_id] = ftrack_ent
+
+ ent_path_items = [ent["name"] for ent in ftrack_ent["link"]]
+                parents = ent_path_items[1:-1]
+
+ avalon_ent_parents = (
+ avalon_ent_by_name.get("data", {}).get("parents")
+ )
+ if parents == avalon_ent_parents:
+ self.dbcon.update_one({
+ "_id": avalon_ent_by_name["_id"]
+ }, {
+ "$set": {
+ "data.ftrackId": ftrack_id,
+ "data.entityType": entity_type
+ }
+ })
+
+ avalon_ent_by_name["data"]["ftrackId"] = ftrack_id
+ avalon_ent_by_name["data"]["entityType"] = entity_type
+
+ self._avalon_ents_by_ftrack_id[ftrack_id] = (
+ avalon_ent_by_name
+ )
+ if self._avalon_ents_by_parent_id:
+ found = None
+ for _parent_id_, _entities_ in (
+ self._avalon_ents_by_parent_id.items()
+ ):
+ for _idx_, entity in enumerate(_entities_):
+ if entity["_id"] == avalon_ent_by_name["_id"]:
+ found = (_parent_id_, _idx_)
+ break
+
+ if found:
+ break
+
+ if found:
+ _parent_id_, _idx_ = found
+ self._avalon_ents_by_parent_id[_parent_id_][
+ _idx_] = avalon_ent_by_name
+
+ if self._avalon_ents_by_id:
+ self._avalon_ents_by_id[avalon_ent_by_name["_id"]] = (
+ avalon_ent_by_name
+ )
+
+ if self._avalon_ents_by_name:
+ self._avalon_ents_by_name[name] = avalon_ent_by_name
+
+ if self._avalon_ents:
+ found = None
+ project, entities = self._avalon_ents
+ for _idx_, _ent_ in enumerate(entities):
+ if _ent_["_id"] != avalon_ent_by_name["_id"]:
+ continue
+ found = _idx_
+ break
+
+ if found is not None:
+ entities[found] = avalon_ent_by_name
+ self._avalon_ents = project, entities
+
+ pop_out_ents.append(ftrack_id)
+ continue
configuration_id = entity_type_conf_ids.get(entity_type)
if not configuration_id:
@@ -1438,9 +1527,11 @@ class SyncToAvalonEvent(BaseEvent):
if attr["entity_type"] != ent_info["entityType"]:
continue
- if ent_info["entityType"] != "show":
- if attr["object_type_id"] != ent_info["objectTypeId"]:
- continue
+ if (
+ ent_info["entityType"] == "task" and
+ attr["object_type_id"] != ent_info["objectTypeId"]
+ ):
+ continue
configuration_id = attr["id"]
entity_type_conf_ids[entity_type] = configuration_id
@@ -1712,7 +1803,8 @@ class SyncToAvalonEvent(BaseEvent):
if ca_ent_type == "show":
cust_attrs_by_obj_id[ca_ent_type][key] = cust_attr
- else:
+
+ elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
cust_attrs_by_obj_id[obj_id][key] = cust_attr
diff --git a/pype/ftrack/events/event_version_to_task_statuses.py b/pype/ftrack/events/event_version_to_task_statuses.py
index 81398373bb..0d2a3130c0 100644
--- a/pype/ftrack/events/event_version_to_task_statuses.py
+++ b/pype/ftrack/events/event_version_to_task_statuses.py
@@ -1,73 +1,134 @@
-import ftrack_api
from pype.ftrack import BaseEvent
+from pypeapp import config
class VersionToTaskStatus(BaseEvent):
+ # Presets usage
+ default_status_mapping = {}
+
def launch(self, session, event):
'''Propagates status from version to task when changed'''
# start of event procedure ----------------------------------
for entity in event['data'].get('entities', []):
- # Filter non-assetversions
- if (
- entity['entityType'] == 'assetversion' and
- 'statusid' in (entity.get('keys') or [])
- ):
+ # Filter AssetVersions
+ if entity["entityType"] != "assetversion":
+ continue
- version = session.get('AssetVersion', entity['entityId'])
- try:
- version_status = session.get(
- 'Status', entity['changes']['statusid']['new']
- )
- except Exception:
+ # Skip if statusid not in keys (in changes)
+ keys = entity.get("keys")
+ if not keys or "statusid" not in keys:
+ continue
+
+            # Get the new status id of the version
+ version_status_id = (
+ entity
+ .get("changes", {})
+ .get("statusid", {})
+ .get("new", {})
+ )
+
+ # Just check that `new` is set to any value
+ if not version_status_id:
+ continue
+
+ try:
+ version_status = session.get("Status", version_status_id)
+ except Exception:
+ self.log.warning(
+                    "Failed to query Status with id [ {} ]".format(
+                        version_status_id
+                    ),
+                    exc_info=True
+                )
+                continue
+
+ if not version_status:
+ continue
+
+ version_status_orig = version_status["name"]
+
+ # Load status mapping from presets
+ status_mapping = (
+ config.get_presets()
+ .get("ftrack", {})
+ .get("ftrack_config", {})
+ .get("status_version_to_task")
+ ) or self.default_status_mapping
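+            # Expected preset shape (assumed from the previously hard-coded
+            # mapping): {"reviewed": ["Change requested"],
+            #            "approved": ["Complete"]}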
+
+ # Skip if mapping is empty
+ if not status_mapping:
+ continue
+
+ # Lower version status name and check if has mapping
+ version_status = version_status_orig.lower()
+ new_status_names = []
+ mapped = status_mapping.get(version_status)
+ if mapped:
+ new_status_names.extend(list(mapped))
+
+ new_status_names.append(version_status)
+
+ self.log.debug(
+ "Processing AssetVersion status change: [ {} ]".format(
+ version_status_orig
+ )
+ )
+
+ # Lower all names from presets
+ new_status_names = [name.lower() for name in new_status_names]
+
+ # Get entities necessary for processing
+ version = session.get("AssetVersion", entity["entityId"])
+ task = version.get("task")
+ if not task:
+ continue
+
+ project_schema = task["project"]["project_schema"]
+ # Get all available statuses for Task
+ statuses = project_schema.get_statuses("Task", task["type_id"])
+            # map lowered status names to their status objects
+ stat_names_low = {
+ status["name"].lower(): status for status in statuses
+ }
+
+ new_status = None
+ for status_name in new_status_names:
+ if status_name not in stat_names_low:
continue
- task_status = version_status
- task = version['task']
- self.log.info('>>> version status: [ {} ]'.format(
- version_status['name']))
- status_to_set = None
- # Filter to versions with status change to "render complete"
- if version_status['name'].lower() == 'reviewed':
- status_to_set = 'Change requested'
+ # store object of found status
+ new_status = stat_names_low[status_name]
+ self.log.debug("Status to set: [ {} ]".format(
+ new_status["name"]
+ ))
+ break
- if version_status['name'].lower() == 'approved':
- status_to_set = 'Complete'
+            # Skip if none of the status names were found for this entity
+ if not new_status:
+ self.log.warning(
+ "Any of statuses from presets can be set: {}".format(
+ str(new_status_names)
+ )
+ )
+ continue
- self.log.info(
- '>>> status to set: [ {} ]'.format(status_to_set))
+ # Get full path to task for logging
+ ent_path = "/".join([ent["name"] for ent in task["link"]])
- if status_to_set is not None:
- query = 'Status where name is "{}"'.format(status_to_set)
- try:
- task_status = session.query(query).one()
- except Exception:
- self.log.info(
- '!!! status was not found in Ftrack [ {} ]'.format(
- status_to_set
- ))
- continue
-
- # Proceed if the task status was set
- if task_status is not None:
- # Get path to task
- path = task['name']
- for p in task['ancestors']:
- path = p['name'] + '/' + path
-
- # Setting task status
- try:
- task['status'] = task_status
- session.commit()
- except Exception as e:
- session.rollback()
- self.log.warning('!!! [ {} ] status couldnt be set:\
- [ {} ]'.format(path, e))
- session.rollback()
- else:
- self.log.info('>>> [ {} ] updated to [ {} ]'.format(
- path, task_status['name']))
+ # Setting task status
+ try:
+ task["status"] = new_status
+ session.commit()
+ self.log.debug("[ {} ] Status updated to [ {} ]".format(
+ ent_path, new_status['name']
+ ))
+ except Exception:
+ session.rollback()
+ self.log.warning(
+ "[ {} ]Status couldn't be set".format(ent_path),
+ exc_info=True
+ )
def register(session, plugins_presets):
diff --git a/pype/ftrack/ftrack_server/event_server_cli.py b/pype/ftrack/ftrack_server/event_server_cli.py
index 56a301e8f2..b09b0bc84e 100644
--- a/pype/ftrack/ftrack_server/event_server_cli.py
+++ b/pype/ftrack/ftrack_server/event_server_cli.py
@@ -7,11 +7,9 @@ import socket
import argparse
import atexit
import time
-from urllib.parse import urlparse
import ftrack_api
from pype.ftrack.lib import credentials
-from pype.ftrack.ftrack_server import FtrackServer
from pype.ftrack.ftrack_server.lib import (
ftrack_events_mongo_settings, check_ftrack_url
)
@@ -67,9 +65,8 @@ def validate_credentials(url, user, api):
except Exception as e:
print(
'ERROR: Can\'t log into Ftrack with used credentials:'
- ' Ftrack server: "{}" // Username: {} // API key: {}'.format(
- url, user, api
- ))
+            ' Ftrack server: "{}" // Username: {} // API key: {}'.format(
+                url, user, api
+            )
+        )
return False
print('DEBUG: Credentials Username: "{}", API key: "{}" are valid.'.format(
@@ -147,9 +144,9 @@ def legacy_server(ftrack_url):
).format(str(max_fail_count), str(wait_time_after_max_fail)))
subproc_failed_count += 1
elif ((
- datetime.datetime.now() - subproc_last_failed
- ).seconds > wait_time_after_max_fail):
- subproc_failed_count = 0
+ datetime.datetime.now() - subproc_last_failed
+ ).seconds > wait_time_after_max_fail):
+ subproc_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif subproc.poll() is not None:
@@ -277,9 +274,9 @@ def main_loop(ftrack_url):
).format(str(max_fail_count), str(wait_time_after_max_fail)))
storer_failed_count += 1
elif ((
- datetime.datetime.now() - storer_last_failed
- ).seconds > wait_time_after_max_fail):
- storer_failed_count = 0
+ datetime.datetime.now() - storer_last_failed
+ ).seconds > wait_time_after_max_fail):
+ storer_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not storer_thread.isAlive():
@@ -313,13 +310,13 @@ def main_loop(ftrack_url):
processor_failed_count += 1
elif ((
- datetime.datetime.now() - processor_last_failed
- ).seconds > wait_time_after_max_fail):
- processor_failed_count = 0
+ datetime.datetime.now() - processor_last_failed
+ ).seconds > wait_time_after_max_fail):
+ processor_failed_count = 0
# If thread failed test Ftrack and Mongo connection
elif not processor_thread.isAlive():
- if storer_thread.mongo_error:
+ if processor_thread.mongo_error:
raise Exception(
"Exiting because have issue with acces to MongoDB"
)
diff --git a/pype/ftrack/ftrack_server/lib.py b/pype/ftrack/ftrack_server/lib.py
index 748937c7bd..edd3cee09b 100644
--- a/pype/ftrack/ftrack_server/lib.py
+++ b/pype/ftrack/ftrack_server/lib.py
@@ -1,10 +1,32 @@
import os
+import sys
+import logging
+import getpass
+import atexit
+import tempfile
+import threading
+import datetime
+import time
+import queue
+import pymongo
+
import requests
+import ftrack_api
+import ftrack_api.session
+import ftrack_api.cache
+import ftrack_api.operation
+import ftrack_api._centralized_storage_scenario
+import ftrack_api.event
+from ftrack_api.logging import LazyLogMessage as L
try:
from urllib.parse import urlparse, parse_qs
except ImportError:
from urlparse import urlparse, parse_qs
+from pypeapp import Logger
+
+from pype.ftrack.lib.custom_db_connector import DbConnector
+
def ftrack_events_mongo_settings():
host = None
@@ -49,7 +71,9 @@ def ftrack_events_mongo_settings():
def get_ftrack_event_mongo_info():
- host, port, database, username, password, collection, auth_db = ftrack_events_mongo_settings()
+ host, port, database, username, password, collection, auth_db = (
+ ftrack_events_mongo_settings()
+ )
user_pass = ""
if username and password:
user_pass = "{}:{}@".format(username, password)
@@ -97,3 +121,303 @@ def check_ftrack_url(url, log_errors=True):
print('DEBUG: Ftrack server {} is accessible.'.format(url))
return url
+
+
+class StorerEventHub(ftrack_api.event.hub.EventHub):
+ def __init__(self, *args, **kwargs):
+ self.sock = kwargs.pop("sock")
+ super(StorerEventHub, self).__init__(*args, **kwargs)
+
+ def _handle_packet(self, code, packet_identifier, path, data):
+ """Override `_handle_packet` which extend heartbeat"""
+ code_name = self._code_name_mapping[code]
+ if code_name == "heartbeat":
+ # Reply with heartbeat.
+ self.sock.sendall(b"storer")
+ return self._send_packet(self._code_name_mapping['heartbeat'])
+
+ elif code_name == "connect":
+ event = ftrack_api.event.base.Event(
+ topic="pype.storer.started",
+ data={},
+ source={
+ "id": self.id,
+ "user": {"username": self._api_user}
+ }
+ )
+ self._event_queue.put(event)
+
+ return super(StorerEventHub, self)._handle_packet(
+ code, packet_identifier, path, data
+ )
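+
+# Both StorerEventHub and ProcessEventHub (below) answer ftrack heartbeats and
+# also write a short identifier (b"storer" / b"processor") to the watchdog
+# socket, so the parent SocketThread can tell the subprocess is still alive.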
+
+
+class ProcessEventHub(ftrack_api.event.hub.EventHub):
+ url, database, table_name = get_ftrack_event_mongo_info()
+
+ is_table_created = False
+ pypelog = Logger().get_logger("Session Processor")
+
+ def __init__(self, *args, **kwargs):
+ self.dbcon = DbConnector(
+ mongo_url=self.url,
+ database_name=self.database,
+ table_name=self.table_name
+ )
+ self.sock = kwargs.pop("sock")
+ super(ProcessEventHub, self).__init__(*args, **kwargs)
+
+ def prepare_dbcon(self):
+ try:
+ self.dbcon.install()
+ self.dbcon._database.list_collection_names()
+ except pymongo.errors.AutoReconnect:
+ self.pypelog.error(
+ "Mongo server \"{}\" is not responding, exiting.".format(
+ os.environ["AVALON_MONGO"]
+ )
+ )
+ sys.exit(0)
+
+ except pymongo.errors.OperationFailure:
+            self.pypelog.error((
+                "Error with Mongo access, probably permissions. "
+                "Check if a database named \"{}\" and a collection"
+                " \"{}\" exist."
+            ).format(self.database, self.table_name))
+ self.sock.sendall(b"MongoError")
+ sys.exit(0)
+
+ def wait(self, duration=None):
+ """Overriden wait
+
+ Event are loaded from Mongo DB when queue is empty. Handled event is
+ set as processed in Mongo DB.
+ """
+ started = time.time()
+ self.prepare_dbcon()
+ while True:
+ try:
+ event = self._event_queue.get(timeout=0.1)
+ except queue.Empty:
+ if not self.load_events():
+ time.sleep(0.5)
+ else:
+ try:
+ self._handle(event)
+ self.dbcon.update_one(
+ {"id": event["id"]},
+ {"$set": {"pype_data.is_processed": True}}
+ )
+ except pymongo.errors.AutoReconnect:
+ self.pypelog.error((
+ "Mongo server \"{}\" is not responding, exiting."
+ ).format(os.environ["AVALON_MONGO"]))
+ sys.exit(0)
+ # Additional special processing of events.
+ if event['topic'] == 'ftrack.meta.disconnected':
+ break
+
+ if duration is not None:
+ if (time.time() - started) > duration:
+ break
+
+ def load_events(self):
+ """Load not processed events sorted by stored date"""
+ ago_date = datetime.datetime.now() - datetime.timedelta(days=3)
+ result = self.dbcon.delete_many({
+ "pype_data.stored": {"$lte": ago_date},
+ "pype_data.is_processed": True
+ })
+
+ not_processed_events = self.dbcon.find(
+ {"pype_data.is_processed": False}
+ ).sort(
+ [("pype_data.stored", pymongo.ASCENDING)]
+ )
+
+ found = False
+ for event_data in not_processed_events:
+ new_event_data = {
+ k: v for k, v in event_data.items()
+ if k not in ["_id", "pype_data"]
+ }
+ try:
+ event = ftrack_api.event.base.Event(**new_event_data)
+ except Exception:
+ self.logger.exception(L(
+ 'Failed to convert payload into event: {0}',
+ event_data
+ ))
+ continue
+ found = True
+ self._event_queue.put(event)
+
+ return found
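+
+    # Illustrative shape of a stored event document, inferred from the queries
+    # above (actual payload keys depend on the ftrack event):
+    #   {"id": "...", "topic": "...", "data": {...},
+    #    "pype_data": {"stored": <datetime>, "is_processed": False}}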
+
+ def _handle_packet(self, code, packet_identifier, path, data):
+ """Override `_handle_packet` which skip events and extend heartbeat"""
+ code_name = self._code_name_mapping[code]
+ if code_name == "event":
+ return
+ if code_name == "heartbeat":
+ self.sock.sendall(b"processor")
+ return self._send_packet(self._code_name_mapping["heartbeat"])
+
+        return super()._handle_packet(code, packet_identifier, path, data)
+
+
+class SocketSession(ftrack_api.session.Session):
+ '''An isolated session for interaction with an ftrack server.'''
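+
+    # Differs from a plain ftrack_api Session only in taking a watchdog `sock`
+    # and an `Eventhub` class (e.g. StorerEventHub or ProcessEventHub) that is
+    # used to construct the event hub instead of the default EventHub.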
+ def __init__(
+ self, server_url=None, api_key=None, api_user=None, auto_populate=True,
+ plugin_paths=None, cache=None, cache_key_maker=None,
+ auto_connect_event_hub=None, schema_cache_path=None,
+ plugin_arguments=None, sock=None, Eventhub=None
+ ):
+ super(ftrack_api.session.Session, self).__init__()
+ self.logger = logging.getLogger(
+ __name__ + '.' + self.__class__.__name__
+ )
+ self._closed = False
+
+ if server_url is None:
+ server_url = os.environ.get('FTRACK_SERVER')
+
+ if not server_url:
+ raise TypeError(
+ 'Required "server_url" not specified. Pass as argument or set '
+ 'in environment variable FTRACK_SERVER.'
+ )
+
+ self._server_url = server_url
+
+ if api_key is None:
+ api_key = os.environ.get(
+ 'FTRACK_API_KEY',
+ # Backwards compatibility
+ os.environ.get('FTRACK_APIKEY')
+ )
+
+ if not api_key:
+ raise TypeError(
+ 'Required "api_key" not specified. Pass as argument or set in '
+ 'environment variable FTRACK_API_KEY.'
+ )
+
+ self._api_key = api_key
+
+ if api_user is None:
+ api_user = os.environ.get('FTRACK_API_USER')
+ if not api_user:
+ try:
+ api_user = getpass.getuser()
+ except Exception:
+ pass
+
+ if not api_user:
+ raise TypeError(
+ 'Required "api_user" not specified. Pass as argument, set in '
+ 'environment variable FTRACK_API_USER or one of the standard '
+ 'environment variables used by Python\'s getpass module.'
+ )
+
+ self._api_user = api_user
+
+ # Currently pending operations.
+ self.recorded_operations = ftrack_api.operation.Operations()
+ self.record_operations = True
+
+ self.cache_key_maker = cache_key_maker
+ if self.cache_key_maker is None:
+ self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
+
+ # Enforce always having a memory cache at top level so that the same
+ # in-memory instance is returned from session.
+ self.cache = ftrack_api.cache.LayeredCache([
+ ftrack_api.cache.MemoryCache()
+ ])
+
+ if cache is not None:
+ if callable(cache):
+ cache = cache(self)
+
+ if cache is not None:
+ self.cache.caches.append(cache)
+
+ self._managed_request = None
+ self._request = requests.Session()
+ self._request.auth = ftrack_api.session.SessionAuthentication(
+ self._api_key, self._api_user
+ )
+
+ self.auto_populate = auto_populate
+
+ # Fetch server information and in doing so also check credentials.
+ self._server_information = self._fetch_server_information()
+
+ # Now check compatibility of server based on retrieved information.
+ self.check_server_compatibility()
+
+ # Construct event hub and load plugins.
+ if Eventhub is None:
+ Eventhub = ftrack_api.event.hub.EventHub
+ self._event_hub = Eventhub(
+ self._server_url,
+ self._api_user,
+ self._api_key,
+ sock=sock
+ )
+
+ self._auto_connect_event_hub_thread = None
+ if auto_connect_event_hub in (None, True):
+ # Connect to event hub in background thread so as not to block main
+ # session usage waiting for event hub connection.
+ self._auto_connect_event_hub_thread = threading.Thread(
+ target=self._event_hub.connect
+ )
+ self._auto_connect_event_hub_thread.daemon = True
+ self._auto_connect_event_hub_thread.start()
+
+ # To help with migration from auto_connect_event_hub default changing
+ # from True to False.
+ self._event_hub._deprecation_warning_auto_connect = (
+ auto_connect_event_hub is None
+ )
+
+ # Register to auto-close session on exit.
+ atexit.register(self.close)
+
+ self._plugin_paths = plugin_paths
+ if self._plugin_paths is None:
+ self._plugin_paths = os.environ.get(
+ 'FTRACK_EVENT_PLUGIN_PATH', ''
+ ).split(os.pathsep)
+
+ self._discover_plugins(plugin_arguments=plugin_arguments)
+
+ # TODO: Make schemas read-only and non-mutable (or at least without
+ # rebuilding types)?
+ if schema_cache_path is not False:
+ if schema_cache_path is None:
+ schema_cache_path = os.environ.get(
+ 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
+ )
+
+ schema_cache_path = os.path.join(
+ schema_cache_path, 'ftrack_api_schema_cache.json'
+ )
+
+ self.schemas = self._load_schemas(schema_cache_path)
+ self.types = self._build_entity_type_classes(self.schemas)
+
+ ftrack_api._centralized_storage_scenario.register(self)
+
+ self._configure_locations()
+ self.event_hub.publish(
+ ftrack_api.event.base.Event(
+ topic='ftrack.api.session.ready',
+ data=dict(
+ session=self
+ )
+ ),
+ synchronous=True
+ )
diff --git a/pype/ftrack/ftrack_server/session_processor.py b/pype/ftrack/ftrack_server/session_processor.py
deleted file mode 100644
index 133719bab4..0000000000
--- a/pype/ftrack/ftrack_server/session_processor.py
+++ /dev/null
@@ -1,292 +0,0 @@
-import logging
-import os
-import atexit
-import datetime
-import tempfile
-import threading
-import time
-import requests
-import queue
-import pymongo
-
-import ftrack_api
-import ftrack_api.session
-import ftrack_api.cache
-import ftrack_api.operation
-import ftrack_api._centralized_storage_scenario
-import ftrack_api.event
-from ftrack_api.logging import LazyLogMessage as L
-
-from pype.ftrack.lib.custom_db_connector import DbConnector
-from pype.ftrack.ftrack_server.lib import get_ftrack_event_mongo_info
-from pypeapp import Logger
-
-log = Logger().get_logger("Session processor")
-
-
-class ProcessEventHub(ftrack_api.event.hub.EventHub):
- url, database, table_name = get_ftrack_event_mongo_info()
-
- is_table_created = False
-
- def __init__(self, *args, **kwargs):
- self.dbcon = DbConnector(
- mongo_url=self.url,
- database_name=self.database,
- table_name=self.table_name
- )
- self.sock = kwargs.pop("sock")
- super(ProcessEventHub, self).__init__(*args, **kwargs)
-
- def prepare_dbcon(self):
- try:
- self.dbcon.install()
- self.dbcon._database.list_collection_names()
- except pymongo.errors.AutoReconnect:
- log.error("Mongo server \"{}\" is not responding, exiting.".format(
- os.environ["AVALON_MONGO"]
- ))
- sys.exit(0)
-
- except pymongo.errors.OperationFailure:
- log.error((
- "Error with Mongo access, probably permissions."
- "Check if exist database with name \"{}\""
- " and collection \"{}\" inside."
- ).format(self.database, self.table_name))
- self.sock.sendall(b"MongoError")
- sys.exit(0)
-
- def wait(self, duration=None):
- """Overriden wait
-
- Event are loaded from Mongo DB when queue is empty. Handled event is
- set as processed in Mongo DB.
- """
- started = time.time()
- self.prepare_dbcon()
- while True:
- try:
- event = self._event_queue.get(timeout=0.1)
- except queue.Empty:
- if not self.load_events():
- time.sleep(0.5)
- else:
- try:
- self._handle(event)
- self.dbcon.update_one(
- {"id": event["id"]},
- {"$set": {"pype_data.is_processed": True}}
- )
- except pymongo.errors.AutoReconnect:
- log.error((
- "Mongo server \"{}\" is not responding, exiting."
- ).format(os.environ["AVALON_MONGO"]))
- sys.exit(0)
- # Additional special processing of events.
- if event['topic'] == 'ftrack.meta.disconnected':
- break
-
- if duration is not None:
- if (time.time() - started) > duration:
- break
-
- def load_events(self):
- """Load not processed events sorted by stored date"""
- ago_date = datetime.datetime.now() - datetime.timedelta(days=3)
- result = self.dbcon.delete_many({
- "pype_data.stored": {"$lte": ago_date},
- "pype_data.is_processed": True
- })
-
- not_processed_events = self.dbcon.find(
- {"pype_data.is_processed": False}
- ).sort(
- [("pype_data.stored", pymongo.ASCENDING)]
- )
-
- found = False
- for event_data in not_processed_events:
- new_event_data = {
- k: v for k, v in event_data.items()
- if k not in ["_id", "pype_data"]
- }
- try:
- event = ftrack_api.event.base.Event(**new_event_data)
- except Exception:
- self.logger.exception(L(
- 'Failed to convert payload into event: {0}',
- event_data
- ))
- continue
- found = True
- self._event_queue.put(event)
-
- return found
-
- def _handle_packet(self, code, packet_identifier, path, data):
- """Override `_handle_packet` which skip events and extend heartbeat"""
- code_name = self._code_name_mapping[code]
- if code_name == "event":
- return
- if code_name == "heartbeat":
- self.sock.sendall(b"processor")
- return self._send_packet(self._code_name_mapping["heartbeat"])
-
- return super()._handle_packet(code, packet_identifier, path, data)
-
-
-class ProcessSession(ftrack_api.session.Session):
- '''An isolated session for interaction with an ftrack server.'''
- def __init__(
- self, server_url=None, api_key=None, api_user=None, auto_populate=True,
- plugin_paths=None, cache=None, cache_key_maker=None,
- auto_connect_event_hub=None, schema_cache_path=None,
- plugin_arguments=None, sock=None
- ):
- super(ftrack_api.session.Session, self).__init__()
- self.logger = logging.getLogger(
- __name__ + '.' + self.__class__.__name__
- )
- self._closed = False
-
- if server_url is None:
- server_url = os.environ.get('FTRACK_SERVER')
-
- if not server_url:
- raise TypeError(
- 'Required "server_url" not specified. Pass as argument or set '
- 'in environment variable FTRACK_SERVER.'
- )
-
- self._server_url = server_url
-
- if api_key is None:
- api_key = os.environ.get(
- 'FTRACK_API_KEY',
- # Backwards compatibility
- os.environ.get('FTRACK_APIKEY')
- )
-
- if not api_key:
- raise TypeError(
- 'Required "api_key" not specified. Pass as argument or set in '
- 'environment variable FTRACK_API_KEY.'
- )
-
- self._api_key = api_key
-
- if api_user is None:
- api_user = os.environ.get('FTRACK_API_USER')
- if not api_user:
- try:
- api_user = getpass.getuser()
- except Exception:
- pass
-
- if not api_user:
- raise TypeError(
- 'Required "api_user" not specified. Pass as argument, set in '
- 'environment variable FTRACK_API_USER or one of the standard '
- 'environment variables used by Python\'s getpass module.'
- )
-
- self._api_user = api_user
-
- # Currently pending operations.
- self.recorded_operations = ftrack_api.operation.Operations()
- self.record_operations = True
-
- self.cache_key_maker = cache_key_maker
- if self.cache_key_maker is None:
- self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
-
- # Enforce always having a memory cache at top level so that the same
- # in-memory instance is returned from session.
- self.cache = ftrack_api.cache.LayeredCache([
- ftrack_api.cache.MemoryCache()
- ])
-
- if cache is not None:
- if callable(cache):
- cache = cache(self)
-
- if cache is not None:
- self.cache.caches.append(cache)
-
- self._managed_request = None
- self._request = requests.Session()
- self._request.auth = ftrack_api.session.SessionAuthentication(
- self._api_key, self._api_user
- )
-
- self.auto_populate = auto_populate
-
- # Fetch server information and in doing so also check credentials.
- self._server_information = self._fetch_server_information()
-
- # Now check compatibility of server based on retrieved information.
- self.check_server_compatibility()
-
- # Construct event hub and load plugins.
- self._event_hub = ProcessEventHub(
- self._server_url,
- self._api_user,
- self._api_key,
- sock=sock
- )
-
- self._auto_connect_event_hub_thread = None
- if auto_connect_event_hub in (None, True):
- # Connect to event hub in background thread so as not to block main
- # session usage waiting for event hub connection.
- self._auto_connect_event_hub_thread = threading.Thread(
- target=self._event_hub.connect
- )
- self._auto_connect_event_hub_thread.daemon = True
- self._auto_connect_event_hub_thread.start()
-
- # To help with migration from auto_connect_event_hub default changing
- # from True to False.
- self._event_hub._deprecation_warning_auto_connect = (
- auto_connect_event_hub is None
- )
-
- # Register to auto-close session on exit.
- atexit.register(self.close)
-
- self._plugin_paths = plugin_paths
- if self._plugin_paths is None:
- self._plugin_paths = os.environ.get(
- 'FTRACK_EVENT_PLUGIN_PATH', ''
- ).split(os.pathsep)
-
- self._discover_plugins(plugin_arguments=plugin_arguments)
-
- # TODO: Make schemas read-only and non-mutable (or at least without
- # rebuilding types)?
- if schema_cache_path is not False:
- if schema_cache_path is None:
- schema_cache_path = os.environ.get(
- 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
- )
-
- schema_cache_path = os.path.join(
- schema_cache_path, 'ftrack_api_schema_cache.json'
- )
-
- self.schemas = self._load_schemas(schema_cache_path)
- self.types = self._build_entity_type_classes(self.schemas)
-
- ftrack_api._centralized_storage_scenario.register(self)
-
- self._configure_locations()
- self.event_hub.publish(
- ftrack_api.event.base.Event(
- topic='ftrack.api.session.ready',
- data=dict(
- session=self
- )
- ),
- synchronous=True
- )
diff --git a/pype/ftrack/ftrack_server/session_storer.py b/pype/ftrack/ftrack_server/session_storer.py
deleted file mode 100644
index 0b44d7d3a1..0000000000
--- a/pype/ftrack/ftrack_server/session_storer.py
+++ /dev/null
@@ -1,269 +0,0 @@
-import logging
-import os
-import atexit
-import tempfile
-import threading
-import requests
-
-import ftrack_api
-import ftrack_api.session
-import ftrack_api.cache
-import ftrack_api.operation
-import ftrack_api._centralized_storage_scenario
-import ftrack_api.event
-from ftrack_api.logging import LazyLogMessage as L
-
-
-class StorerEventHub(ftrack_api.event.hub.EventHub):
- def __init__(self, *args, **kwargs):
- self.sock = kwargs.pop("sock")
- super(StorerEventHub, self).__init__(*args, **kwargs)
-
- def _handle_packet(self, code, packet_identifier, path, data):
- """Override `_handle_packet` which extend heartbeat"""
- code_name = self._code_name_mapping[code]
- if code_name == "heartbeat":
- # Reply with heartbeat.
- self.sock.sendall(b"storer")
- return self._send_packet(self._code_name_mapping['heartbeat'])
-
- elif code_name == "connect":
- event = ftrack_api.event.base.Event(
- topic="pype.storer.started",
- data={},
- source={
- "id": self.id,
- "user": {"username": self._api_user}
- }
- )
- self._event_queue.put(event)
-
- return super(StorerEventHub, self)._handle_packet(
- code, packet_identifier, path, data
- )
-
-
-class StorerSession(ftrack_api.session.Session):
- '''An isolated session for interaction with an ftrack server.'''
- def __init__(
- self, server_url=None, api_key=None, api_user=None, auto_populate=True,
- plugin_paths=None, cache=None, cache_key_maker=None,
- auto_connect_event_hub=None, schema_cache_path=None,
- plugin_arguments=None, sock=None
- ):
- '''Initialise session.
-
- *server_url* should be the URL of the ftrack server to connect to
- including any port number. If not specified attempt to look up from
- :envvar:`FTRACK_SERVER`.
-
- *api_key* should be the API key to use for authentication whilst
- *api_user* should be the username of the user in ftrack to record
- operations against. If not specified, *api_key* should be retrieved
- from :envvar:`FTRACK_API_KEY` and *api_user* from
- :envvar:`FTRACK_API_USER`.
-
- If *auto_populate* is True (the default), then accessing entity
- attributes will cause them to be automatically fetched from the server
- if they are not already. This flag can be changed on the session
- directly at any time.
-
- *plugin_paths* should be a list of paths to search for plugins. If not
- specified, default to looking up :envvar:`FTRACK_EVENT_PLUGIN_PATH`.
-
- *cache* should be an instance of a cache that fulfils the
- :class:`ftrack_api.cache.Cache` interface and will be used as the cache
- for the session. It can also be a callable that will be called with the
- session instance as sole argument. The callable should return ``None``
- if a suitable cache could not be configured, but session instantiation
- can continue safely.
-
- .. note::
-
- The session will add the specified cache to a pre-configured layered
- cache that specifies the top level cache as a
- :class:`ftrack_api.cache.MemoryCache`. Therefore, it is unnecessary
- to construct a separate memory cache for typical behaviour. Working
- around this behaviour or removing the memory cache can lead to
- unexpected behaviour.
-
- *cache_key_maker* should be an instance of a key maker that fulfils the
- :class:`ftrack_api.cache.KeyMaker` interface and will be used to
- generate keys for objects being stored in the *cache*. If not specified,
- a :class:`~ftrack_api.cache.StringKeyMaker` will be used.
-
- If *auto_connect_event_hub* is True then embedded event hub will be
- automatically connected to the event server and allow for publishing and
- subscribing to **non-local** events. If False, then only publishing and
- subscribing to **local** events will be possible until the hub is
- manually connected using :meth:`EventHub.connect
- `.
-
- .. note::
-
- The event hub connection is performed in a background thread to
- improve session startup time. If a registered plugin requires a
- connected event hub then it should check the event hub connection
- status explicitly. Subscribing to events does *not* require a
- connected event hub.
-
- Enable schema caching by setting *schema_cache_path* to a folder path.
- If not set, :envvar:`FTRACK_API_SCHEMA_CACHE_PATH` will be used to
- determine the path to store cache in. If the environment variable is
- also not specified then a temporary directory will be used. Set to
- `False` to disable schema caching entirely.
-
- *plugin_arguments* should be an optional mapping (dict) of keyword
- arguments to pass to plugin register functions upon discovery. If a
- discovered plugin has a signature that is incompatible with the passed
- arguments, the discovery mechanism will attempt to reduce the passed
- arguments to only those that the plugin accepts. Note that a warning
- will be logged in this case.
-
- '''
- super(ftrack_api.session.Session, self).__init__()
- self.logger = logging.getLogger(
- __name__ + '.' + self.__class__.__name__
- )
- self._closed = False
-
- if server_url is None:
- server_url = os.environ.get('FTRACK_SERVER')
-
- if not server_url:
- raise TypeError(
- 'Required "server_url" not specified. Pass as argument or set '
- 'in environment variable FTRACK_SERVER.'
- )
-
- self._server_url = server_url
-
- if api_key is None:
- api_key = os.environ.get(
- 'FTRACK_API_KEY',
- # Backwards compatibility
- os.environ.get('FTRACK_APIKEY')
- )
-
- if not api_key:
- raise TypeError(
- 'Required "api_key" not specified. Pass as argument or set in '
- 'environment variable FTRACK_API_KEY.'
- )
-
- self._api_key = api_key
-
- if api_user is None:
- api_user = os.environ.get('FTRACK_API_USER')
- if not api_user:
- try:
- api_user = getpass.getuser()
- except Exception:
- pass
-
- if not api_user:
- raise TypeError(
- 'Required "api_user" not specified. Pass as argument, set in '
- 'environment variable FTRACK_API_USER or one of the standard '
- 'environment variables used by Python\'s getpass module.'
- )
-
- self._api_user = api_user
-
- # Currently pending operations.
- self.recorded_operations = ftrack_api.operation.Operations()
- self.record_operations = True
-
- self.cache_key_maker = cache_key_maker
- if self.cache_key_maker is None:
- self.cache_key_maker = ftrack_api.cache.StringKeyMaker()
-
- # Enforce always having a memory cache at top level so that the same
- # in-memory instance is returned from session.
- self.cache = ftrack_api.cache.LayeredCache([
- ftrack_api.cache.MemoryCache()
- ])
-
- if cache is not None:
- if callable(cache):
- cache = cache(self)
-
- if cache is not None:
- self.cache.caches.append(cache)
-
- self._managed_request = None
- self._request = requests.Session()
- self._request.auth = ftrack_api.session.SessionAuthentication(
- self._api_key, self._api_user
- )
-
- self.auto_populate = auto_populate
-
- # Fetch server information and in doing so also check credentials.
- self._server_information = self._fetch_server_information()
-
- # Now check compatibility of server based on retrieved information.
- self.check_server_compatibility()
-
- # Construct event hub and load plugins.
- self._event_hub = StorerEventHub(
- self._server_url,
- self._api_user,
- self._api_key,
- sock=sock
- )
-
- self._auto_connect_event_hub_thread = None
- if auto_connect_event_hub in (None, True):
- # Connect to event hub in background thread so as not to block main
- # session usage waiting for event hub connection.
- self._auto_connect_event_hub_thread = threading.Thread(
- target=self._event_hub.connect
- )
- self._auto_connect_event_hub_thread.daemon = True
- self._auto_connect_event_hub_thread.start()
-
- # To help with migration from auto_connect_event_hub default changing
- # from True to False.
- self._event_hub._deprecation_warning_auto_connect = (
- auto_connect_event_hub is None
- )
-
- # Register to auto-close session on exit.
- atexit.register(self.close)
-
- self._plugin_paths = plugin_paths
- if self._plugin_paths is None:
- self._plugin_paths = os.environ.get(
- 'FTRACK_EVENT_PLUGIN_PATH', ''
- ).split(os.pathsep)
-
- self._discover_plugins(plugin_arguments=plugin_arguments)
-
- # TODO: Make schemas read-only and non-mutable (or at least without
- # rebuilding types)?
- if schema_cache_path is not False:
- if schema_cache_path is None:
- schema_cache_path = os.environ.get(
- 'FTRACK_API_SCHEMA_CACHE_PATH', tempfile.gettempdir()
- )
-
- schema_cache_path = os.path.join(
- schema_cache_path, 'ftrack_api_schema_cache.json'
- )
-
- self.schemas = self._load_schemas(schema_cache_path)
- self.types = self._build_entity_type_classes(self.schemas)
-
- ftrack_api._centralized_storage_scenario.register(self)
-
- self._configure_locations()
- self.event_hub.publish(
- ftrack_api.event.base.Event(
- topic='ftrack.api.session.ready',
- data=dict(
- session=self
- )
- ),
- synchronous=True
- )
diff --git a/pype/ftrack/ftrack_server/socket_thread.py b/pype/ftrack/ftrack_server/socket_thread.py
index d0a2868743..3309f75cd7 100644
--- a/pype/ftrack/ftrack_server/socket_thread.py
+++ b/pype/ftrack/ftrack_server/socket_thread.py
@@ -1,7 +1,5 @@
import os
-import sys
import time
-import signal
import socket
import threading
import subprocess
@@ -10,7 +8,9 @@ from pypeapp import Logger
class SocketThread(threading.Thread):
"""Thread that checks suprocess of storer of processor of events"""
+
MAX_TIMEOUT = 35
+
def __init__(self, name, port, filepath):
super(SocketThread, self).__init__()
self.log = Logger().get_logger("SocketThread", "Event Thread")
diff --git a/pype/ftrack/ftrack_server/sub_event_processor.py b/pype/ftrack/ftrack_server/sub_event_processor.py
index 6ada787223..9c971ca916 100644
--- a/pype/ftrack/ftrack_server/sub_event_processor.py
+++ b/pype/ftrack/ftrack_server/sub_event_processor.py
@@ -1,12 +1,9 @@
-import os
import sys
-import datetime
import signal
import socket
-import pymongo
from ftrack_server import FtrackServer
-from pype.ftrack.ftrack_server.session_processor import ProcessSession
+from pype.ftrack.ftrack_server.lib import SocketSession, ProcessEventHub
from pypeapp import Logger
log = Logger().get_logger("Event processor")
@@ -24,12 +21,14 @@ def main(args):
sock.sendall(b"CreatedProcess")
try:
- session = ProcessSession(auto_connect_event_hub=True, sock=sock)
- server = FtrackServer('event')
+ session = SocketSession(
+ auto_connect_event_hub=True, sock=sock, Eventhub=ProcessEventHub
+ )
+ server = FtrackServer("event")
log.debug("Launched Ftrack Event processor")
server.run_server(session)
- except Exception as exc:
+ except Exception:
log.error("Event server crashed. See traceback below", exc_info=True)
finally:
diff --git a/pype/ftrack/ftrack_server/sub_event_storer.py b/pype/ftrack/ftrack_server/sub_event_storer.py
index 4828b10bfa..dfe8e21654 100644
--- a/pype/ftrack/ftrack_server/sub_event_storer.py
+++ b/pype/ftrack/ftrack_server/sub_event_storer.py
@@ -7,22 +7,22 @@ import pymongo
import ftrack_api
from ftrack_server import FtrackServer
-from pype.ftrack.ftrack_server.lib import get_ftrack_event_mongo_info
+from pype.ftrack.ftrack_server.lib import (
+ get_ftrack_event_mongo_info,
+ SocketSession,
+ StorerEventHub
+)
from pype.ftrack.lib.custom_db_connector import DbConnector
-from session_storer import StorerSession
from pypeapp import Logger
log = Logger().get_logger("Event storer")
+
+class SessionFactory:
+ session = None
+
+
url, database, table_name = get_ftrack_event_mongo_info()
-
-
-class SessionClass:
- def __init__(self):
- self.session = None
-
-
-session_obj = SessionClass()
dbcon = DbConnector(
mongo_url=url,
database_name=database,
@@ -75,7 +75,11 @@ def launch(event):
def trigger_sync(event):
- session = session_obj.session
+    session = SessionFactory.session
+    source_id = event.get("source", {}).get("id")
+    # guard against a not-yet-set session before touching its event hub
+    if not source_id or (session and source_id != session.event_hub.id):
+        return
+
if session is None:
log.warning("Session is not set. Can't trigger Sync to avalon action.")
return True
@@ -93,7 +97,7 @@ def trigger_sync(event):
"$set": {"pype_data.is_processed": True}
}
dbcon.update_many(query, set_dict)
-
+
selections = []
for project in projects:
if project["status"] != "active":
@@ -154,8 +158,10 @@ def main(args):
sock.sendall(b"CreatedStore")
try:
- session = StorerSession(auto_connect_event_hub=True, sock=sock)
- session_obj.session = session
+ session = SocketSession(
+ auto_connect_event_hub=True, sock=sock, Eventhub=StorerEventHub
+ )
+ SessionFactory.session = session
register(session)
server = FtrackServer("event")
log.debug("Launched Ftrack Event storer")
diff --git a/pype/ftrack/ftrack_server/sub_legacy_server.py b/pype/ftrack/ftrack_server/sub_legacy_server.py
index 31f38d0404..8b7bab5e2e 100644
--- a/pype/ftrack/ftrack_server/sub_legacy_server.py
+++ b/pype/ftrack/ftrack_server/sub_legacy_server.py
@@ -1,4 +1,3 @@
-import os
import sys
import time
import datetime
@@ -7,7 +6,6 @@ import threading
from ftrack_server import FtrackServer
import ftrack_api
-from ftrack_api.event.hub import EventHub
from pypeapp import Logger
log = Logger().get_logger("Event Server Legacy")
@@ -37,7 +35,10 @@ class TimerChecker(threading.Thread):
if not self.session.event_hub.connected:
if not connected:
- if (datetime.datetime.now() - start).seconds > self.max_time_out:
+ if (
+ (datetime.datetime.now() - start).seconds >
+ self.max_time_out
+ ):
log.error((
"Exiting event server. Session was not connected"
" to ftrack server in {} seconds."
@@ -61,7 +62,7 @@ class TimerChecker(threading.Thread):
def main(args):
check_thread = None
try:
- server = FtrackServer('event')
+ server = FtrackServer("event")
session = ftrack_api.Session(auto_connect_event_hub=True)
check_thread = TimerChecker(server, session)
diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 064ea1adb8..8cebd12a59 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -314,6 +314,9 @@ class SyncEntitiesFactory:
self.log.warning(msg)
return {"success": False, "message": msg}
+ self.log.debug((
+ "*** Synchronization initialization started <{}>."
+ ).format(project_full_name))
# Check if `avalon_mongo_id` custom attribute exist or is accessible
if CustAttrIdKey not in ft_project["custom_attributes"]:
items = []
@@ -699,7 +702,7 @@ class SyncEntitiesFactory:
if ca_ent_type == "show":
avalon_attrs[ca_ent_type][key] = cust_attr["default"]
avalon_attrs_ca_id[ca_ent_type][key] = cust_attr["id"]
- else:
+ elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
avalon_attrs[obj_id][key] = cust_attr["default"]
avalon_attrs_ca_id[obj_id][key] = cust_attr["id"]
@@ -708,7 +711,7 @@ class SyncEntitiesFactory:
if ca_ent_type == "show":
attrs_per_entity_type[ca_ent_type][key] = cust_attr["default"]
attrs_per_entity_type_ca_id[ca_ent_type][key] = cust_attr["id"]
- else:
+ elif ca_ent_type == "task":
obj_id = cust_attr["object_type_id"]
attrs_per_entity_type[obj_id][key] = cust_attr["default"]
attrs_per_entity_type_ca_id[obj_id][key] = cust_attr["id"]
diff --git a/pype/ftrack/lib/ftrack_base_handler.py b/pype/ftrack/lib/ftrack_base_handler.py
index 4b57452961..8329505ffb 100644
--- a/pype/ftrack/lib/ftrack_base_handler.py
+++ b/pype/ftrack/lib/ftrack_base_handler.py
@@ -2,7 +2,7 @@ import functools
import time
from pypeapp import Logger
import ftrack_api
-from pype.ftrack.ftrack_server import session_processor
+from pype.ftrack.ftrack_server.lib import SocketSession
class MissingPermision(Exception):
@@ -41,7 +41,7 @@ class BaseHandler(object):
self.log = Logger().get_logger(self.__class__.__name__)
if not(
isinstance(session, ftrack_api.session.Session) or
- isinstance(session, session_processor.ProcessSession)
+ isinstance(session, SocketSession)
):
raise Exception((
"Session object entered with args is instance of \"{}\""
diff --git a/pype/lib.py b/pype/lib.py
index cb238459d1..f26395d930 100644
--- a/pype/lib.py
+++ b/pype/lib.py
@@ -1,14 +1,12 @@
import os
import re
import logging
-import importlib
import itertools
import contextlib
import subprocess
import inspect
-
-import avalon.io as io
+from avalon import io
import avalon.api
import avalon
@@ -16,21 +14,38 @@ log = logging.getLogger(__name__)
# Special naming case for subprocess since its a built-in method.
-def _subprocess(args):
+def _subprocess(*args, **kwargs):
"""Convenience method for getting output errors for subprocess."""
- proc = subprocess.Popen(
- args,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- stdin=subprocess.PIPE,
- env=os.environ
- )
+ # make sure environment contains only strings
+ if not kwargs.get("env"):
+ filtered_env = {k: str(v) for k, v in os.environ.items()}
+ else:
+ filtered_env = {k: str(v) for k, v in kwargs.get("env").items()}
- output = proc.communicate()[0]
+ # set overrides
+ kwargs['stdout'] = kwargs.get('stdout', subprocess.PIPE)
+ kwargs['stderr'] = kwargs.get('stderr', subprocess.STDOUT)
+ kwargs['stdin'] = kwargs.get('stdin', subprocess.PIPE)
+ kwargs['env'] = filtered_env
+
+ proc = subprocess.Popen(*args, **kwargs)
+
+ output, error = proc.communicate()
+
+ if output:
+ output = output.decode("utf-8")
+ output += "\n"
+ for line in output.strip().split("\n"):
+ log.info(line)
+
+ if error:
+ error = error.decode("utf-8")
+ error += "\n"
+ for line in error.strip().split("\n"):
+ log.error(line)
if proc.returncode != 0:
- log.error(output)
raise ValueError("\"{}\" was not successful: {}".format(args, output))
return output
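+
+
+# Illustrative usage of the keyword-argument form above (the command and env
+# values are hypothetical examples, not part of the pipeline):
+#
+#     output = _subprocess(["ffmpeg", "-version"])
+#     output = _subprocess("ffmpeg -version", shell=True,
+#                          env={"PATH": os.environ["PATH"]})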
diff --git a/pype/logging/gui/app.py b/pype/logging/gui/app.py
index 7cee280158..9767077f80 100644
--- a/pype/logging/gui/app.py
+++ b/pype/logging/gui/app.py
@@ -33,5 +33,7 @@ class LogsWindow(QtWidgets.QWidget):
def on_selection_changed(self):
index = self.logs_widget.selected_log()
+ if not index or not index.isValid():
+ return
node = index.data(self.logs_widget.model.NodeRole)
self.log_detail.set_detail(node)
diff --git a/pype/logging/gui/widgets.py b/pype/logging/gui/widgets.py
index 66692c2c65..10aad3c282 100644
--- a/pype/logging/gui/widgets.py
+++ b/pype/logging/gui/widgets.py
@@ -1,11 +1,7 @@
-import datetime
-import inspect
+import getpass
from Qt import QtCore, QtWidgets, QtGui
-from PyQt5.QtCore import QVariant
from .models import LogModel
-from .lib import preserve_states
-
class SearchComboBox(QtWidgets.QComboBox):
"""Searchable ComboBox with empty placeholder value as first value"""
@@ -53,6 +49,7 @@ class SearchComboBox(QtWidgets.QComboBox):
return text
+
class CheckableComboBox2(QtWidgets.QComboBox):
def __init__(self, parent=None):
super(CheckableComboBox, self).__init__(parent)
@@ -96,9 +93,11 @@ class SelectableMenu(QtWidgets.QMenu):
else:
super(SelectableMenu, self).mouseReleaseEvent(event)
+
class CustomCombo(QtWidgets.QWidget):
selection_changed = QtCore.Signal()
+ checked_changed = QtCore.Signal(bool)
def __init__(self, title, parent=None):
super(CustomCombo, self).__init__(parent)
@@ -127,12 +126,27 @@ class CustomCombo(QtWidgets.QWidget):
self.toolmenu.clear()
self.addItems(items)
+ def select_items(self, items, ignore_input=False):
+ if not isinstance(items, list):
+ items = [items]
+
+ for action in self.toolmenu.actions():
+ check = True
+ if (
+ action.text() in items and ignore_input or
+ action.text() not in items and not ignore_input
+ ):
+ check = False
+
+ action.setChecked(check)
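+
+    # e.g. select_items(["INFO", "ERROR"]) leaves only those entries checked,
+    # while select_items(["INFO"], ignore_input=True) checks everything else.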
+
def addItems(self, items):
for item in items:
action = self.toolmenu.addAction(item)
action.setCheckable(True)
- action.setChecked(True)
self.toolmenu.addAction(action)
+ action.setChecked(True)
+ action.triggered.connect(self.checked_changed)
def items(self):
for action in self.toolmenu.actions():
@@ -186,15 +200,42 @@ class CheckableComboBox(QtWidgets.QComboBox):
for text, checked in items:
text_item = QtGui.QStandardItem(text)
checked_item = QtGui.QStandardItem()
- checked_item.setData(QVariant(checked), QtCore.Qt.CheckStateRole)
+ checked_item.setData(
+ QtCore.QVariant(checked), QtCore.Qt.CheckStateRole
+ )
self.model.appendRow([text_item, checked_item])
+
+
+class FilterLogModel(QtCore.QSortFilterProxyModel):
+    sub_dict = ["$gt", "$lt", "$not"]
+
+    def __init__(self, key_values, parent=None):
+ super(FilterLogModel, self).__init__(parent)
+ self.allowed_key_values = key_values
+
+ def filterAcceptsRow(self, row, parent):
+ """
+ Reimplemented from base class.
+ """
+ model = self.sourceModel()
+ for key, values in self.allowed_key_values.items():
+ col_indx = model.COLUMNS.index(key)
+ value = model.index(row, col_indx, parent).data(
+ QtCore.Qt.DisplayRole
+ )
+ if value not in values:
+ return False
+ return True
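+
+# Note: LogsWidget below mutates `allowed_key_values` directly from its
+# user_changed / level_changed callbacks and then calls `invalidate()` to
+# re-run this filter.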
+
+
class LogsWidget(QtWidgets.QWidget):
"""A widget that lists the published subsets for an asset"""
active_changed = QtCore.Signal()
+ _level_order = [
+ "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"
+ ]
+
def __init__(self, parent=None):
super(LogsWidget, self).__init__(parent=parent)
@@ -202,47 +243,45 @@ class LogsWidget(QtWidgets.QWidget):
filter_layout = QtWidgets.QHBoxLayout()
- # user_filter = SearchComboBox(self, "Users")
user_filter = CustomCombo("Users", self)
users = model.dbcon.distinct("user")
user_filter.populate(users)
- user_filter.selection_changed.connect(self.user_changed)
+ user_filter.checked_changed.connect(self.user_changed)
+ user_filter.select_items(getpass.getuser())
level_filter = CustomCombo("Levels", self)
- # levels = [(level, True) for level in model.dbcon.distinct("level")]
levels = model.dbcon.distinct("level")
- level_filter.addItems(levels)
+ _levels = []
+ for level in self._level_order:
+ if level in levels:
+ _levels.append(level)
+ level_filter.populate(_levels)
+ level_filter.checked_changed.connect(self.level_changed)
- date_from_label = QtWidgets.QLabel("From:")
- date_filter_from = QtWidgets.QDateTimeEdit()
-
- date_from_layout = QtWidgets.QVBoxLayout()
- date_from_layout.addWidget(date_from_label)
- date_from_layout.addWidget(date_filter_from)
-
- # now = datetime.datetime.now()
- # QtCore.QDateTime(now.year, now.month, now.day, now.hour, now.minute, second = 0, msec = 0, timeSpec = 0)
- date_to_label = QtWidgets.QLabel("To:")
- date_filter_to = QtWidgets.QDateTimeEdit()
-
- date_to_layout = QtWidgets.QVBoxLayout()
- date_to_layout.addWidget(date_to_label)
- date_to_layout.addWidget(date_filter_to)
+ # date_from_label = QtWidgets.QLabel("From:")
+ # date_filter_from = QtWidgets.QDateTimeEdit()
+ #
+ # date_from_layout = QtWidgets.QVBoxLayout()
+ # date_from_layout.addWidget(date_from_label)
+ # date_from_layout.addWidget(date_filter_from)
+ #
+ # date_to_label = QtWidgets.QLabel("To:")
+ # date_filter_to = QtWidgets.QDateTimeEdit()
+ #
+ # date_to_layout = QtWidgets.QVBoxLayout()
+ # date_to_layout.addWidget(date_to_label)
+ # date_to_layout.addWidget(date_filter_to)
filter_layout.addWidget(user_filter)
filter_layout.addWidget(level_filter)
+ filter_layout.setAlignment(QtCore.Qt.AlignLeft)
- filter_layout.addLayout(date_from_layout)
- filter_layout.addLayout(date_to_layout)
+ # filter_layout.addLayout(date_from_layout)
+ # filter_layout.addLayout(date_to_layout)
view = QtWidgets.QTreeView(self)
view.setAllColumnsShowFocus(True)
- # # Set view delegates
- # time_delegate = PrettyTimeDelegate()
- # column = model.COLUMNS.index("time")
- # view.setItemDelegateForColumn(column, time_delegate)
-
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addLayout(filter_layout)
@@ -255,34 +294,54 @@ class LogsWidget(QtWidgets.QWidget):
QtCore.Qt.AscendingOrder
)
- view.setModel(model)
+ key_val = {
+ "user": users,
+ "level": levels
+ }
+ proxy_model = FilterLogModel(key_val, view)
+ proxy_model.setSourceModel(model)
+ view.setModel(proxy_model)
view.customContextMenuRequested.connect(self.on_context_menu)
view.selectionModel().selectionChanged.connect(self.active_changed)
- # user_filter.connect()
- # TODO remove if nothing will affect...
- # header = self.view.header()
+    # WARNING: resizing columns to fit contents slows the widget down a lot
+ # header = view.header()
# # Enforce the columns to fit the data (purely cosmetic)
# if Qt.__binding__ in ("PySide2", "PyQt5"):
# header.setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
# else:
# header.setResizeMode(QtWidgets.QHeaderView.ResizeToContents)
- # Set signals
-
# prepare
model.refresh()
# Store to memory
self.model = model
+ self.proxy_model = proxy_model
self.view = view
self.user_filter = user_filter
+ self.level_filter = level_filter
def user_changed(self):
+ valid_actions = []
for action in self.user_filter.items():
- print(action)
+ if action.isChecked():
+ valid_actions.append(action.text())
+
+ self.proxy_model.allowed_key_values["user"] = valid_actions
+ self.proxy_model.invalidate()
+
+ def level_changed(self):
+ valid_actions = []
+ for action in self.level_filter.items():
+ if action.isChecked():
+ valid_actions.append(action.text())
+
+ self.proxy_model.allowed_key_values["level"] = valid_actions
+ self.proxy_model.invalidate()
+
def on_context_menu(self, point):
# TODO will be any actions? it's ready
diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py
index b7dbf69510..141cf4c13d 100644
--- a/pype/nuke/__init__.py
+++ b/pype/nuke/__init__.py
@@ -112,7 +112,9 @@ def install():
# Disable all families except for the ones we explicitly want to see
family_states = [
"write",
- "review"
+ "review",
+ "nukenodes"
+ "gizmo"
]
avalon.data["familiesStateDefault"] = False
diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index 157af9019d..70dd0ff80f 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -6,6 +6,7 @@ from collections import OrderedDict
from avalon import api, io, lib
import avalon.nuke
+from avalon.nuke import lib as anlib
import pype.api as pype
import nuke
@@ -105,6 +106,10 @@ def writes_version_sync():
for each in nuke.allNodes():
if each.Class() == 'Write':
+ # check if the node is avalon tracked
+ if "AvalonTab" not in each.knobs():
+ continue
+
avalon_knob_data = avalon.nuke.get_avalon_knob_data(
each, ['avalon:', 'ak:'])
@@ -702,9 +707,11 @@ class WorkfileSettings(object):
frame_start = int(data["frameStart"]) - handle_start
frame_end = int(data["frameEnd"]) + handle_end
+ self._root_node["lock_range"].setValue(False)
self._root_node["fps"].setValue(fps)
self._root_node["first_frame"].setValue(frame_start)
self._root_node["last_frame"].setValue(frame_end)
+ self._root_node["lock_range"].setValue(True)
# setting active viewers
try:
@@ -1190,3 +1197,451 @@ class BuildWorkfile(WorkfileSettings):
def position_up(self, multiply=1):
self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
+
+
+class ExporterReview:
+ """
+ Base class object for generating review data from Nuke
+
+ Args:
+ klass (pyblish.plugin): pyblish plugin parent
+ instance (pyblish.instance): instance of pyblish context
+
+ """
+ _temp_nodes = []
+ data = dict({
+ "representations": list()
+ })
+
+ def __init__(self,
+ klass,
+ instance
+ ):
+
+ self.log = klass.log
+ self.instance = instance
+        self.path_in = self.instance.data.get("path", None)
+        self.staging_dir = self.instance.data["stagingDir"]
+        self.collection = self.instance.data.get("collection", None)
+
+        # initialize per instance so state is not shared via class attributes
+        self._temp_nodes = []
+        self.data = {"representations": []}
+
+ def get_file_info(self):
+ if self.collection:
+ self.log.debug("Collection: `{}`".format(self.collection))
+ # get path
+ self.fname = os.path.basename(self.collection.format(
+ "{head}{padding}{tail}"))
+ self.fhead = self.collection.format("{head}")
+
+ # get first and last frame
+ self.first_frame = min(self.collection.indexes)
+ self.last_frame = max(self.collection.indexes)
+ else:
+ self.fname = os.path.basename(self.path_in)
+ self.fhead = os.path.splitext(self.fname)[0] + "."
+ self.first_frame = self.instance.data.get("frameStart", None)
+ self.last_frame = self.instance.data.get("frameEnd", None)
+
+ if "#" in self.fhead:
+ self.fhead = self.fhead.replace("#", "")[:-1]
+
+ def get_representation_data(self, tags=None, range=False):
+ add_tags = []
+ if tags:
+ add_tags = tags
+
+ repre = {
+ 'name': self.name,
+ 'ext': self.ext,
+ 'files': self.file,
+ "stagingDir": self.staging_dir,
+ "anatomy_template": "publish",
+ "tags": [self.name.replace("_", "-")] + add_tags
+ }
+
+ if range:
+ repre.update({
+ "frameStart": self.first_frame,
+ "frameEnd": self.last_frame,
+ })
+
+ self.data["representations"].append(repre)
+
+ def get_view_process_node(self):
+ """
+ Will get any active view process.
+
+ Arguments:
+ self (class): in object definition
+
+ Returns:
+ nuke.Node: copy node of Input Process node
+ """
+ anlib.reset_selection()
+ ipn_orig = None
+ for v in [n for n in nuke.allNodes()
+ if "Viewer" in n.Class()]:
+ ip = v['input_process'].getValue()
+ ipn = v['input_process_node'].getValue()
+ if "VIEWER_INPUT" not in ipn and ip:
+ ipn_orig = nuke.toNode(ipn)
+ ipn_orig.setSelected(True)
+
+ if ipn_orig:
+ # copy selected to clipboard
+ nuke.nodeCopy('%clipboard%')
+ # reset selection
+ anlib.reset_selection()
+ # paste node and selection is on it only
+ nuke.nodePaste('%clipboard%')
+ # assign to variable
+ ipn = nuke.selectedNode()
+
+ return ipn
+
+ def clean_nodes(self):
+ for node in self._temp_nodes:
+ nuke.delete(node)
+ self.log.info("Deleted nodes...")
+
+
+class ExporterReviewLut(ExporterReview):
+ """
+ Generator object for review lut from Nuke
+
+ Args:
+ klass (pyblish.plugin): pyblish plugin parent
+ instance (pyblish.instance): instance of pyblish context
+
+ """
+ def __init__(self,
+ klass,
+ instance,
+ name=None,
+ ext=None,
+ cube_size=None,
+ lut_size=None,
+ lut_style=None):
+ # initialize parent class
+ ExporterReview.__init__(self, klass, instance)
+
+        # take `viewer_lut_raw` from the plugin if defined, default to False
+ if hasattr(klass, "viewer_lut_raw"):
+ self.viewer_lut_raw = klass.viewer_lut_raw
+ else:
+ self.viewer_lut_raw = False
+
+ self.name = name or "baked_lut"
+ self.ext = ext or "cube"
+ self.cube_size = cube_size or 32
+ self.lut_size = lut_size or 1024
+ self.lut_style = lut_style or "linear"
+
+ # set frame start / end and file name to self
+ self.get_file_info()
+
+ self.log.info("File info was set...")
+
+ self.file = self.fhead + self.name + ".{}".format(self.ext)
+ self.path = os.path.join(
+ self.staging_dir, self.file).replace("\\", "/")
+
+ def generate_lut(self):
+ # ---------- start nodes creation
+
+ # CMSTestPattern
+ cms_node = nuke.createNode("CMSTestPattern")
+ cms_node["cube_size"].setValue(self.cube_size)
+ # connect
+ self._temp_nodes.append(cms_node)
+ self.previous_node = cms_node
+ self.log.debug("CMSTestPattern... `{}`".format(self._temp_nodes))
+
+ # Node View Process
+ ipn = self.get_view_process_node()
+ if ipn is not None:
+ # connect
+ ipn.setInput(0, self.previous_node)
+ self._temp_nodes.append(ipn)
+ self.previous_node = ipn
+ self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
+
+ if not self.viewer_lut_raw:
+ # OCIODisplay
+ dag_node = nuke.createNode("OCIODisplay")
+ # connect
+ dag_node.setInput(0, self.previous_node)
+ self._temp_nodes.append(dag_node)
+ self.previous_node = dag_node
+ self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
+
+ # GenerateLUT
+ gen_lut_node = nuke.createNode("GenerateLUT")
+ gen_lut_node["file"].setValue(self.path)
+ gen_lut_node["file_type"].setValue(".{}".format(self.ext))
+ gen_lut_node["lut1d"].setValue(self.lut_size)
+ gen_lut_node["style1d"].setValue(self.lut_style)
+ # connect
+ gen_lut_node.setInput(0, self.previous_node)
+ self._temp_nodes.append(gen_lut_node)
+ self.log.debug("GenerateLUT... `{}`".format(self._temp_nodes))
+
+ # ---------- end nodes creation
+
+ # Export lut file
+ nuke.execute(
+ gen_lut_node.name(),
+ int(self.first_frame),
+ int(self.first_frame))
+
+ self.log.info("Exported...")
+
+ # ---------- generate representation data
+ self.get_representation_data()
+
+ self.log.debug("Representation... `{}`".format(self.data))
+
+ # ---------- Clean up
+ self.clean_nodes()
+
+ return self.data
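+
+    # The baked chain above is: CMSTestPattern -> (view process copy, if any)
+    # -> (OCIODisplay, unless viewer_lut_raw) -> GenerateLUT writing self.path.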
+
+
+class ExporterReviewMov(ExporterReview):
+ """
+    Class for generating review mov files
+
+ Args:
+ klass (pyblish.plugin): pyblish plugin parent
+ instance (pyblish.instance): instance of pyblish context
+
+ """
+ def __init__(self,
+ klass,
+ instance,
+ name=None,
+ ext=None,
+ ):
+ # initialize parent class
+ ExporterReview.__init__(self, klass, instance)
+
+ # passing presets for nodes to self
+ if hasattr(klass, "nodes"):
+ self.nodes = klass.nodes
+ else:
+ self.nodes = {}
+
+        # take `viewer_lut_raw` from the plugin if defined, default to False
+ if hasattr(klass, "viewer_lut_raw"):
+ self.viewer_lut_raw = klass.viewer_lut_raw
+ else:
+ self.viewer_lut_raw = False
+
+ self.name = name or "baked"
+ self.ext = ext or "mov"
+
+ # set frame start / end and file name to self
+ self.get_file_info()
+
+ self.log.info("File info was set...")
+
+ self.file = self.fhead + self.name + ".{}".format(self.ext)
+ self.path = os.path.join(
+ self.staging_dir, self.file).replace("\\", "/")
+
+ def render(self, render_node_name):
+ self.log.info("Rendering... ")
+ # Render Write node
+ nuke.execute(
+ render_node_name,
+ int(self.first_frame),
+ int(self.last_frame))
+
+ self.log.info("Rendered...")
+
+ def save_file(self):
+ with anlib.maintained_selection():
+ self.log.info("Saving nodes as file... ")
+ # select temp nodes
+ anlib.select_nodes(self._temp_nodes)
+ # create nk path
+ path = os.path.splitext(self.path)[0] + ".nk"
+ # save file to the path
+ nuke.nodeCopy(path)
+
+ self.log.info("Nodes exported...")
+ return path
+
+ def generate_mov(self, farm=False):
+ # ---------- start nodes creation
+
+ # Read node
+ r_node = nuke.createNode("Read")
+ r_node["file"].setValue(self.path_in)
+ r_node["first"].setValue(self.first_frame)
+ r_node["origfirst"].setValue(self.first_frame)
+ r_node["last"].setValue(self.last_frame)
+ r_node["origlast"].setValue(self.last_frame)
+ # connect
+ self._temp_nodes.append(r_node)
+ self.previous_node = r_node
+ self.log.debug("Read... `{}`".format(self._temp_nodes))
+
+ # View Process node
+ ipn = self.get_view_process_node()
+ if ipn is not None:
+ # connect
+ ipn.setInput(0, self.previous_node)
+ self._temp_nodes.append(ipn)
+ self.previous_node = ipn
+ self.log.debug("ViewProcess... `{}`".format(self._temp_nodes))
+
+ if not self.viewer_lut_raw:
+ # OCIODisplay node
+ dag_node = nuke.createNode("OCIODisplay")
+ # connect
+ dag_node.setInput(0, self.previous_node)
+ self._temp_nodes.append(dag_node)
+ self.previous_node = dag_node
+ self.log.debug("OCIODisplay... `{}`".format(self._temp_nodes))
+
+ # Write node
+ write_node = nuke.createNode("Write")
+ self.log.debug("Path: {}".format(self.path))
+ self.instance.data["baked_colorspace_movie"] = self.path
+ write_node["file"].setValue(self.path)
+ write_node["file_type"].setValue(self.ext)
+ write_node["raw"].setValue(1)
+ # connect
+ write_node.setInput(0, self.previous_node)
+ self._temp_nodes.append(write_node)
+ self.log.debug("Write... `{}`".format(self._temp_nodes))
+
+ # ---------- end nodes creation
+
+ # ---------- render or save to nk
+ if farm:
+ path_nk = self.save_file()
+ self.data.update({
+ "bakeScriptPath": path_nk,
+ "bakeWriteNodeName": write_node.name(),
+ "bakeRenderPath": self.path
+ })
+ else:
+ self.render(write_node.name())
+ # ---------- generate representation data
+ self.get_representation_data(
+ tags=["review", "delete"],
+ range=True
+ )
+
+ self.log.debug("Representation... `{}`".format(self.data))
+
+        # ---------- Clean up
+ self.clean_nodes()
+
+ return self.data
+
+
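For orientation, a rough usage sketch of `ExporterReviewMov` as it might be driven from an extractor plugin; everything outside the class itself (the `farm` key in `instance.data`, the caller) is an assumption, not part of this diff:

```python
# Hypothetical caller: a pyblish extractor deciding between local bake and farm bake.
exporter = ExporterReviewMov(self, instance, name="baked", ext="mov")

if instance.data.get("farm"):
    # On the farm only the temporary node tree is saved as a .nk script; the
    # returned data carries bakeScriptPath / bakeWriteNodeName / bakeRenderPath.
    data = exporter.generate_mov(farm=True)
else:
    # Locally the Write node is executed right away and a "review" + "delete"
    # tagged representation is added to the returned data.
    data = exporter.generate_mov()

instance.data.update(data)
```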
+def get_dependent_nodes(nodes):
+ """Get all dependent nodes connected to the list of nodes.
+
+    Only connections to nodes outside of the given list are collected.
+
+ Arguments:
+ nodes (list): list of nuke.Node objects
+
+ Returns:
+        connections_in (dict): mapping of node to its outside input connections
+        connections_out (dict): mapping of node to its outside dependent node
+ """
+
+ connections_in = dict()
+ connections_out = dict()
+ node_names = [n.name() for n in nodes]
+ for node in nodes:
+ inputs = node.dependencies()
+ outputs = node.dependent()
+ # collect all inputs outside
+ test_in = [(i, n) for i, n in enumerate(inputs)
+ if n.name() not in node_names]
+ if test_in:
+ connections_in.update({
+ node: test_in
+ })
+ # collect all outputs outside
+ test_out = [i for i in outputs if i.name() not in node_names]
+ if test_out:
+ # only one dependent node is allowed
+ connections_out.update({
+ node: test_out[-1]
+ })
+
+ return connections_in, connections_out
+
+
+def find_free_space_to_paste_nodes(
+ nodes,
+ group=nuke.root(),
+ direction="right",
+ offset=300):
+ """
+    Get coordinates in the DAG (node graph) where new nodes can be placed
+
+ Arguments:
+ nodes (list): list of nuke.Node objects
+        group (nuke.Node) [optional]: group node in whose context to search
+        direction (str) [optional]: where the new nodes should be placed
+                                    [left, right, top, bottom]
+        offset (int) [optional]: offset from the rest of the nodes
+
+ Returns:
+        xpos (int): x coordinate in DAG
+        ypos (int): y coordinate in DAG
+ """
+ if len(nodes) == 0:
+ return 0, 0
+
+ group_xpos = list()
+ group_ypos = list()
+
+ # get local coordinates of all nodes
+ nodes_xpos = [n.xpos() for n in nodes] + \
+ [n.xpos() + n.screenWidth() for n in nodes]
+
+ nodes_ypos = [n.ypos() for n in nodes] + \
+ [n.ypos() + n.screenHeight() for n in nodes]
+
+ # get complete screen size of all nodes to be placed in
+ nodes_screen_width = max(nodes_xpos) - min(nodes_xpos)
+    nodes_screen_height = max(nodes_ypos) - min(nodes_ypos)
+
+ # get screen size (r,l,t,b) of all nodes in `group`
+ with group:
+ group_xpos = [n.xpos() for n in nuke.allNodes() if n not in nodes] + \
+ [n.xpos() + n.screenWidth() for n in nuke.allNodes()
+ if n not in nodes]
+ group_ypos = [n.ypos() for n in nuke.allNodes() if n not in nodes] + \
+ [n.ypos() + n.screenHeight() for n in nuke.allNodes()
+ if n not in nodes]
+
+ # calc output left
+    if direction == "left":
+ xpos = min(group_xpos) - abs(nodes_screen_width) - abs(offset)
+ ypos = min(group_ypos)
+ return xpos, ypos
+ # calc output right
+    if direction == "right":
+ xpos = max(group_xpos) + abs(offset)
+ ypos = min(group_ypos)
+ return xpos, ypos
+ # calc output top
+    if direction == "top":
+        xpos = min(group_xpos)
+        ypos = min(group_ypos) - abs(nodes_screen_height) - abs(offset)
+ return xpos, ypos
+ # calc output bottom
+    if direction == "bottom":
+ xpos = min(group_xpos)
+ ypos = max(group_ypos) + abs(offset)
+ return xpos, ypos
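A small, hypothetical example of how the two helpers above could be combined when pasting a block of nodes into a script; the selection, direction and offsets are illustrative:

```python
import nuke

# Nodes that were just pasted / created and should be moved out of the way.
pasted = nuke.selectedNodes()

# Which of the pasted nodes are wired to nodes outside the selection.
connections_in, connections_out = get_dependent_nodes(pasted)

# Free spot to the right of everything else in the root node graph.
xpos, ypos = find_free_space_to_paste_nodes(pasted, direction="right", offset=300)

# Shift the pasted block to the free spot while keeping its internal layout.
min_x = min(n.xpos() for n in pasted)
min_y = min(n.ypos() for n in pasted)
for node in pasted:
    node.setXYpos(xpos + (node.xpos() - min_x), ypos + (node.ypos() - min_y))
```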
diff --git a/pype/nuke/utils.py b/pype/nuke/utils.py
new file mode 100644
index 0000000000..7583221696
--- /dev/null
+++ b/pype/nuke/utils.py
@@ -0,0 +1,64 @@
+import os
+import nuke
+from avalon.nuke import lib as anlib
+
+
+def get_node_outputs(node):
+ '''
+ Return a dictionary of the nodes and pipes that are connected to node
+ '''
+ dep_dict = {}
+ dependencies = node.dependent(nuke.INPUTS | nuke.HIDDEN_INPUTS)
+ for d in dependencies:
+ dep_dict[d] = []
+ for i in range(d.inputs()):
+ if d.input(i) == node:
+ dep_dict[d].append(i)
+ return dep_dict
+
+
+def is_node_gizmo(node):
+ '''
+    Return True if the node is a gizmo
+ '''
+ return 'gizmo_file' in node.knobs()
+
+
+def gizmo_is_nuke_default(gizmo):
+ '''Check if gizmo is in default install path'''
+ plug_dir = os.path.join(os.path.dirname(
+ nuke.env['ExecutablePath']), 'plugins')
+ return gizmo.filename().startswith(plug_dir)
+
+
+def bake_gizmos_recursively(in_group=nuke.Root()):
+    """Convert gizmos in the given group to groups, recursively.
+
+    Arguments:
+        in_group (nuke.Node) [optional]: group node to process (defaults to root)
+ """
+ # preserve selection after all is done
+ with anlib.maintained_selection():
+ # jump to the group
+ with in_group:
+ for node in nuke.allNodes():
+ if is_node_gizmo(node) and not gizmo_is_nuke_default(node):
+ with node:
+ outputs = get_node_outputs(node)
+ group = node.makeGroup()
+ # Reconnect inputs and outputs if any
+ if outputs:
+ for n, pipes in outputs.items():
+ for i in pipes:
+ n.setInput(i, group)
+ for i in range(node.inputs()):
+ group.setInput(i, node.input(i))
+ # set node position and name
+ group.setXYpos(node.xpos(), node.ypos())
+ name = node.name()
+ nuke.delete(node)
+ group.setName(name)
+ node = group
+
+ if node.Class() == "Group":
+ bake_gizmos_recursively(node)
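An illustrative way to use the helpers in this module before handing a script off to another machine (the `Read1` node name is made up):

```python
import nuke

# Bake every non-default gizmo in the current script into an editable Group,
# so the script opens cleanly where the gizmo is not installed.
bake_gizmos_recursively()

# get_node_outputs() maps each downstream node to the input indices that
# point back at the given node.
node = nuke.toNode("Read1")
if node:
    for downstream, input_indices in get_node_outputs(node).items():
        print(downstream.name(), input_indices)
```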
diff --git a/pype/nukestudio/workio.py b/pype/nukestudio/workio.py
index 1681d8a2ab..c7484b826b 100644
--- a/pype/nukestudio/workio.py
+++ b/pype/nukestudio/workio.py
@@ -22,19 +22,16 @@ def has_unsaved_changes():
def save_file(filepath):
+ file = os.path.basename(filepath)
project = hiero.core.projects()[-1]
- # close `Untitled` project
- if "Untitled" not in project.name():
- log.info("Saving project: `{}`".format(project.name()))
+ if project:
+ log.info("Saving project: `{}` as '{}'".format(project.name(), file))
project.saveAs(filepath)
- elif not project:
+ else:
log.info("Creating new project...")
project = hiero.core.newProject()
project.saveAs(filepath)
- else:
- log.info("Dropping `Untitled` project...")
- return
def open_file(filepath):
diff --git a/pype/plugins/aport/publish/collect_context.py b/pype/plugins/aport/publish/collect_context.py
index 2aaa89fd05..35811d6378 100644
--- a/pype/plugins/aport/publish/collect_context.py
+++ b/pype/plugins/aport/publish/collect_context.py
@@ -1,9 +1,6 @@
import os
import pyblish.api
-from avalon import (
- io,
- api as avalon
-)
+from avalon import api as avalon
from pype import api as pype
import json
from pathlib import Path
diff --git a/pype/plugins/blender/create/create_model.py b/pype/plugins/blender/create/create_model.py
new file mode 100644
index 0000000000..7301073f05
--- /dev/null
+++ b/pype/plugins/blender/create/create_model.py
@@ -0,0 +1,32 @@
+"""Create a model asset."""
+
+import bpy
+
+from avalon import api
+from avalon.blender import Creator, lib
+
+
+class CreateModel(Creator):
+ """Polygonal static geometry"""
+
+ name = "modelMain"
+ label = "Model"
+ family = "model"
+ icon = "cube"
+
+ def process(self):
+ import pype.blender
+
+ asset = self.data["asset"]
+ subset = self.data["subset"]
+ name = pype.blender.plugin.model_name(asset, subset)
+ collection = bpy.data.collections.new(name=name)
+ bpy.context.scene.collection.children.link(collection)
+ self.data['task'] = api.Session.get('AVALON_TASK')
+ lib.imprint(collection, self.data)
+
+ if (self.options or {}).get("useSelection"):
+ for obj in lib.get_selection():
+ collection.objects.link(obj)
+
+ return collection
diff --git a/pype/plugins/blender/load/load_model.py b/pype/plugins/blender/load/load_model.py
new file mode 100644
index 0000000000..bd6db17650
--- /dev/null
+++ b/pype/plugins/blender/load/load_model.py
@@ -0,0 +1,315 @@
+"""Load a model asset in Blender."""
+
+import logging
+from pathlib import Path
+from pprint import pformat
+from typing import Dict, List, Optional
+
+import avalon.blender.pipeline
+import bpy
+import pype.blender
+from avalon import api
+
+logger = logging.getLogger("pype").getChild("blender").getChild("load_model")
+
+
+class BlendModelLoader(pype.blender.AssetLoader):
+ """Load models from a .blend file.
+
+ Because they come from a .blend file we can simply link the collection that
+ contains the model. There is no further need to 'containerise' it.
+
+ Warning:
+        Loading the same asset more than once is not properly supported at the
+ moment.
+ """
+
+ families = ["model"]
+ representations = ["blend"]
+
+ label = "Link Model"
+ icon = "code-fork"
+ color = "orange"
+
+ @staticmethod
+ def _get_lib_collection(name: str, libpath: Path) -> Optional[bpy.types.Collection]:
+ """Find the collection(s) with name, loaded from libpath.
+
+ Note:
+ It is assumed that only 1 matching collection is found.
+ """
+ for collection in bpy.data.collections:
+ if collection.name != name:
+ continue
+ if collection.library is None:
+ continue
+ if not collection.library.filepath:
+ continue
+ collection_lib_path = str(Path(bpy.path.abspath(collection.library.filepath)).resolve())
+ normalized_libpath = str(Path(bpy.path.abspath(str(libpath))).resolve())
+ if collection_lib_path == normalized_libpath:
+ return collection
+ return None
+
+ @staticmethod
+ def _collection_contains_object(
+ collection: bpy.types.Collection, object: bpy.types.Object
+ ) -> bool:
+ """Check if the collection contains the object."""
+ for obj in collection.objects:
+ if obj == object:
+ return True
+ return False
+
+ def process_asset(
+ self, context: dict, name: str, namespace: Optional[str] = None,
+ options: Optional[Dict] = None
+ ) -> Optional[List]:
+ """
+ Arguments:
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ context: Full parenthood of representation to load
+ options: Additional settings dictionary
+ """
+
+ libpath = self.fname
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+ lib_container = pype.blender.plugin.model_name(asset, subset)
+ container_name = pype.blender.plugin.model_name(
+ asset, subset, namespace
+ )
+ relative = bpy.context.preferences.filepaths.use_relative_paths
+
+ with bpy.data.libraries.load(
+ libpath, link=True, relative=relative
+ ) as (_, data_to):
+ data_to.collections = [lib_container]
+
+ scene = bpy.context.scene
+ instance_empty = bpy.data.objects.new(
+ container_name, None
+ )
+ if not instance_empty.get("avalon"):
+ instance_empty["avalon"] = dict()
+ avalon_info = instance_empty["avalon"]
+ avalon_info.update({"container_name": container_name})
+ scene.collection.objects.link(instance_empty)
+ instance_empty.instance_type = 'COLLECTION'
+ container = bpy.data.collections[lib_container]
+ container.name = container_name
+ instance_empty.instance_collection = container
+ container.make_local()
+ avalon.blender.pipeline.containerise_existing(
+ container,
+ name,
+ namespace,
+ context,
+ self.__class__.__name__,
+ )
+
+ nodes = list(container.objects)
+ nodes.append(container)
+ nodes.append(instance_empty)
+ self[:] = nodes
+ return nodes
+
+ def update(self, container: Dict, representation: Dict):
+ """Update the loaded asset.
+
+ This will remove all objects of the current collection, load the new
+ ones and add them to the collection.
+ If the objects of the collection are used in another collection they
+ will not be removed, only unlinked. Normally this should not be the
+ case though.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ collection = bpy.data.collections.get(
+ container["objectName"]
+ )
+ libpath = Path(api.get_representation_path(representation))
+ extension = libpath.suffix.lower()
+
+ logger.debug(
+ "Container: %s\nRepresentation: %s",
+ pformat(container, indent=2),
+ pformat(representation, indent=2),
+ )
+
+ assert collection, (
+ f"The asset is not loaded: {container['objectName']}"
+ )
+ assert not (collection.children), (
+ "Nested collections are not supported."
+ )
+ assert libpath, (
+            f"No existing library file found for {container['objectName']}"
+ )
+ assert libpath.is_file(), (
+ f"The file doesn't exist: {libpath}"
+ )
+ assert extension in pype.blender.plugin.VALID_EXTENSIONS, (
+ f"Unsupported file: {libpath}"
+ )
+ collection_libpath = (
+ self._get_library_from_container(collection).filepath
+ )
+ normalized_collection_libpath = (
+ str(Path(bpy.path.abspath(collection_libpath)).resolve())
+ )
+ normalized_libpath = (
+ str(Path(bpy.path.abspath(str(libpath))).resolve())
+ )
+ logger.debug(
+ "normalized_collection_libpath:\n %s\nnormalized_libpath:\n %s",
+ normalized_collection_libpath,
+ normalized_libpath,
+ )
+ if normalized_collection_libpath == normalized_libpath:
+ logger.info("Library already loaded, not updating...")
+ return
+ # Let Blender's garbage collection take care of removing the library
+ # itself after removing the objects.
+ objects_to_remove = set()
+ collection_objects = list()
+ collection_objects[:] = collection.objects
+ for obj in collection_objects:
+ # Unlink every object
+ collection.objects.unlink(obj)
+ remove_obj = True
+ for coll in [
+ coll for coll in bpy.data.collections
+ if coll != collection
+ ]:
+ if (
+ coll.objects and
+ self._collection_contains_object(coll, obj)
+ ):
+ remove_obj = False
+ if remove_obj:
+ objects_to_remove.add(obj)
+
+ for obj in objects_to_remove:
+ # Only delete objects that are not used elsewhere
+ bpy.data.objects.remove(obj)
+
+ instance_empties = [
+ obj for obj in collection.users_dupli_group
+ if obj.name in collection.name
+ ]
+ if instance_empties:
+ instance_empty = instance_empties[0]
+ container_name = instance_empty["avalon"]["container_name"]
+
+ relative = bpy.context.preferences.filepaths.use_relative_paths
+ with bpy.data.libraries.load(
+ str(libpath), link=True, relative=relative
+ ) as (_, data_to):
+ data_to.collections = [container_name]
+
+ new_collection = self._get_lib_collection(container_name, libpath)
+ if new_collection is None:
+ raise ValueError(
+                    f"A matching collection '{container_name}' "
+                    f"should have been found in: {libpath}"
+ )
+
+ for obj in new_collection.objects:
+ collection.objects.link(obj)
+ bpy.data.collections.remove(new_collection)
+ # Update the representation on the collection
+ avalon_prop = collection[avalon.blender.pipeline.AVALON_PROPERTY]
+ avalon_prop["representation"] = str(representation["_id"])
+
+ def remove(self, container: Dict) -> bool:
+ """Remove an existing container from a Blender scene.
+
+ Arguments:
+ container (avalon-core:container-1.0): Container to remove,
+ from `host.ls()`.
+
+ Returns:
+ bool: Whether the container was deleted.
+
+ Warning:
+ No nested collections are supported at the moment!
+ """
+ collection = bpy.data.collections.get(
+ container["objectName"]
+ )
+ if not collection:
+ return False
+ assert not (collection.children), (
+ "Nested collections are not supported."
+ )
+ instance_parents = list(collection.users_dupli_group)
+ instance_objects = list(collection.objects)
+ for obj in instance_objects + instance_parents:
+ bpy.data.objects.remove(obj)
+ bpy.data.collections.remove(collection)
+
+ return True
+
+
+class CacheModelLoader(pype.blender.AssetLoader):
+ """Load cache models.
+
+ Stores the imported asset in a collection named after the asset.
+
+ Note:
+ At least for now it only supports Alembic files.
+ """
+
+ families = ["model"]
+ representations = ["abc"]
+
+ label = "Link Model"
+ icon = "code-fork"
+ color = "orange"
+
+ def process_asset(
+ self, context: dict, name: str, namespace: Optional[str] = None,
+ options: Optional[Dict] = None
+ ) -> Optional[List]:
+ """
+ Arguments:
+ name: Use pre-defined name
+ namespace: Use pre-defined namespace
+ context: Full parenthood of representation to load
+ options: Additional settings dictionary
+ """
+ raise NotImplementedError("Loading of Alembic files is not yet implemented.")
+ # TODO (jasper): implement Alembic import.
+
+ libpath = self.fname
+ asset = context["asset"]["name"]
+ subset = context["subset"]["name"]
+ # TODO (jasper): evaluate use of namespace which is 'alien' to Blender.
+ lib_container = container_name = (
+ pype.blender.plugin.model_name(asset, subset, namespace)
+ )
+ relative = bpy.context.preferences.filepaths.use_relative_paths
+
+ with bpy.data.libraries.load(
+ libpath, link=True, relative=relative
+ ) as (data_from, data_to):
+ data_to.collections = [lib_container]
+
+ scene = bpy.context.scene
+ instance_empty = bpy.data.objects.new(
+ container_name, None
+ )
+ scene.collection.objects.link(instance_empty)
+ instance_empty.instance_type = 'COLLECTION'
+ collection = bpy.data.collections[lib_container]
+ collection.name = container_name
+ instance_empty.instance_collection = collection
+
+ nodes = list(collection.objects)
+ nodes.append(collection)
+ nodes.append(instance_empty)
+ self[:] = nodes
+ return nodes
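Both `_get_lib_collection` and `update` rely on the same path comparison: Blender stores library paths with the relative `//` prefix, so both sides are made absolute and resolved before comparing. A minimal sketch of that check, assuming nothing beyond the code above:

```python
from pathlib import Path

import bpy


def is_loaded_from(collection: bpy.types.Collection, libpath: str) -> bool:
    """Return True if the linked collection comes from the given .blend file."""
    if collection.library is None or not collection.library.filepath:
        return False
    lib_abs = Path(bpy.path.abspath(collection.library.filepath)).resolve()
    target_abs = Path(bpy.path.abspath(str(libpath))).resolve()
    return str(lib_abs) == str(target_abs)
```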
diff --git a/pype/plugins/blender/publish/collect_current_file.py b/pype/plugins/blender/publish/collect_current_file.py
new file mode 100644
index 0000000000..a097c72047
--- /dev/null
+++ b/pype/plugins/blender/publish/collect_current_file.py
@@ -0,0 +1,16 @@
+import bpy
+
+import pyblish.api
+
+
+class CollectBlenderCurrentFile(pyblish.api.ContextPlugin):
+ """Inject the current working file into context"""
+
+ order = pyblish.api.CollectorOrder - 0.5
+ label = "Blender Current File"
+ hosts = ['blender']
+
+ def process(self, context):
+ """Inject the current working file"""
+ current_file = bpy.data.filepath
+ context.data['currentFile'] = current_file
diff --git a/pype/plugins/blender/publish/collect_model.py b/pype/plugins/blender/publish/collect_model.py
new file mode 100644
index 0000000000..ee10eaf7f2
--- /dev/null
+++ b/pype/plugins/blender/publish/collect_model.py
@@ -0,0 +1,53 @@
+import typing
+from typing import Generator
+
+import bpy
+
+import avalon.api
+import pyblish.api
+from avalon.blender.pipeline import AVALON_PROPERTY
+
+
+class CollectModel(pyblish.api.ContextPlugin):
+ """Collect the data of a model."""
+
+ hosts = ["blender"]
+ label = "Collect Model"
+ order = pyblish.api.CollectorOrder
+
+ @staticmethod
+ def get_model_collections() -> Generator:
+ """Return all 'model' collections.
+
+ Check if the family is 'model' and if it doesn't have the
+ representation set. If the representation is set, it is a loaded model
+ and we don't want to publish it.
+ """
+ for collection in bpy.data.collections:
+ avalon_prop = collection.get(AVALON_PROPERTY) or dict()
+ if (avalon_prop.get('family') == 'model'
+ and not avalon_prop.get('representation')):
+ yield collection
+
+ def process(self, context):
+ """Collect the models from the current Blender scene."""
+ collections = self.get_model_collections()
+ for collection in collections:
+ avalon_prop = collection[AVALON_PROPERTY]
+ asset = avalon_prop['asset']
+ family = avalon_prop['family']
+ subset = avalon_prop['subset']
+ task = avalon_prop['task']
+ name = f"{asset}_{subset}"
+ instance = context.create_instance(
+ name=name,
+ family=family,
+ families=[family],
+ subset=subset,
+ asset=asset,
+ task=task,
+ )
+ members = list(collection.objects)
+ members.append(collection)
+ instance[:] = members
+ self.log.debug(instance.data)
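For reference, the Avalon property block that `get_model_collections` filters on roughly looks like this on a publishable collection (values are illustrative; a loaded model would additionally carry a `representation` key and is therefore skipped):

```python
avalon_property = {
    "family": "model",
    "asset": "characterA",   # illustrative asset name
    "subset": "modelMain",
    "task": "modeling",
    # loaded (already published) models also have "representation" set,
    # which is exactly what the collector uses to ignore them
}
```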
diff --git a/pype/plugins/blender/publish/extract_model.py b/pype/plugins/blender/publish/extract_model.py
new file mode 100644
index 0000000000..501c4d9d5c
--- /dev/null
+++ b/pype/plugins/blender/publish/extract_model.py
@@ -0,0 +1,47 @@
+import os
+import avalon.blender.workio
+
+import pype.api
+
+
+class ExtractModel(pype.api.Extractor):
+ """Extract as model."""
+
+ label = "Model"
+ hosts = ["blender"]
+ families = ["model"]
+ optional = True
+
+ def process(self, instance):
+ # Define extract output file path
+
+ stagingdir = self.staging_dir(instance)
+ filename = f"{instance.name}.blend"
+ filepath = os.path.join(stagingdir, filename)
+
+ # Perform extraction
+ self.log.info("Performing extraction..")
+
+ # Just save the file to a temporary location. At least for now it's no
+ # problem to have (possibly) extra stuff in the file.
+ avalon.blender.workio.save_file(filepath, copy=True)
+ #
+ # # Store reference for integration
+ # if "files" not in instance.data:
+ # instance.data["files"] = list()
+ #
+ # # instance.data["files"].append(filename)
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ 'name': 'blend',
+ 'ext': 'blend',
+ 'files': filename,
+ "stagingDir": stagingdir,
+ }
+ instance.data["representations"].append(representation)
+
+
+ self.log.info("Extracted instance '%s' to: %s", instance.name, representation)
diff --git a/pype/plugins/blender/publish/validate_mesh_has_uv.py b/pype/plugins/blender/publish/validate_mesh_has_uv.py
new file mode 100644
index 0000000000..b71a40ad8f
--- /dev/null
+++ b/pype/plugins/blender/publish/validate_mesh_has_uv.py
@@ -0,0 +1,49 @@
+from typing import List
+
+import bpy
+
+import pyblish.api
+import pype.blender.action
+
+
+class ValidateMeshHasUvs(pyblish.api.InstancePlugin):
+ """Validate that the current mesh has UV's."""
+
+ order = pyblish.api.ValidatorOrder
+ hosts = ["blender"]
+ families = ["model"]
+ category = "geometry"
+ label = "Mesh Has UV's"
+ actions = [pype.blender.action.SelectInvalidAction]
+ optional = True
+
+ @staticmethod
+ def has_uvs(obj: bpy.types.Object) -> bool:
+ """Check if an object has uv's."""
+ if not obj.data.uv_layers:
+ return False
+ for uv_layer in obj.data.uv_layers:
+ for polygon in obj.data.polygons:
+ for loop_index in polygon.loop_indices:
+ if not uv_layer.data[loop_index].uv:
+ return False
+
+ return True
+
+ @classmethod
+ def get_invalid(cls, instance) -> List:
+ invalid = []
+ # TODO (jasper): only check objects in the collection that will be published?
+ for obj in [
+ obj for obj in bpy.data.objects if obj.type == 'MESH'
+ ]:
+ # Make sure we are in object mode.
+ bpy.ops.object.mode_set(mode='OBJECT')
+ if not cls.has_uvs(obj):
+ invalid.append(obj)
+ return invalid
+
+ def process(self, instance):
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(f"Meshes found in instance without valid UV's: {invalid}")
diff --git a/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py b/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py
new file mode 100644
index 0000000000..7e3b38dd19
--- /dev/null
+++ b/pype/plugins/blender/publish/validate_mesh_no_negative_scale.py
@@ -0,0 +1,35 @@
+from typing import List
+
+import bpy
+
+import pyblish.api
+import pype.blender.action
+
+
+class ValidateMeshNoNegativeScale(pyblish.api.Validator):
+ """Ensure that meshes don't have a negative scale."""
+
+ order = pyblish.api.ValidatorOrder
+ hosts = ["blender"]
+ families = ["model"]
+ label = "Mesh No Negative Scale"
+ actions = [pype.blender.action.SelectInvalidAction]
+
+ @staticmethod
+ def get_invalid(instance) -> List:
+ invalid = []
+ # TODO (jasper): only check objects in the collection that will be published?
+ for obj in [
+ obj for obj in bpy.data.objects if obj.type == 'MESH'
+ ]:
+ if any(v < 0 for v in obj.scale):
+ invalid.append(obj)
+
+ return invalid
+
+ def process(self, instance):
+ invalid = self.get_invalid(instance)
+ if invalid:
+ raise RuntimeError(
+ f"Meshes found in instance with negative scale: {invalid}"
+ )
diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_api.py b/pype/plugins/ftrack/publish/integrate_ftrack_api.py
index 9fe4fddebf..c51685f84d 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_api.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_api.py
@@ -144,8 +144,11 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
"version": 0,
"asset": asset_entity,
}
-
- assetversion_data.update(data.get("assetversion_data", {}))
+ _assetversion_data = data.get("assetversion_data", {})
+ assetversion_cust_attrs = _assetversion_data.pop(
+ "custom_attributes", {}
+ )
+ assetversion_data.update(_assetversion_data)
assetversion_entity = session.query(
self.query("AssetVersion", assetversion_data)
@@ -182,6 +185,22 @@ class IntegrateFtrackApi(pyblish.api.InstancePlugin):
existing_assetversion_metadata.update(assetversion_metadata)
assetversion_entity["metadata"] = existing_assetversion_metadata
+ # Adding Custom Attributes
+ for attr, val in assetversion_cust_attrs.items():
+ if attr in assetversion_entity["custom_attributes"]:
+ try:
+ assetversion_entity["custom_attributes"][attr] = val
+ session.commit()
+ continue
+ except Exception:
+ session.rollback()
+
+ self.log.warning((
+                        "Custom Attribute \"{0}\""
+                        " is not available for AssetVersion <{1}>."
+                        " Can't set its value to: \"{2}\""
+ ).format(attr, assetversion_entity["id"], str(val)))
+
# Have to commit the version and asset, because location can't
# determine the final location without.
try:
diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
index 383ed0098b..78583b0a2f 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
@@ -116,6 +116,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
},
"assetversion_data": {
"version": version_number,
+ "comment": instance.context.data.get("comment", "")
},
"component_data": component_data,
"component_path": comp['published_path'],
@@ -124,6 +125,16 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"thumbnail": comp['thumbnail']
}
+ # Add custom attributes for AssetVersion
+ assetversion_cust_attrs = {}
+ intent_val = instance.context.data.get("intent")
+ if intent_val:
+ assetversion_cust_attrs["intent"] = intent_val
+
+ component_item["assetversion_data"]["custom_attributes"] = (
+ assetversion_cust_attrs
+ )
+
componentList.append(component_item)
# Create copy with ftrack.unmanaged location if thumb or prev
if comp.get('thumbnail') or comp.get('preview') \
diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py
index 39481e216b..1214657856 100644
--- a/pype/plugins/global/publish/collect_filesequences.py
+++ b/pype/plugins/global/publish/collect_filesequences.py
@@ -54,10 +54,6 @@ def collect(root,
patterns=[pattern],
minimum_items=1)
- # Ignore any remainders
- if remainder:
- print("Skipping remainder {}".format(remainder))
-
# Exclude any frames outside start and end frame.
for collection in collections:
for index in list(collection.indexes):
@@ -71,7 +67,7 @@ def collect(root,
# Keep only collections that have at least a single frame
collections = [c for c in collections if c.indexes]
- return collections
+ return collections, remainder
class CollectRenderedFrames(pyblish.api.ContextPlugin):
@@ -100,6 +96,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
label = "RenderedFrames"
def process(self, context):
+ pixel_aspect = 1
+ lut_path = None
if os.environ.get("PYPE_PUBLISH_PATHS"):
paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
self.log.info("Collecting paths: {}".format(paths))
@@ -117,8 +115,10 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
try:
data = json.load(f)
except Exception as exc:
- self.log.error("Error loading json: "
- "{} - Exception: {}".format(path, exc))
+ self.log.error(
+ "Error loading json: "
+ "{} - Exception: {}".format(path, exc)
+ )
raise
cwd = os.path.dirname(path)
@@ -144,6 +144,15 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
self.log.info("setting session using metadata")
api.Session.update(session)
os.environ.update(session)
+                    instance = metadata.get("instance") or {}
+                    # here is the place to add ability for nuke noninteractive
+                    # ______________________________________
+                    instance_family = instance.get("family")
+                    if instance:
+ pixel_aspect = instance.get("pixelAspect", 1)
+ resolution_width = instance.get("resolutionWidth", 1920)
+ resolution_height = instance.get("resolutionHeight", 1080)
+ lut_path = instance.get("lutPath", None)
else:
# Search in directory
@@ -155,14 +164,17 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
if regex:
self.log.info("Using regex: {}".format(regex))
- collections = collect(root=root,
- regex=regex,
- exclude_regex=data.get("exclude_regex"),
- frame_start=data.get("frameStart"),
- frame_end=data.get("frameEnd"))
+ collections, remainder = collect(
+ root=root,
+ regex=regex,
+ exclude_regex=data.get("exclude_regex"),
+ frame_start=data.get("frameStart"),
+ frame_end=data.get("frameEnd"),
+ )
self.log.info("Found collections: {}".format(collections))
+ """
if data.get("subset"):
# If subset is provided for this json then it must be a single
# collection.
@@ -170,69 +182,190 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
self.log.error("Forced subset can only work with a single "
"found sequence")
raise RuntimeError("Invalid sequence")
+ """
fps = data.get("fps", 25)
+ if data.get("user"):
+ context.data["user"] = data["user"]
+
# Get family from the data
families = data.get("families", ["render"])
if "render" not in families:
families.append("render")
if "ftrack" not in families:
families.append("ftrack")
- if "review" not in families:
- families.append("review")
+            if instance_family and "write" in instance_family:
+ families.append("write")
- for collection in collections:
- instance = context.create_instance(str(collection))
- self.log.info("Collection: %s" % list(collection))
+ if data.get("attachTo"):
+ # we need to attach found collections to existing
+                # subset version as review representation.
- # Ensure each instance gets a unique reference to the data
+ for attach in data.get("attachTo"):
+ self.log.info(
+ "Attaching render {}:v{}".format(
+ attach["subset"], attach["version"]))
+ instance = context.create_instance(
+ attach["subset"])
+ instance.data.update(
+ {
+ "name": attach["subset"],
+ "version": attach["version"],
+ "family": 'review',
+ "families": ['review', 'ftrack'],
+ "asset": data.get(
+ "asset", api.Session["AVALON_ASSET"]),
+ "stagingDir": root,
+ "frameStart": data.get("frameStart"),
+ "frameEnd": data.get("frameEnd"),
+ "fps": fps,
+ "source": data.get("source", ""),
+ "pixelAspect": pixel_aspect
+ })
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ for collection in collections:
+ self.log.info(
+ " - adding representation: {}".format(
+ str(collection))
+ )
+ ext = collection.tail.lstrip(".")
+
+ representation = {
+ "name": ext,
+ "ext": "{}".format(ext),
+ "files": list(collection),
+ "stagingDir": root,
+ "anatomy_template": "render",
+ "fps": fps,
+ "tags": ["review"],
+ }
+ instance.data["representations"].append(
+ representation)
+
+ elif data.get("subset"):
+                # if we have a subset - add all collections and known
+                # remainder files as representations
+
+ self.log.info(
+ "Adding representations to subset {}".format(
+ data.get("subset")))
+
+ instance = context.create_instance(data.get("subset"))
data = copy.deepcopy(data)
- # If no subset provided, get it from collection's head
- subset = data.get("subset", collection.head.rstrip("_. "))
-
- # If no start or end frame provided, get it from collection
- indices = list(collection.indexes)
- start = data.get("frameStart", indices[0])
- end = data.get("frameEnd", indices[-1])
-
- # root = os.path.normpath(root)
- # self.log.info("Source: {}}".format(data.get("source", "")))
-
- ext = list(collection)[0].split('.')[-1]
-
- instance.data.update({
- "name": str(collection),
- "family": families[0], # backwards compatibility / pyblish
- "families": list(families),
- "subset": subset,
- "asset": data.get("asset", api.Session["AVALON_ASSET"]),
- "stagingDir": root,
- "frameStart": start,
- "frameEnd": end,
- "fps": fps,
- "source": data.get('source', '')
- })
- instance.append(collection)
- instance.context.data['fps'] = fps
+ instance.data.update(
+ {
+ "name": data.get("subset"),
+ "family": families[0],
+ "families": list(families),
+ "subset": data.get("subset"),
+ "asset": data.get(
+ "asset", api.Session["AVALON_ASSET"]),
+ "stagingDir": root,
+ "frameStart": data.get("frameStart"),
+ "frameEnd": data.get("frameEnd"),
+ "fps": fps,
+ "source": data.get("source", ""),
+ "pixelAspect": pixel_aspect,
+ }
+ )
if "representations" not in instance.data:
instance.data["representations"] = []
- representation = {
- 'name': ext,
- 'ext': '{}'.format(ext),
- 'files': list(collection),
- "stagingDir": root,
- "anatomy_template": "render",
- "fps": fps,
- "tags": ['review']
- }
- instance.data["representations"].append(representation)
+ for collection in collections:
+ self.log.info(" - {}".format(str(collection)))
- if data.get('user'):
- context.data["user"] = data['user']
+ ext = collection.tail.lstrip(".")
- self.log.debug("Collected instance:\n"
- "{}".format(pformat(instance.data)))
+ representation = {
+ "name": ext,
+ "ext": "{}".format(ext),
+ "files": list(collection),
+ "stagingDir": root,
+ "anatomy_template": "render",
+ "fps": fps,
+ "tags": ["review"],
+ }
+ instance.data["representations"].append(
+ representation)
+
+                    # process remainder files
+ for rem in remainder:
+ # add only known types to representation
+ if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
+ self.log.info(" . {}".format(rem))
+ representation = {
+ "name": rem.split(".")[-1],
+ "ext": "{}".format(rem.split(".")[-1]),
+ "files": rem,
+ "stagingDir": root,
+ "anatomy_template": "render",
+ "fps": fps,
+ "tags": ["review"],
+ }
+ instance.data["representations"].append(
+ representation)
+
+ else:
+ # we have no subset so we take every collection and create one
+ # from it
+ for collection in collections:
+ instance = context.create_instance(str(collection))
+ self.log.info("Creating subset from: %s" % str(collection))
+
+ # Ensure each instance gets a unique reference to the data
+ data = copy.deepcopy(data)
+
+ # If no subset provided, get it from collection's head
+ subset = data.get("subset", collection.head.rstrip("_. "))
+
+ # If no start or end frame provided, get it from collection
+ indices = list(collection.indexes)
+ start = data.get("frameStart", indices[0])
+ end = data.get("frameEnd", indices[-1])
+
+ ext = list(collection)[0].split(".")[-1]
+
+ if "review" not in families:
+ families.append("review")
+
+ instance.data.update(
+ {
+ "name": str(collection),
+ "family": families[0], # backwards compatibility
+ "families": list(families),
+ "subset": subset,
+ "asset": data.get(
+ "asset", api.Session["AVALON_ASSET"]),
+ "stagingDir": root,
+ "frameStart": start,
+ "frameEnd": end,
+ "fps": fps,
+ "source": data.get("source", ""),
+ "pixelAspect": pixel_aspect,
+ }
+ )
+ if lut_path:
+ instance.data.update({"lutPath": lut_path})
+
+ instance.append(collection)
+ instance.context.data["fps"] = fps
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ representation = {
+ "name": ext,
+ "ext": "{}".format(ext),
+ "files": list(collection),
+ "stagingDir": root,
+ "anatomy_template": "render",
+ "fps": fps,
+ "tags": ["review"],
+ }
+ instance.data["representations"].append(representation)
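Hedged sketch of the changed `collect()` contract: it now returns the leftover single files as well, so callers have to unpack two values; the path and frame range below are illustrative:

```python
collections, remainder = collect(
    root="/path/to/render",   # illustrative
    regex=None,
    exclude_regex=None,
    frame_start=1001,
    frame_end=1100,
)

for collection in collections:
    print("frame sequence:", str(collection))

# Single files such as a baked .mov review land in `remainder`; only the
# known extensions are turned into extra representations.
for single in remainder:
    if single.split(".")[-1] in ["mov", "jpg", "mp4"]:
        print("extra representation:", single)
```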
diff --git a/pype/plugins/global/publish/collect_templates.py b/pype/plugins/global/publish/collect_templates.py
index 42b547b4ef..383944e293 100644
--- a/pype/plugins/global/publish/collect_templates.py
+++ b/pype/plugins/global/publish/collect_templates.py
@@ -87,8 +87,19 @@ class CollectTemplates(pyblish.api.InstancePlugin):
"asset": asset_name,
"subset": subset_name,
"version": version_number,
- "hierarchy": hierarchy,
- "representation": "TEMP"}
+ "hierarchy": hierarchy.replace("\\", "/"),
+                     "representation": "TEMP"}
+
+ resolution_width = instance.data.get("resolutionWidth")
+ resolution_height = instance.data.get("resolutionHeight")
+ fps = instance.data.get("fps")
+
+ if resolution_width:
+ template_data["resolution_width"] = resolution_width
+        if resolution_height:
+            template_data["resolution_height"] = resolution_height
+        if fps:
+            template_data["fps"] = fps
instance.data["template"] = template
instance.data["assumedTemplateData"] = template_data
@@ -97,3 +108,6 @@ class CollectTemplates(pyblish.api.InstancePlugin):
instance.data["assumedDestination"] = os.path.dirname(
(anatomy.format(template_data))["publish"]["path"]
)
+ self.log.info("Assumed Destination has been created...")
+ self.log.debug("__ assumedTemplateData: `{}`".format(instance.data["assumedTemplateData"]))
+ self.log.debug("__ template: `{}`".format(instance.data["template"]))
diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index b1569aaa45..06a62dd98b 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -1,5 +1,6 @@
import os
import json
+import copy
import pype.api
import pyblish
@@ -29,13 +30,28 @@ class ExtractBurnin(pype.api.Extractor):
if instance.context.data.get('version'):
version = "v" + str(instance.context.data['version'])
+ frame_start = int(instance.data.get("frameStart") or 0)
+ frame_end = int(instance.data.get("frameEnd") or 1)
+ duration = frame_end - frame_start + 1
+
prep_data = {
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
- "start_frame": int(instance.data["frameStart"]),
- "version": version
+ "frame_start": frame_start,
+ "frame_end": frame_end,
+ "duration": duration,
+ "version": version,
+ "comment": instance.context.data.get("comment"),
+ "intent": instance.context.data.get("intent")
}
+ # Update data with template data
+ template_data = instance.data.get("assumedTemplateData") or {}
+ prep_data.update(template_data)
+
+ # get anatomy project
+ anatomy = instance.context.data['anatomy']
+
self.log.debug("__ prep_data: {}".format(prep_data))
for i, repre in enumerate(instance.data["representations"]):
self.log.debug("__ i: `{}`, repre: `{}`".format(i, repre))
@@ -49,14 +65,25 @@ class ExtractBurnin(pype.api.Extractor):
name = "_burnin"
movieFileBurnin = filename.replace(".mov", "") + name + ".mov"
- full_movie_path = os.path.join(os.path.normpath(stagingdir), repre["files"])
- full_burnin_path = os.path.join(os.path.normpath(stagingdir), movieFileBurnin)
+ full_movie_path = os.path.join(
+ os.path.normpath(stagingdir), repre["files"]
+ )
+ full_burnin_path = os.path.join(
+ os.path.normpath(stagingdir), movieFileBurnin
+ )
self.log.debug("__ full_burnin_path: {}".format(full_burnin_path))
+ # create copy of prep_data for anatomy formatting
+ _prep_data = copy.deepcopy(prep_data)
+ _prep_data["representation"] = repre["name"]
+ _prep_data["anatomy"] = (
+ anatomy.format_all(_prep_data).get("solved") or {}
+ )
burnin_data = {
"input": full_movie_path.replace("\\", "/"),
+ "codec": repre.get("codec", []),
"output": full_burnin_path.replace("\\", "/"),
- "burnin_data": prep_data
+ "burnin_data": _prep_data
}
self.log.debug("__ burnin_data2: {}".format(burnin_data))
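For one representation, the data handed to the burnin step roughly ends up shaped like this (all values invented for illustration; `anatomy` holds the solved template data):

```python
burnin_data = {
    "input": "C:/staging/shot010_compMain_baked.mov",
    "codec": ["-codec:v", "libx264"],   # taken from the representation if present
    "output": "C:/staging/shot010_compMain_baked_burnin.mov",
    "burnin_data": {
        "username": "artist",
        "asset": "shot010",
        "task": "comp",
        "frame_start": 1001,
        "frame_end": 1100,
        "duration": 100,                # frame_end - frame_start + 1
        "version": "v003",
        "comment": "fixed edge artefacts",
        "intent": "WIP",
        "representation": "mov",
        "anatomy": {},                  # anatomy.format_all(...)["solved"]
        # ...plus whatever assumedTemplateData contributed
    },
}
```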
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index de167710a5..9cdc394c1f 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -1,9 +1,7 @@
import os
-
import pyblish.api
import clique
import pype.api
-from pypeapp import config
class ExtractReview(pyblish.api.InstancePlugin):
@@ -22,16 +20,21 @@ class ExtractReview(pyblish.api.InstancePlugin):
families = ["review"]
hosts = ["nuke", "maya", "shell"]
+ outputs = {}
+ ext_filter = []
+
def process(self, instance):
- # adding plugin attributes from presets
- publish_presets = config.get_presets()["plugins"]["global"]["publish"]
- plugin_attrs = publish_presets[self.__class__.__name__]
- output_profiles = plugin_attrs.get("outputs", {})
+ to_width = 1920
+ to_height = 1080
+
+ output_profiles = self.outputs or {}
inst_data = instance.data
fps = inst_data.get("fps")
start_frame = inst_data.get("frameStart")
-
+ resolution_width = instance.data.get("resolutionWidth", to_width)
+ resolution_height = instance.data.get("resolutionHeight", to_height)
+ pixel_aspect = instance.data.get("pixelAspect", 1)
self.log.debug("Families In: `{}`".format(instance.data["families"]))
# get representation and loop them
@@ -40,7 +43,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# filter out mov and img sequences
representations_new = representations[:]
for repre in representations:
- if repre['ext'] in plugin_attrs["ext_filter"]:
+ if repre['ext'] in self.ext_filter:
tags = repre.get("tags", [])
self.log.info("Try repre: {}".format(repre))
@@ -92,8 +95,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
self.log.info("p_tags: `{}`".format(p_tags))
# add families
[instance.data["families"].append(t)
- for t in p_tags
- if t not in instance.data["families"]]
+ for t in p_tags
+ if t not in instance.data["families"]]
+
# add to
[new_tags.append(t) for t in p_tags
if t not in new_tags]
@@ -147,21 +151,112 @@ class ExtractReview(pyblish.api.InstancePlugin):
)
output_args = []
+ codec_args = profile.get('codec', [])
+ output_args.extend(codec_args)
# preset's output data
output_args.extend(profile.get('output', []))
+ # defining image ratios
+ resolution_ratio = float(resolution_width / (
+ resolution_height * pixel_aspect))
+ delivery_ratio = float(to_width) / float(to_height)
+ self.log.debug(resolution_ratio)
+ self.log.debug(delivery_ratio)
+
+ # get scale factor
+ scale_factor = to_height / (
+ resolution_height * pixel_aspect)
+ self.log.debug(scale_factor)
+
# letter_box
- # TODO: add to documentation
- lb = profile.get('letter_box', None)
- if lb:
+ lb = profile.get('letter_box', 0)
+ if lb != 0:
+                    ffmpeg_width = to_width
+                    ffmpeg_height = to_height
+ if "reformat" not in p_tags:
+ lb /= pixel_aspect
+ if resolution_ratio != delivery_ratio:
+                            ffmpeg_width = resolution_width
+                            ffmpeg_height = int(
+                                resolution_height * pixel_aspect)
+ else:
+ # TODO: it might still be failing in some cases
+ if resolution_ratio != delivery_ratio:
+ lb /= scale_factor
+ else:
+ lb /= pixel_aspect
+
output_args.append(
- "-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
+                        "-filter:v scale={0}x{1}:flags=lanczos,setsar=1,drawbox=0:0:iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{2})))/2):iw:round((ih-(iw*(1/{2})))/2):t=fill:c=black".format(ffmpeg_width, ffmpeg_height, lb))
# In case audio is longer than video.
output_args.append("-shortest")
# output filename
output_args.append(full_output_path)
+
+ self.log.debug("__ pixel_aspect: `{}`".format(pixel_aspect))
+ self.log.debug("__ resolution_width: `{}`".format(resolution_width))
+ self.log.debug("__ resolution_height: `{}`".format(resolution_height))
+
+ # scaling none square pixels and 1920 width
+ if "reformat" in p_tags:
+ if resolution_ratio < delivery_ratio:
+                    self.log.debug("lower than delivery")
+ width_scale = int(to_width * scale_factor)
+ width_half_pad = int((
+ to_width - width_scale)/2)
+ height_scale = to_height
+ height_half_pad = 0
+ else:
+                    self.log.debug("higher than delivery")
+ width_scale = to_width
+ width_half_pad = 0
+ scale_factor = float(to_width) / float(resolution_width)
+ self.log.debug(scale_factor)
+ height_scale = int(
+ resolution_height * scale_factor)
+ height_half_pad = int(
+ (to_height - height_scale)/2)
+
+ self.log.debug("__ width_scale: `{}`".format(width_scale))
+ self.log.debug("__ width_half_pad: `{}`".format(width_half_pad))
+ self.log.debug("__ height_scale: `{}`".format(height_scale))
+ self.log.debug("__ height_half_pad: `{}`".format(height_half_pad))
+
+
+ scaling_arg = "scale={0}x{1}:flags=lanczos,pad={2}:{3}:{4}:{5}:black,setsar=1".format(
+ width_scale, height_scale, to_width, to_height, width_half_pad, height_half_pad
+ )
+
+ vf_back = self.add_video_filter_args(
+ output_args, scaling_arg)
+ # add it to output_args
+ output_args.insert(0, vf_back)
+
+ # baking lut file application
+ lut_path = instance.data.get("lutPath")
+ if lut_path and ("bake-lut" in p_tags):
+                # removing gamma info as it is all baked into the lut
+ gamma = next((g for g in input_args
+ if "-gamma" in g), None)
+ if gamma:
+ input_args.remove(gamma)
+
+ # create lut argument
+ lut_arg = "lut3d=file='{}'".format(
+ lut_path.replace(
+ "\\", "/").replace(":/", "\\:/")
+ )
+ lut_arg += ",colormatrix=bt601:bt709"
+
+ vf_back = self.add_video_filter_args(
+ output_args, lut_arg)
+ # add it to output_args
+ output_args.insert(0, vf_back)
+ self.log.info("Added Lut to ffmpeg command")
+ self.log.debug("_ output_args: `{}`".format(output_args))
+
mov_args = [
os.path.join(
os.environ.get(
@@ -183,7 +278,10 @@ class ExtractReview(pyblish.api.InstancePlugin):
'ext': ext,
'files': repr_file,
"tags": new_tags,
- "outputName": name
+ "outputName": name,
+ "codec": codec_args,
+ "resolutionWidth": resolution_width,
+                "resolutionHeight": resolution_height
})
if repre_new.get('preview'):
repre_new.pop("preview")
@@ -207,3 +305,40 @@ class ExtractReview(pyblish.api.InstancePlugin):
instance.data["representations"] = representations_new
self.log.debug("Families Out: `{}`".format(instance.data["families"]))
+
+ def add_video_filter_args(self, args, inserting_arg):
+ """
+        Fix video filter arguments into one long string
+
+ Args:
+ args (list): list of string arguments
+ inserting_arg (str): string argument we want to add
+ (without flag `-vf`)
+
+ Returns:
+ str: long joined argument to be added back to list of arguments
+
+ """
+        # find all video filter settings
+ vf_settings = [p for p in args
+ for v in ["-filter:v", "-vf"]
+ if v in p]
+ self.log.debug("_ vf_settings: `{}`".format(vf_settings))
+
+ # remove them from output args list
+ for p in vf_settings:
+ self.log.debug("_ remove p: `{}`".format(p))
+ args.remove(p)
+ self.log.debug("_ args: `{}`".format(args))
+
+ # strip them from all flags
+ vf_fixed = [p.replace("-vf ", "").replace("-filter:v ", "")
+ for p in vf_settings]
+
+ self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
+ vf_fixed.insert(0, inserting_arg)
+ self.log.debug("_ vf_fixed: `{}`".format(vf_fixed))
+ # create new video filter setting
+ vf_back = "-vf " + ",".join(vf_fixed)
+
+ return vf_back
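Illustration of what `add_video_filter_args` does inside `process` (`self` being the plugin instance, argument values made up): existing `-vf` / `-filter:v` entries are pulled out of the list and merged with the new filter into a single `-vf` argument, new filter first, which is then pushed to the front of the output arguments:

```python
output_args = [
    "-codec:v libx264",
    "-vf scale=1920x1080:flags=lanczos",
    "-crf 18",
]
vf_back = self.add_video_filter_args(
    output_args, "lut3d=file='/tmp/shot.cube'"
)
output_args.insert(0, vf_back)
# output_args is now roughly:
# ["-vf lut3d=file='/tmp/shot.cube',scale=1920x1080:flags=lanczos",
#  "-codec:v libx264",
#  "-crf 18"]
```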
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index da2ce5b457..d860452734 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -7,6 +7,7 @@ import errno
import pyblish.api
from avalon import api, io
from avalon.vendor import filelink
+from pathlib2 import Path
# this is needed until speedcopy for linux is fixed
if sys.platform == "win32":
from speedcopy import copyfile
@@ -70,6 +71,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"audio",
"yetiRig",
"yeticache",
+ "nukenodes",
+ "gizmo",
"source",
"matchmove",
"image"
@@ -273,6 +276,17 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"version": int(version["name"]),
"hierarchy": hierarchy}
+ resolution_width = repre.get("resolutionWidth")
+ resolution_height = repre.get("resolutionHeight")
+ fps = instance.data.get("fps")
+
+ if resolution_width:
+ template_data["resolution_width"] = resolution_width
+        if resolution_height:
+            template_data["resolution_height"] = resolution_height
+        if fps:
+            template_data["fps"] = fps
+
files = repre['files']
if repre.get('stagingDir'):
stagingdir = repre['stagingDir']
@@ -418,7 +432,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
}
if sequence_repre and repre.get("frameStart"):
- representation['context']['frame'] = repre.get("frameStart")
+ representation['context']['frame'] = src_padding_exp % int(repre.get("frameStart"))
self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
@@ -472,8 +486,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
Returns:
None
"""
- src = os.path.normpath(src)
- dst = os.path.normpath(dst)
+
+ src = str(Path(src).resolve())
+ drive, _path = os.path.splitdrive(dst)
+ unc = Path(drive).resolve()
+ dst = str(unc / _path)
self.log.debug("Copying file .. {} -> {}".format(src, dst))
dirname = os.path.dirname(dst)
@@ -494,6 +511,10 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
def hardlink_file(self, src, dst):
dirname = os.path.dirname(dst)
+ src = Path(src).resolve()
+ drive, _path = os.path.splitdrive(dst)
+ unc = Path(drive).resolve()
+ dst = str(unc / _path)
try:
os.makedirs(dirname)
except OSError as e:
@@ -533,13 +554,11 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# add group if available
if instance.data.get("subsetGroup"):
- subset["data"].update(
- {"subsetGroup": instance.data.get("subsetGroup")}
- )
io.update_many({
'type': 'subset',
'_id': io.ObjectId(subset["_id"])
- }, {'$set': subset["data"]}
+ }, {'$set': {'data.subsetGroup':
+ instance.data.get('subsetGroup')}}
)
return subset
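The new destination handling in `copy_file` / `hardlink_file` swaps a mapped drive for its resolved target before copying. A sketch of that resolution with illustrative paths, mirroring the pathlib2 calls used above:

```python
import os
from pathlib2 import Path  # pathlib behaves the same on Python 3

dst = "P:\\projects\\show\\publish\\render.exr"  # illustrative mapped-drive path
drive, _path = os.path.splitdrive(dst)           # -> ("P:", "\\projects\\...")
unc = Path(drive).resolve()                      # resolves the mapped drive, e.g. to \\server\share
dst = str(unc / _path)                           # destination rewritten onto the resolved root
```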
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index ddf1d948a3..9dd917ed55 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -21,6 +21,12 @@ def _get_script():
if module_path.endswith(".pyc"):
module_path = module_path[:-len(".pyc")] + ".py"
+ module_path = os.path.normpath(module_path)
+ mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
+ network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
+
+ module_path = module_path.replace(mount_root, network_root)
+
return module_path
@@ -176,6 +182,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
output_dir = instance.data["outputDir"]
metadata_path = os.path.join(output_dir, metadata_filename)
+ metadata_path = os.path.normpath(metadata_path)
+ mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
+ network_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_PATH'])
+
+ metadata_path = metadata_path.replace(mount_root, network_root)
+
# Generate the payload for Deadline submission
payload = {
"JobInfo": {
@@ -294,6 +306,19 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
+ # find subsets and version to attach render to
+ attach_to = instance.data.get("attachTo")
+ attach_subset_versions = []
+ if attach_to:
+ for subset in attach_to:
+ for instance in context:
+ if instance.data["subset"] != subset["subset"]:
+ continue
+ attach_subset_versions.append(
+ {"version": instance.data["version"],
+ "subset": subset["subset"],
+ "family": subset["family"]})
+
# Write metadata for publish job
metadata = {
"asset": asset,
diff --git a/pype/plugins/global/publish/validate_ffmpeg_installed.py b/pype/plugins/global/publish/validate_ffmpeg_installed.py
index 6d5ffba1e1..df7c330e95 100644
--- a/pype/plugins/global/publish/validate_ffmpeg_installed.py
+++ b/pype/plugins/global/publish/validate_ffmpeg_installed.py
@@ -27,6 +27,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator):
return True
def process(self, instance):
+ self.log.info("ffmpeg path: `{}`".format(
+ os.environ.get("FFMPEG_PATH", "")))
if self.is_tool(
os.path.join(
os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False:
diff --git a/pype/plugins/launcher/actions/Aport.py b/pype/plugins/launcher/actions/Aport.py
index 94f14cd0d3..0ecd07c49a 100644
--- a/pype/plugins/launcher/actions/Aport.py
+++ b/pype/plugins/launcher/actions/Aport.py
@@ -1,7 +1,4 @@
import os
-import sys
-from avalon import io
-from pprint import pprint
import acre
from avalon import api, lib
diff --git a/pype/plugins/launcher/actions/unused/PremierePro.py b/pype/plugins/launcher/actions/unused/PremierePro.py
index 97d693ffbb..57aa4eb2cb 100644
--- a/pype/plugins/launcher/actions/unused/PremierePro.py
+++ b/pype/plugins/launcher/actions/unused/PremierePro.py
@@ -1,10 +1,9 @@
import os
-import sys
-from pprint import pprint
import acre
from avalon import api, lib, io
import pype.api as pype
+from pypeapp import Anatomy
class PremierePro(api.Action):
diff --git a/pype/plugins/maya/create/create_ass.py b/pype/plugins/maya/create/create_ass.py
index 84b42e9b20..6d8eda1a40 100644
--- a/pype/plugins/maya/create/create_ass.py
+++ b/pype/plugins/maya/create/create_ass.py
@@ -1,6 +1,7 @@
from collections import OrderedDict
import avalon.maya
+from pype.maya import lib
from maya import cmds
@@ -14,10 +15,21 @@ class CreateAss(avalon.maya.Creator):
icon = "cube"
defaults = ['Main']
+ def __init__(self, *args, **kwargs):
+ super(CreateAss, self).__init__(*args, **kwargs)
+
+ # Add animation data
+ self.data.update(lib.collect_animation_data())
+
+ # Vertex colors with the geometry
+ self.data["exportSequence"] = False
+
def process(self):
instance = super(CreateAss, self).process()
- data = OrderedDict(**self.data)
+ # data = OrderedDict(**self.data)
+
+
nodes = list()
@@ -30,4 +42,6 @@ class CreateAss(avalon.maya.Creator):
assProxy = cmds.sets(name="proxy_SET", empty=True)
cmds.sets([assContent, assProxy], forceElement=instance)
- self.data = data
+ # self.log.info(data)
+ #
+ # self.data = data
diff --git a/pype/plugins/maya/load/load_ass.py b/pype/plugins/maya/load/load_ass.py
index 2960e4403e..83dd80bd4e 100644
--- a/pype/plugins/maya/load/load_ass.py
+++ b/pype/plugins/maya/load/load_ass.py
@@ -2,6 +2,7 @@ from avalon import api
import pype.maya.plugin
import os
from pypeapp import config
+import clique
class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
@@ -21,6 +22,13 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
from avalon import maya
import pymel.core as pm
+ version = context['version']
+ version_data = version.get("data", {})
+
+ self.log.info("version_data: {}\n".format(version_data))
+
+ frameStart = version_data.get("frameStart", None)
+
try:
family = context["representation"]["context"]["family"]
except ValueError:
@@ -30,7 +38,24 @@ class AssProxyLoader(pype.maya.plugin.ReferenceLoader):
groupName = "{}:{}".format(namespace, name)
path = self.fname
- proxyPath = os.path.splitext(path)[0] + ".ma"
+ proxyPath_base = os.path.splitext(path)[0]
+
+ if frameStart is not None:
+ proxyPath_base = os.path.splitext(proxyPath_base)[0]
+
+ publish_folder = os.path.split(path)[0]
+ files_in_folder = os.listdir(publish_folder)
+ collections, remainder = clique.assemble(files_in_folder)
+
+ if collections:
+ hashes = collections[0].padding * '#'
+ coll = collections[0].format('{head}[index]{tail}')
+ filename = coll.replace('[index]', hashes)
+
+ path = os.path.join(publish_folder, filename)
+
+ proxyPath = proxyPath_base + ".ma"
+            self.log.info("proxyPath: {}".format(proxyPath))
nodes = cmds.file(proxyPath,
namespace=namespace,
@@ -147,6 +172,13 @@ class AssStandinLoader(api.Loader):
import mtoa.ui.arnoldmenu
import pymel.core as pm
+ version = context['version']
+ version_data = version.get("data", {})
+
+ self.log.info("version_data: {}\n".format(version_data))
+
+ frameStart = version_data.get("frameStart", None)
+
asset = context['asset']['name']
namespace = namespace or lib.unique_namespace(
asset + "_",
@@ -182,6 +214,8 @@ class AssStandinLoader(api.Loader):
# Set the standin filepath
standinShape.dso.set(self.fname)
+ if frameStart is not None:
+ standinShape.useFrameExtension.set(1)
nodes = [root, standin]
self[:] = nodes
@@ -199,14 +233,23 @@ class AssStandinLoader(api.Loader):
path = api.get_representation_path(representation)
- # Update the standin
- members = pm.sets(container['objectName'], query=True)
- standins = pm.ls(members, type="AiStandIn", long=True)
+ files_in_path = os.listdir(os.path.split(path)[0])
+ sequence = 0
+ collections, remainder = clique.assemble(files_in_path)
+ if collections:
+ sequence = 1
- assert len(caches) == 1, "This is a bug"
+ # Update the standin
+ standins = list()
+ members = pm.sets(container['objectName'], query=True)
+ for member in members:
+ shape = member.getShape()
+ if (shape and shape.type() == "aiStandIn"):
+ standins.append(shape)
for standin in standins:
- standin.cacheFileName.set(path)
+ standin.dso.set(path)
+ standin.useFrameExtension.set(sequence)
container = pm.PyNode(container["objectName"])
container.representation.set(str(representation["_id"]))
diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py
index 55db019cf4..376fcc2c01 100644
--- a/pype/plugins/maya/load/load_reference.py
+++ b/pype/plugins/maya/load/load_reference.py
@@ -1,9 +1,7 @@
import pype.maya.plugin
import os
from pypeapp import config
-reload(config)
-import pype.maya.plugin
-reload(pype.maya.plugin)
+
class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
"""Load the model"""
@@ -22,7 +20,6 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
from avalon import maya
import pymel.core as pm
-
try:
family = context["representation"]["context"]["family"]
except ValueError:
diff --git a/pype/plugins/maya/publish/collect_ass.py b/pype/plugins/maya/publish/collect_ass.py
index c0174e7026..8e6691120a 100644
--- a/pype/plugins/maya/publish/collect_ass.py
+++ b/pype/plugins/maya/publish/collect_ass.py
@@ -21,15 +21,17 @@ class CollectAssData(pyblish.api.InstancePlugin):
objsets = instance.data['setMembers']
for objset in objsets:
+ objset = str(objset)
members = cmds.sets(objset, query=True)
if members is None:
self.log.warning("Skipped empty instance: \"%s\" " % objset)
continue
- if objset == "content_SET":
+ if "content_SET" in objset:
instance.data['setMembers'] = members
- elif objset == "proxy_SET":
+ self.log.debug('content members: {}'.format(members))
+ elif objset.startswith("proxy_SET"):
assert len(members) == 1, "You have multiple proxy meshes, please only use one"
instance.data['proxy'] = members
-
+ self.log.debug('proxy members: {}'.format(members))
self.log.debug("data: {}".format(instance.data))
diff --git a/pype/plugins/maya/publish/collect_look.py b/pype/plugins/maya/publish/collect_look.py
index 618f2749a4..7a5fea776c 100644
--- a/pype/plugins/maya/publish/collect_look.py
+++ b/pype/plugins/maya/publish/collect_look.py
@@ -219,10 +219,6 @@ class CollectLook(pyblish.api.InstancePlugin):
with lib.renderlayer(instance.data["renderlayer"]):
self.collect(instance)
- # make ftrack publishable
- self.maketx = instance.data.get('maketx', True)
- instance.data['maketx'] = self.maketx
- self.log.info('maketx: {}'.format(self.maketx))
def collect(self, instance):
@@ -297,9 +293,11 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.info("Collected file nodes:\n{}".format(files))
# Collect textures if any file nodes are found
- instance.data["resources"] = [self.collect_resource(n)
- for n in files]
- self.log.info("Collected resources:\n{}".format(instance.data["resources"]))
+ instance.data["resources"] = []
+ for n in files:
+ instance.data["resources"].append(self.collect_resource(n))
+
+ self.log.info("Collected resources: {}".format(instance.data["resources"]))
# Log a warning when no relevant sets were retrieved for the look.
if not instance.data["lookData"]["relationships"]:
@@ -423,7 +421,7 @@ class CollectLook(pyblish.api.InstancePlugin):
self.log.debug("processing: {}".format(node))
if cmds.nodeType(node) == 'file':
- self.log.debug("file node")
+ self.log.debug(" - file node")
attribute = "{}.fileTextureName".format(node)
computed_attribute = "{}.computedFileTextureNamePattern".format(node)
elif cmds.nodeType(node) == 'aiImage':
@@ -431,7 +429,7 @@ class CollectLook(pyblish.api.InstancePlugin):
attribute = "{}.filename".format(node)
computed_attribute = attribute
source = cmds.getAttr(attribute)
-
+ self.log.info(" - file source: {}".format(source))
color_space_attr = "{}.colorSpace".format(node)
color_space = cmds.getAttr(color_space_attr)
# Compare with the computed file path, e.g. the one with the
@@ -455,6 +453,13 @@ class CollectLook(pyblish.api.InstancePlugin):
if len(files) == 0:
self.log.error("No valid files found from node `%s`" % node)
+ self.log.info("collection of resource done:")
+ self.log.info(" - node: {}".format(node))
+ self.log.info(" - attribute: {}".format(attribute))
+ self.log.info(" - source: {}".format(source))
+ self.log.info(" - file: {}".format(files))
+ self.log.info(" - color space: {}".format(color_space))
+
# Define the resource
return {"node": node,
"attribute": attribute,
diff --git a/pype/plugins/maya/publish/collect_yeti_rig.py b/pype/plugins/maya/publish/collect_yeti_rig.py
index 7ab5649c0b..c743b2c00b 100644
--- a/pype/plugins/maya/publish/collect_yeti_rig.py
+++ b/pype/plugins/maya/publish/collect_yeti_rig.py
@@ -119,11 +119,15 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
texture_filenames = []
if image_search_paths:
+
# TODO: Somehow this uses OS environment path separator, `:` vs `;`
# Later on check whether this is pipeline OS cross-compatible.
image_search_paths = [p for p in
image_search_paths.split(os.path.pathsep) if p]
+ # find all ${TOKEN} tokens and replace them with $TOKEN env. variable
+ image_search_paths = self._replace_tokens(image_search_paths)
+
# List all related textures
texture_filenames = cmds.pgYetiCommand(node, listTextures=True)
self.log.info("Found %i texture(s)" % len(texture_filenames))
@@ -140,6 +144,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
"atttribute'" % node)
# Collect all texture files
+ # find all ${TOKEN} tokens and replace them with $TOKEN env. variable
+ texture_filenames = self._replace_tokens(texture_filenames)
for texture in texture_filenames:
files = []
@@ -283,3 +289,20 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
collection, remainder = clique.assemble(files, patterns=pattern)
return collection
+
+ def _replace_tokens(self, strings):
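+ """Replace ${TOKEN} tokens with matching environment variable values."""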
+ env_re = re.compile(r"\$\{(\w+)\}")
+
+ replaced = []
+ for s in strings:
+ matches = re.finditer(env_re, s)
+ for m in matches:
+ try:
+ s = s.replace(m.group(), os.environ[m.group(1)])
+ except KeyError:
+ msg = "Cannot find requested {} in environment".format(
+ m.group(1))
+ self.log.error(msg)
+ raise RuntimeError(msg)
+ replaced.append(s)
+ return replaced
diff --git a/pype/plugins/maya/publish/extract_ass.py b/pype/plugins/maya/publish/extract_ass.py
index 1fed6c8dd7..4cf394aefe 100644
--- a/pype/plugins/maya/publish/extract_ass.py
+++ b/pype/plugins/maya/publish/extract_ass.py
@@ -17,11 +17,15 @@ class ExtractAssStandin(pype.api.Extractor):
label = "Ass Standin (.ass)"
hosts = ["maya"]
families = ["ass"]
+ asciiAss = False
def process(self, instance):
+ sequence = instance.data.get("exportSequence", False)
+
staging_dir = self.staging_dir(instance)
filename = "{}.ass".format(instance.name)
+ filenames = list()
file_path = os.path.join(staging_dir, filename)
# Write out .ass file
@@ -29,13 +33,49 @@ class ExtractAssStandin(pype.api.Extractor):
with avalon.maya.maintained_selection():
self.log.info("Writing: {}".format(instance.data["setMembers"]))
cmds.select(instance.data["setMembers"], noExpand=True)
- cmds.arnoldExportAss( filename=file_path,
- selected=True,
- asciiAss=True,
- shadowLinks=True,
- lightLinks=True,
- boundingBox=True
- )
+
+ if sequence:
+ self.log.info("Extracting ass sequence")
+
+ # Collect the start and end including handles
+ start = instance.data.get("frameStart", 1)
+ end = instance.data.get("frameEnd", 1)
+ handles = instance.data.get("handles", 0)
+ step = instance.data.get("step", 0)
+ if handles:
+ start -= handles
+ end += handles
+
+ exported_files = cmds.arnoldExportAss(filename=file_path,
+ selected=True,
+ asciiAss=self.asciiAss,
+ shadowLinks=True,
+ lightLinks=True,
+ boundingBox=True,
+ startFrame=start,
+ endFrame=end,
+ frameStep=step
+ )
+ for file in exported_files:
+ filenames.append(os.path.split(file)[1])
+ self.log.info("Exported: {}".format(filenames))
+ else:
+ self.log.info("Extracting ass")
+ cmds.arnoldExportAss(filename=file_path,
+ selected=True,
+ asciiAss=False,
+ shadowLinks=True,
+ lightLinks=True,
+ boundingBox=True
+ )
+ self.log.info("Extracted {}".format(filename))
+ filenames = filename
+ optionals = [
+ "frameStart", "frameEnd", "step", "handles",
+ "handleEnd", "handleStart"
+ ]
+ for key in optionals:
+ instance.data.pop(key, None)
if "representations" not in instance.data:
instance.data["representations"] = []
@@ -43,9 +83,13 @@ class ExtractAssStandin(pype.api.Extractor):
representation = {
'name': 'ass',
'ext': 'ass',
- 'files': filename,
+ 'files': filenames,
"stagingDir": staging_dir
}
+
+ if sequence:
+ representation['frameStart'] = start
+
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s"
diff --git a/pype/plugins/maya/publish/extract_assproxy.py b/pype/plugins/maya/publish/extract_assproxy.py
index 34c3113e11..59684febe1 100644
--- a/pype/plugins/maya/publish/extract_assproxy.py
+++ b/pype/plugins/maya/publish/extract_assproxy.py
@@ -43,8 +43,13 @@ class ExtractAssProxy(pype.api.Extractor):
# Get only the shape contents we need in such a way that we avoid
# taking along intermediateObjects
- members = instance.data['proxy']
- members = cmds.ls(members,
+ proxy = instance.data.get('proxy', None)
+
+ if not proxy:
+ self.log.info("no proxy mesh")
+ return
+
+ members = cmds.ls(proxy,
dag=True,
transforms=True,
noIntermediate=True)
diff --git a/pype/plugins/maya/publish/extract_look.py b/pype/plugins/maya/publish/extract_look.py
index 4da28da2a1..fa6ecd72c3 100644
--- a/pype/plugins/maya/publish/extract_look.py
+++ b/pype/plugins/maya/publish/extract_look.py
@@ -38,11 +38,7 @@ def source_hash(filepath, *args):
file_name = os.path.basename(filepath)
time = str(os.path.getmtime(filepath))
size = str(os.path.getsize(filepath))
- return "|".join([
- file_name,
- time,
- size
- ] + list(args)).replace(".", ",")
+ return "|".join([file_name, time, size] + list(args)).replace(".", ",")
def find_paths_by_hash(texture_hash):
@@ -64,36 +60,33 @@ def maketx(source, destination, *args):
"""
cmd = [
- "maketx",
- "-v", # verbose
- "-u", # update mode
- # unpremultiply before conversion (recommended when alpha present)
- "--unpremult",
- "--checknan",
- # use oiio-optimized settings for tile-size, planarconfig, metadata
- "--oiio",
- "--filter lanczos3"
- ]
+ "maketx",
+ "-v", # verbose
+ "-u", # update mode
+ # unpremultiply before conversion (recommended when alpha present)
+ "--unpremult",
+ "--checknan",
+ # use oiio-optimized settings for tile-size, planarconfig, metadata
+ "--oiio",
+ "--filter lanczos3",
+ ]
cmd.extend(args)
- cmd.extend([
- "-o", destination,
- source
- ])
+ cmd.extend(["-o", destination, source])
+
+ cmd = " ".join(cmd)
CREATE_NO_WINDOW = 0x08000000
- kwargs = dict(
- args=cmd,
- stderr=subprocess.STDOUT
- )
+ kwargs = dict(args=cmd, stderr=subprocess.STDOUT)
if sys.platform == "win32":
- kwargs["creationflags"] = CREATE_NO_WIDOW
+ kwargs["creationflags"] = CREATE_NO_WINDOW
try:
out = subprocess.check_output(**kwargs)
except subprocess.CalledProcessError as exc:
print(exc)
import traceback
+
traceback.print_exc()
raise
@@ -180,41 +173,51 @@ class ExtractLook(pype.api.Extractor):
# Preserve color space values (force value after filepath change)
# This will also trigger in the same order at end of context to
# ensure after context it's still the original value.
- color_space = resource.get('color_space')
+ color_space = resource.get("color_space")
for f in resource["files"]:
- files_metadata[os.path.normpath(f)] = {'color_space': color_space}
+ files_metadata[os.path.normpath(f)] = {
+ "color_space": color_space}
# files.update(os.path.normpath(f))
# Process the resource files
transfers = list()
hardlinks = list()
hashes = dict()
+ forceCopy = instance.data.get("forceCopy", False)
self.log.info(files)
for filepath in files_metadata:
- cspace = files_metadata[filepath]['color_space']
+ cspace = files_metadata[filepath]["color_space"]
linearise = False
- if cspace == 'sRGB':
+ if cspace == "sRGB":
linearise = True
+ # set its file node to 'raw' as tx will be linearized
+ files_metadata[filepath]["color_space"] = "raw"
source, mode, hash = self._process_texture(
- filepath, do_maketx, staging=dir_path, linearise=linearise
- )
- destination = self.resource_destination(
- instance, source, do_maketx
+ filepath,
+ do_maketx,
+ staging=dir_path,
+ linearise=linearise,
+ force=forceCopy
)
+ destination = self.resource_destination(instance,
+ source,
+ do_maketx)
# Force copy is specified.
- if instance.data.get("forceCopy", False):
+ if forceCopy:
mode = COPY
if mode == COPY:
transfers.append((source, destination))
+ self.log.info('copying')
elif mode == HARDLINK:
hardlinks.append((source, destination))
+ self.log.info('hardlinking')
# Store the hashes from hash to destination to include in the
# database
@@ -235,13 +238,14 @@ class ExtractLook(pype.api.Extractor):
# Preserve color space values (force value after filepath change)
# This will also trigger in the same order at end of context to
# ensure after context it's still the original value.
- color_space_attr = resource['node'] + ".colorSpace"
+ color_space_attr = resource["node"] + ".colorSpace"
color_space = cmds.getAttr(color_space_attr)
-
+ if files_metadata[source]["color_space"] == "raw":
+ # set colorspace to raw if we linearized it
+ color_space = "Raw"
# Remap file node filename to destination
- attr = resource['attribute']
+ attr = resource["attribute"]
remap[attr] = destinations[source]
-
remap[color_space_attr] = color_space
self.log.info("Finished remapping destinations ...")
@@ -268,13 +272,15 @@ class ExtractLook(pype.api.Extractor):
channels=True,
constraints=True,
expressions=True,
- constructionHistory=True
+ constructionHistory=True,
)
# Write the JSON data
self.log.info("Extract json..")
- data = {"attributes": lookdata["attributes"],
- "relationships": relationships}
+ data = {
+ "attributes": lookdata["attributes"],
+ "relationships": relationships
+ }
with open(json_path, "w") as f:
json.dump(data, f)
@@ -293,7 +299,7 @@ class ExtractLook(pype.api.Extractor):
instance.data["representations"].append(
{
"name": "ma",
- "ext": 'ma',
+ "ext": "ma",
"files": os.path.basename(maya_fname),
"stagingDir": os.path.dirname(maya_fname),
}
@@ -301,7 +307,7 @@ class ExtractLook(pype.api.Extractor):
instance.data["representations"].append(
{
"name": "json",
- "ext": 'json',
+ "ext": "json",
"files": os.path.basename(json_fname),
"stagingDir": os.path.dirname(json_fname),
}
@@ -314,13 +320,18 @@ class ExtractLook(pype.api.Extractor):
# Source hash for the textures
instance.data["sourceHashes"] = hashes
- self.log.info("Extracted instance '%s' to: %s" % (
- instance.name, maya_path)
- )
+ """
+ self.log.info("Returning colorspaces to their original values ...")
+ for attr, value in remap.items():
+ self.log.info(" - {}: {}".format(attr, value))
+ cmds.setAttr(attr, value, type="string")
+ """
+ self.log.info("Extracted instance '%s' to: %s" % (instance.name,
+ maya_path))
def resource_destination(self, instance, filepath, do_maketx):
- anatomy = instance.context.data['anatomy']
+ anatomy = instance.context.data["anatomy"]
self.create_destination_template(instance, anatomy)
@@ -332,12 +343,10 @@ class ExtractLook(pype.api.Extractor):
ext = ".tx"
return os.path.join(
- instance.data["assumedDestination"],
- "resources",
- basename + ext
+ instance.data["assumedDestination"], "resources", basename + ext
)
- def _process_texture(self, filepath, do_maketx, staging, linearise):
+ def _process_texture(self, filepath, do_maketx, staging, linearise, force):
"""Process a single texture file on disk for publishing.
This will:
1. Check whether it's already published, if so it will do hardlink
@@ -359,24 +368,20 @@ class ExtractLook(pype.api.Extractor):
# If source has been published before with the same settings,
# then don't reprocess but hardlink from the original
existing = find_paths_by_hash(texture_hash)
- if existing:
+ if existing and not force:
self.log.info("Found hash in database, preparing hardlink..")
source = next((p for p in existing if os.path.exists(p)), None)
if filepath:
return source, HARDLINK, texture_hash
else:
self.log.warning(
- "Paths not found on disk, "
- "skipping hardlink: %s" % (existing,)
+ ("Paths not found on disk, "
+ "skipping hardlink: %s") % (existing,)
)
if do_maketx and ext != ".tx":
# Produce .tx file in staging if source file is not .tx
- converted = os.path.join(
- staging,
- "resources",
- fname + ".tx"
- )
+ converted = os.path.join(staging, "resources", fname + ".tx")
if linearise:
self.log.info("tx: converting sRGB -> linear")
@@ -389,9 +394,15 @@ class ExtractLook(pype.api.Extractor):
os.makedirs(os.path.dirname(converted))
self.log.info("Generating .tx file for %s .." % filepath)
- maketx(filepath, converted,
- # Include `source-hash` as string metadata
- "-sattrib", "sourceHash", texture_hash, colorconvert)
+ maketx(
+ filepath,
+ converted,
+ # Include `source-hash` as string metadata
+ "-sattrib",
+ "sourceHash",
+ texture_hash,
+ colorconvert,
+ )
return converted, COPY, texture_hash
@@ -422,10 +433,10 @@ class ExtractLook(pype.api.Extractor):
"type": "project",
"name": project_name
},
- projection={"config": True, "data": True
+ projection={"config": True, "data": True}
)
- template = a_template['publish']['path']
+ template = a_template["publish"]["path"]
# anatomy = instance.context.data['anatomy']
asset = io.find_one({
@@ -435,8 +446,8 @@ class ExtractLook(pype.api.Extractor):
})
assert asset, ("No asset found by the name '{}' "
- "in project '{}'".format(asset_name, project_name))
- silo = asset.get('silo')
+ "in project '{}'").format(asset_name, project_name)
+ silo = asset.get("silo")
subset = io.find_one({
"type": "subset",
@@ -460,27 +471,28 @@ class ExtractLook(pype.api.Extractor):
if version is not None:
version_number += version["name"]
- if instance.data.get('version'):
- version_number = int(instance.data.get('version'))
+ if instance.data.get("version"):
+ version_number = int(instance.data.get("version"))
- padding = int(a_template['render']['padding'])
+ padding = int(a_template["render"]["padding"])
- hierarchy = asset['data']['parents']
+ hierarchy = asset["data"]["parents"]
if hierarchy:
# hierarchy = os.path.sep.join(hierarchy)
hierarchy = "/".join(hierarchy)
- template_data = {"root": api.Session["AVALON_PROJECTS"],
- "project": {"name": project_name,
- "code": project['data']['code']},
- "silo": silo,
- "family": instance.data['family'],
- "asset": asset_name,
- "subset": subset_name,
- "frame": ('#' * padding),
- "version": version_number,
- "hierarchy": hierarchy,
- "representation": "TEMP"}
+ template_data = {
+ "root": api.Session["AVALON_PROJECTS"],
+ "project": {"name": project_name, "code": project["data"]["code"]},
+ "silo": silo,
+ "family": instance.data["family"],
+ "asset": asset_name,
+ "subset": subset_name,
+ "frame": ("#" * padding),
+ "version": version_number,
+ "hierarchy": hierarchy,
+ "representation": "TEMP",
+ }
instance.data["assumedTemplateData"] = template_data
self.log.info(template_data)
diff --git a/pype/plugins/maya/publish/validate_node_ids_in_database.py b/pype/plugins/maya/publish/validate_node_ids_in_database.py
index 7347ce2ab2..fdcf0b20b0 100644
--- a/pype/plugins/maya/publish/validate_node_ids_in_database.py
+++ b/pype/plugins/maya/publish/validate_node_ids_in_database.py
@@ -1,6 +1,6 @@
import pyblish.api
-import avalon.io as io
+from avalon import io
import pype.api
import pype.maya.action
diff --git a/pype/plugins/maya/publish/validate_node_ids_related.py b/pype/plugins/maya/publish/validate_node_ids_related.py
index 7e8565c297..191ac0c2f8 100644
--- a/pype/plugins/maya/publish/validate_node_ids_related.py
+++ b/pype/plugins/maya/publish/validate_node_ids_related.py
@@ -1,7 +1,7 @@
import pyblish.api
import pype.api
-import avalon.io as io
+from avalon import io
import pype.maya.action
from pype.maya import lib
diff --git a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py
index dd66b4fb3a..441658297d 100644
--- a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py
+++ b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py
@@ -1,7 +1,7 @@
import nuke
import os
import pyblish.api
-import avalon.io as io
+from avalon import io
# TODO: add repair function
diff --git a/pype/plugins/nuke/create/create_backdrop.py b/pype/plugins/nuke/create/create_backdrop.py
index b5600e8b37..767e92b592 100644
--- a/pype/plugins/nuke/create/create_backdrop.py
+++ b/pype/plugins/nuke/create/create_backdrop.py
@@ -1,16 +1,50 @@
from avalon.nuke.pipeline import Creator
-
+from avalon.nuke import lib as anlib
+import nuke
class CreateBackdrop(Creator):
"""Add Publishable Backdrop"""
- name = "backdrop"
- label = "Backdrop"
- family = "group"
- icon = "cube"
+ name = "nukenodes"
+ label = "Create Backdrop"
+ family = "nukenodes"
+ icon = "file-archive-o"
defaults = ["Main"]
def __init__(self, *args, **kwargs):
super(CreateBackdrop, self).__init__(*args, **kwargs)
-
+ self.nodes = nuke.selectedNodes()
+ self.node_color = "0xdfea5dff"
return
+
+ def process(self):
+ from nukescripts import autoBackdrop
+ nodes = list()
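+ # with "use selection" enabled wrap the selected nodes, otherwise create an empty backdrop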
+ if (self.options or {}).get("useSelection"):
+ nodes = self.nodes
+
+ if len(nodes) >= 1:
+ anlib.select_nodes(nodes)
+ bckd_node = autoBackdrop()
+ bckd_node["name"].setValue("{}_BDN".format(self.name))
+ bckd_node["tile_color"].setValue(int(self.node_color, 16))
+ bckd_node["note_font_size"].setValue(24)
+ bckd_node["label"].setValue("[{}]".format(self.name))
+ # add avalon knobs
+ instance = anlib.imprint(bckd_node, self.data)
+
+ return instance
+ else:
+ nuke.message("Please select nodes you "
+ "wish to add to a container")
+ return
+ else:
+ bckd_node = autoBackdrop()
+ bckd_node["name"].setValue("{}_BDN".format(self.name))
+ bckd_node["tile_color"].setValue(int(self.node_color, 16))
+ bckd_node["note_font_size"].setValue(24)
+ bckd_node["label"].setValue("[{}]".format(self.name))
+ # add avalon knobs
+ instance = anlib.imprint(bckd_node, self.data)
+
+ return instance
diff --git a/pype/plugins/nuke/create/create_gizmo.py b/pype/plugins/nuke/create/create_gizmo.py
new file mode 100644
index 0000000000..41229862e3
--- /dev/null
+++ b/pype/plugins/nuke/create/create_gizmo.py
@@ -0,0 +1,79 @@
+from avalon.nuke.pipeline import Creator
+from avalon.nuke import lib as anlib
+import nuke
+import nukescripts
+
+class CreateGizmo(Creator):
+ """Add Publishable "gizmo" group
+
+ The name "gizmo" is used symbolically, as it should be
+ familiar to Nuke users: a group of nodes distributed
+ downstream in the workflow
+ """
+
+ name = "gizmo"
+ label = "Gizmo"
+ family = "gizmo"
+ icon = "file-archive-o"
+ defaults = ["ViewerInput", "Lut", "Effect"]
+
+ def __init__(self, *args, **kwargs):
+ super(CreateGizmo, self).__init__(*args, **kwargs)
+ self.nodes = nuke.selectedNodes()
+ self.node_color = "0x7533c1ff"
+ return
+
+ def process(self):
+ if (self.options or {}).get("useSelection"):
+ nodes = self.nodes
+ self.log.info(len(nodes))
+ if len(nodes) == 1:
+ anlib.select_nodes(nodes)
+ node = nodes[-1]
+ # check if Group node
+ if node.Class() in "Group":
+ node["name"].setValue("{}_GZM".format(self.name))
+ node["tile_color"].setValue(int(self.node_color, 16))
+ return anlib.imprint(node, self.data)
+ else:
+ nuke.message("Please select a group node "
+ "you wish to publish as the gizmo")
+
+ if len(nodes) >= 2:
+ anlib.select_nodes(nodes)
+ nuke.makeGroup()
+ gizmo_node = nuke.selectedNode()
+ gizmo_node["name"].setValue("{}_GZM".format(self.name))
+ gizmo_node["tile_color"].setValue(int(self.node_color, 16))
+
+ # add sticky note with guide
+ with gizmo_node:
+ sticky = nuke.createNode("StickyNote")
+ sticky["label"].setValue(
+ "Add following:\n- set Input"
+ " nodes\n- set one Output1\n"
+ "- create User knobs on the group")
+
+ # add avalon knobs
+ return anlib.imprint(gizmo_node, self.data)
+
+ else:
+ nuke.message("Please select nodes you "
+ "wish to add to the gizmo")
+ return
+ else:
+ with anlib.maintained_selection():
+ gizmo_node = nuke.createNode("Group")
+ gizmo_node["name"].setValue("{}_GZM".format(self.name))
+ gizmo_node["tile_color"].setValue(int(self.node_color, 16))
+
+ # add sticky note with guide
+ with gizmo_node:
+ sticky = nuke.createNode("StickyNote")
+ sticky["label"].setValue(
+ "Add following:\n- add Input"
+ " nodes\n- add one Output1\n"
+ "- create User knobs on the group")
+
+ # add avalon knobs
+ return anlib.imprint(gizmo_node, self.data)
diff --git a/pype/plugins/nuke/create/create_read.py b/pype/plugins/nuke/create/create_read.py
index 87bb45a6ad..1aa7e68746 100644
--- a/pype/plugins/nuke/create/create_read.py
+++ b/pype/plugins/nuke/create/create_read.py
@@ -6,9 +6,6 @@ from pype import api as pype
import nuke
-log = pype.Logger().get_logger(__name__, "nuke")
-
-
class CrateRead(avalon.nuke.Creator):
# change this to template preset
name = "ReadCopy"
diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py
index 5eaf4279ee..f522c50511 100644
--- a/pype/plugins/nuke/create/create_write.py
+++ b/pype/plugins/nuke/create/create_write.py
@@ -7,10 +7,6 @@ from pypeapp import config
import nuke
-
-log = pype.Logger().get_logger(__name__, "nuke")
-
-
class CreateWriteRender(plugin.PypeCreator):
# change this to template preset
name = "WriteRender"
@@ -24,8 +20,6 @@ class CreateWriteRender(plugin.PypeCreator):
def __init__(self, *args, **kwargs):
super(CreateWriteRender, self).__init__(*args, **kwargs)
- self.name = self.data["subset"]
-
data = OrderedDict()
data["family"] = self.family
@@ -36,6 +30,7 @@ class CreateWriteRender(plugin.PypeCreator):
data.update({k: v})
self.data = data
+ self.nodes = nuke.selectedNodes()
self.log.info("self.data: '{}'".format(self.data))
def process(self):
@@ -48,9 +43,9 @@ class CreateWriteRender(plugin.PypeCreator):
# use selection
if (self.options or {}).get("useSelection"):
- nodes = nuke.selectedNodes()
+ nodes = self.nodes
- assert len(nodes) == 1, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`")
+ assert len(nodes) < 2, self.log.error("Select only one node. The node you want to connect to, or tick off `Use selection`")
selected_node = nodes[0]
inputs = [selected_node]
diff --git a/pype/plugins/nuke/load/load_backdrop.py b/pype/plugins/nuke/load/load_backdrop.py
new file mode 100644
index 0000000000..7f58d4e9ec
--- /dev/null
+++ b/pype/plugins/nuke/load/load_backdrop.py
@@ -0,0 +1,319 @@
+from avalon import api, style, io
+import nuke
+import nukescripts
+from pype.nuke import lib as pnlib
+from avalon.nuke import lib as anlib
+from avalon.nuke import containerise, update_container
+reload(pnlib)
+
+class LoadBackdropNodes(api.Loader):
+ """Loading Published Backdrop nodes (workfile, nukenodes)"""
+
+ representations = ["nk"]
+ families = ["workfile", "nukenodes"]
+
+ label = "Iport Nuke Nodes"
+ order = 0
+ icon = "eye"
+ color = style.colors.light
+ node_color = "0x7533c1ff"
+
+ def load(self, context, name, namespace, data):
+ """
+ Loading function to import a .nk file into the script and
+ wrap it in a backdrop
+
+ Arguments:
+ context (dict): context of version
+ name (str): name of the version
+ namespace (str): asset name
+ data (dict): compulsory attribute > not used
+
+ Returns:
+ nuke node: containerised nuke node object
+ """
+
+ # get main variables
+ version = context['version']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ namespace = namespace or context['asset']['name']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ # prepare data for imprinting
+ # add additional metadata from the version to imprint to Avalon knob
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # getting file path
+ file = self.fname.replace("\\", "/")
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ # Get mouse position
+ n = nuke.createNode("NoOp")
+ xcursor, ycursor = (n.xpos(), n.ypos())
+ anlib.reset_selection()
+ nuke.delete(n)
+
+ bdn_frame = 50
+
+ with anlib.maintained_selection():
+
+ # add group from nk
+ nuke.nodePaste(file)
+
+ # get all pasted nodes
+ new_nodes = list()
+ nodes = nuke.selectedNodes()
+
+ # get pointer position in DAG
+ xpointer, ypointer = pnlib.find_free_space_to_paste_nodes(nodes, direction="right", offset=200+bdn_frame)
+
+ # reset position to all nodes and replace inputs and output
+ for n in nodes:
+ anlib.reset_selection()
+ xpos = (n.xpos() - xcursor) + xpointer
+ ypos = (n.ypos() - ycursor) + ypointer
+ n.setXYpos(xpos, ypos)
+
+ # replace Input nodes with Dot nodes
+ if n.Class() in "Input":
+ dot = nuke.createNode("Dot")
+ new_name = n.name().replace("INP", "DOT")
+ dot.setName(new_name)
+ dot["label"].setValue(new_name)
+ dot.setXYpos(xpos, ypos)
+ new_nodes.append(dot)
+
+ # rewire
+ dep = n.dependent()
+ for d in dep:
+ index = next((i for i, dpcy in enumerate(
+ d.dependencies())
+ if n is dpcy), 0)
+ d.setInput(index, dot)
+
+ # remove Input node
+ anlib.reset_selection()
+ nuke.delete(n)
+ continue
+
+ # replace Output node with a Dot node
+ elif n.Class() in "Output":
+ dot = nuke.createNode("Dot")
+ new_name = n.name() + "_DOT"
+ dot.setName(new_name)
+ dot["label"].setValue(new_name)
+ dot.setXYpos(xpos, ypos)
+ new_nodes.append(dot)
+
+ # rewire
+ dep = next((d for d in n.dependencies()), None)
+ if dep:
+ dot.setInput(0, dep)
+
+ # remove Output node
+ anlib.reset_selection()
+ nuke.delete(n)
+ continue
+ else:
+ new_nodes.append(n)
+
+ # reselect nodes with new Dot instead of Inputs and Output
+ anlib.reset_selection()
+ anlib.select_nodes(new_nodes)
+ # place on backdrop
+ bdn = nukescripts.autoBackdrop()
+
+ # add frame offset
+ xpos = bdn.xpos() - bdn_frame
+ ypos = bdn.ypos() - bdn_frame
+ bdwidth = bdn["bdwidth"].value() + (bdn_frame*2)
+ bdheight = bdn["bdheight"].value() + (bdn_frame*2)
+
+ bdn["xpos"].setValue(xpos)
+ bdn["ypos"].setValue(ypos)
+ bdn["bdwidth"].setValue(bdwidth)
+ bdn["bdheight"].setValue(bdheight)
+
+ bdn["name"].setValue(object_name)
+ bdn["label"].setValue("Version tracked frame: \n`{}`\n\nPLEASE DO NOT REMOVE OR MOVE \nANYTHING FROM THIS FRAME!".format(object_name))
+ bdn["note_font_size"].setValue(20)
+
+ return containerise(
+ node=bdn,
+ name=name,
+ namespace=namespace,
+ context=context,
+ loader=self.__class__.__name__,
+ data=data_imprint)
+
+ def update(self, container, representation):
+ """Update the Loader's path
+
+ Nuke automatically tries to reset some variables when changing
+ the loader's path to a new file. These automatic changes are to its
+ inputs:
+
+ """
+
+ # get main variables
+ # Get version from io
+ version = io.find_one({
+ "type": "version",
+ "_id": representation["parent"]
+ })
+ # get corresponding node
+ GN = nuke.toNode(container['objectName'])
+
+ file = api.get_representation_path(representation).replace("\\", "/")
+ context = representation["context"]
+ name = container['name']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ namespace = container['namespace']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"representation": str(representation["_id"]),
+ "frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ with anlib.maintained_selection():
+ xpos = GN.xpos()
+ ypos = GN.ypos()
+ avalon_data = anlib.get_avalon_knob_data(GN)
+ nuke.delete(GN)
+ # add group from nk
+ nuke.nodePaste(file)
+
+ GN = nuke.selectedNode()
+ anlib.set_avalon_knob_data(GN, avalon_data)
+ GN.setXYpos(xpos, ypos)
+ GN["name"].setValue(object_name)
+
+ # get all versions in list
+ versions = io.find({
+ "type": "version",
+ "parent": version["parent"]
+ }).distinct('name')
+
+ max_version = max(versions)
+
+ # change color of node
+ if version.get("name") not in [max_version]:
+ GN["tile_color"].setValue(int("0xd88467ff", 16))
+ else:
+ GN["tile_color"].setValue(int(self.node_color, 16))
+
+ self.log.info("udated to version: {}".format(version.get("name")))
+
+ return update_container(GN, data_imprint)
+
+ def connect_active_viewer(self, group_node):
+ """
+ Finds the active viewer, places the node under it and adds
+ the group's name to the viewer's Input Process knob
+
+ Arguments:
+ group_node (nuke node): nuke group node object
+
+ """
+ group_node_name = group_node["name"].value()
+
+ viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
+ if len(viewer) > 0:
+ viewer = viewer[0]
+ else:
+ self.log.error("Please create Viewer node before you "
+ "run this action again")
+ return None
+
+ # get coordinates of Viewer1
+ xpos = viewer["xpos"].value()
+ ypos = viewer["ypos"].value()
+
+ ypos += 150
+
+ viewer["ypos"].setValue(ypos)
+
+ # set coordinates to group node
+ group_node["xpos"].setValue(xpos)
+ group_node["ypos"].setValue(ypos + 50)
+
+ # add group node name to Viewer Input Process
+ viewer["input_process_node"].setValue(group_node_name)
+
+ # put backdrop under
+ pnlib.create_backdrop(label="Input Process", layer=2,
+ nodes=[viewer, group_node], color="0x7c7faaff")
+
+ return True
+
+ def get_item(self, data, trackIndex, subTrackIndex):
+ return {key: val for key, val in data.items()
+ if subTrackIndex == val["subTrackIndex"]
+ if trackIndex == val["trackIndex"]}
+
+ def byteify(self, input):
+ """
+ Converts unicode strings to byte strings.
+ It recursively walks through dictionaries and lists.
+
+ Arguments:
+ input (dict/str): input
+
+ Returns:
+ dict: with fixed values and keys
+
+ """
+
+ if isinstance(input, dict):
+ return {self.byteify(key): self.byteify(value)
+ for key, value in input.iteritems()}
+ elif isinstance(input, list):
+ return [self.byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from avalon.nuke import viewer_update_and_undo_stop
+ node = nuke.toNode(container['objectName'])
+ with viewer_update_and_undo_stop():
+ nuke.delete(node)
diff --git a/pype/plugins/nuke/load/load_gizmo_ip.py b/pype/plugins/nuke/load/load_gizmo_ip.py
new file mode 100644
index 0000000000..0d78c14214
--- /dev/null
+++ b/pype/plugins/nuke/load/load_gizmo_ip.py
@@ -0,0 +1,239 @@
+from avalon import api, style, io
+import nuke
+from pype.nuke import lib as pnlib
+from avalon.nuke import lib as anlib
+from avalon.nuke import containerise, update_container
+
+
+class LoadGizmoInputProcess(api.Loader):
+ """Loading colorspace soft effect exported from nukestudio"""
+
+ representations = ["gizmo"]
+ families = ["gizmo"]
+
+ label = "Load Gizmo - Input Process"
+ order = 0
+ icon = "eye"
+ color = style.colors.alert
+ node_color = "0x7533c1ff"
+
+ def load(self, context, name, namespace, data):
+ """
+ Loading function to get Gizmo as Input Process on viewer
+
+ Arguments:
+ context (dict): context of version
+ name (str): name of the version
+ namespace (str): asset name
+ data (dict): compulsory attribute > not used
+
+ Returns:
+ nuke node: containerised nuke node object
+ """
+
+ # get main variables
+ version = context['version']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ namespace = namespace or context['asset']['name']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ # prepare data for imprinting
+ # add additional metadata from the version to imprint to Avalon knob
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # getting file path
+ file = self.fname.replace("\\", "/")
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ with anlib.maintained_selection():
+ # add group from nk
+ nuke.nodePaste(file)
+
+ GN = nuke.selectedNode()
+
+ GN["name"].setValue(object_name)
+
+ # try to place it under Viewer1
+ if not self.connect_active_viewer(GN):
+ nuke.delete(GN)
+ return
+
+ return containerise(
+ node=GN,
+ name=name,
+ namespace=namespace,
+ context=context,
+ loader=self.__class__.__name__,
+ data=data_imprint)
+
+ def update(self, container, representation):
+ """Update the Loader's path
+
+ Nuke automatically tries to reset some variables when changing
+ the loader's path to a new file. These automatic changes are to its
+ inputs:
+
+ """
+
+ # get main variables
+ # Get version from io
+ version = io.find_one({
+ "type": "version",
+ "_id": representation["parent"]
+ })
+ # get corresponding node
+ GN = nuke.toNode(container['objectName'])
+
+ file = api.get_representation_path(representation).replace("\\", "/")
+ context = representation["context"]
+ name = container['name']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ namespace = container['namespace']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"representation": str(representation["_id"]),
+ "frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ with anlib.maintained_selection():
+ xpos = GN.xpos()
+ ypos = GN.ypos()
+ avalon_data = anlib.get_avalon_knob_data(GN)
+ nuke.delete(GN)
+ # add group from nk
+ nuke.nodePaste(file)
+
+ GN = nuke.selectedNode()
+ anlib.set_avalon_knob_data(GN, avalon_data)
+ GN.setXYpos(xpos, ypos)
+ GN["name"].setValue(object_name)
+
+ # get all versions in list
+ versions = io.find({
+ "type": "version",
+ "parent": version["parent"]
+ }).distinct('name')
+
+ max_version = max(versions)
+
+ # change color of node
+ if version.get("name") not in [max_version]:
+ GN["tile_color"].setValue(int("0xd88467ff", 16))
+ else:
+ GN["tile_color"].setValue(int(self.node_color, 16))
+
+ self.log.info("udated to version: {}".format(version.get("name")))
+
+ return update_container(GN, data_imprint)
+
+ def connect_active_viewer(self, group_node):
+ """
+ Finds the active viewer, places the node under it and adds
+ the group's name to the viewer's Input Process knob
+
+ Arguments:
+ group_node (nuke node): nuke group node object
+
+ """
+ group_node_name = group_node["name"].value()
+
+ viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
+ if len(viewer) > 0:
+ viewer = viewer[0]
+ else:
+ self.log.error("Please create Viewer node before you "
+ "run this action again")
+ return None
+
+ # get coordinates of Viewer1
+ xpos = viewer["xpos"].value()
+ ypos = viewer["ypos"].value()
+
+ ypos += 150
+
+ viewer["ypos"].setValue(ypos)
+
+ # set coordinates to group node
+ group_node["xpos"].setValue(xpos)
+ group_node["ypos"].setValue(ypos + 50)
+
+ # add group node name to Viewer Input Process
+ viewer["input_process_node"].setValue(group_node_name)
+
+ # put backdrop under
+ pnlib.create_backdrop(label="Input Process", layer=2,
+ nodes=[viewer, group_node], color="0x7c7faaff")
+
+ return True
+
+ def get_item(self, data, trackIndex, subTrackIndex):
+ return {key: val for key, val in data.items()
+ if subTrackIndex == val["subTrackIndex"]
+ if trackIndex == val["trackIndex"]}
+
+ def byteify(self, input):
+ """
+ Converts unicode strings to byte strings.
+ It recursively walks through dictionaries and lists.
+
+ Arguments:
+ input (dict/str): input
+
+ Returns:
+ dict: with fixed values and keys
+
+ """
+
+ if isinstance(input, dict):
+ return {self.byteify(key): self.byteify(value)
+ for key, value in input.iteritems()}
+ elif isinstance(input, list):
+ return [self.byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from avalon.nuke import viewer_update_and_undo_stop
+ node = nuke.toNode(container['objectName'])
+ with viewer_update_and_undo_stop():
+ nuke.delete(node)
diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py
index e6daaaff8a..e598839405 100644
--- a/pype/plugins/nuke/load/load_mov.py
+++ b/pype/plugins/nuke/load/load_mov.py
@@ -1,9 +1,6 @@
-import os
import contextlib
-from avalon import api
-import avalon.io as io
-
+from avalon import api, io
import nuke
@@ -102,7 +99,7 @@ class LoadMov(api.Loader):
handle_start = version_data.get("handleStart", None)
handle_end = version_data.get("handleEnd", None)
repr_cont = context["representation"]["context"]
-
+
# fix handle start and end if none are available
if not handle_start and not handle_end:
handle_start = handles
diff --git a/pype/plugins/nuke/load/load_script_precomp.py b/pype/plugins/nuke/load/load_script_precomp.py
index e84e23a890..310157f099 100644
--- a/pype/plugins/nuke/load/load_script_precomp.py
+++ b/pype/plugins/nuke/load/load_script_precomp.py
@@ -7,7 +7,7 @@ class LinkAsGroup(api.Loader):
"""Copy the published file to be pasted at the desired location"""
representations = ["nk"]
- families = ["workfile"]
+ families = ["workfile", "nukenodes"]
label = "Load Precomp"
order = 0
@@ -63,8 +63,6 @@ class LinkAsGroup(api.Loader):
colorspace = context["version"]["data"].get("colorspace", None)
self.log.info("colorspace: {}\n".format(colorspace))
- # ['version', 'file', 'reading', 'output', 'useOutput']
-
P["name"].setValue("{}_{}".format(name, namespace))
P["useOutput"].setValue(True)
@@ -74,14 +72,15 @@ class LinkAsGroup(api.Loader):
if n.Class() == "Group"
if get_avalon_knob_data(n)]
- # create panel for selecting output
- panel_choices = " ".join(writes)
- panel_label = "Select write node for output"
- p = nuke.Panel("Select Write Node")
- p.addEnumerationPulldown(
- panel_label, panel_choices)
- p.show()
- P["output"].setValue(p.value(panel_label))
+ if writes:
+ # create panel for selecting output
+ panel_choices = " ".join(writes)
+ panel_label = "Select write node for output"
+ p = nuke.Panel("Select Write Node")
+ p.addEnumerationPulldown(
+ panel_label, panel_choices)
+ p.show()
+ P["output"].setValue(p.value(panel_label))
P["tile_color"].setValue(0xff0ff0ff)
diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py
index e1c75584d7..8f01d4511b 100644
--- a/pype/plugins/nuke/load/load_sequence.py
+++ b/pype/plugins/nuke/load/load_sequence.py
@@ -1,9 +1,6 @@
-import os
import contextlib
-from avalon import api
-import avalon.io as io
-
+from avalon import api, io
import nuke
diff --git a/pype/plugins/nuke/publish/collect_backdrop.py b/pype/plugins/nuke/publish/collect_backdrop.py
new file mode 100644
index 0000000000..d98a20aee0
--- /dev/null
+++ b/pype/plugins/nuke/publish/collect_backdrop.py
@@ -0,0 +1,83 @@
+import pyblish.api
+import pype.api as pype
+from pype.nuke import lib as pnlib
+import nuke
+
+@pyblish.api.log
+class CollectBackdrops(pyblish.api.InstancePlugin):
+ """Collect Backdrop node instance and its content
+ """
+
+ order = pyblish.api.CollectorOrder + 0.22
+ label = "Collect Backdrop"
+ hosts = ["nuke"]
+ families = ["nukenodes"]
+
+ def process(self, instance):
+
+ bckn = instance[0]
+
+ # define size of the backdrop
+ left = bckn.xpos()
+ top = bckn.ypos()
+ right = left + bckn['bdwidth'].value()
+ bottom = top + bckn['bdheight'].value()
+
+ # iterate all nodes
+ for node in nuke.allNodes():
+
+ # exclude viewer
+ if node.Class() == "Viewer":
+ continue
+
+ # find all related nodes
+ if (node.xpos() > left) \
+ and (node.xpos() + node.screenWidth() < right) \
+ and (node.ypos() > top) \
+ and (node.ypos() + node.screenHeight() < bottom):
+
+ # add contained nodes to instance's node list
+ instance.append(node)
+
+ # get all connections from outside of backdrop
+ nodes = instance[1:]
+ connections_in, connections_out = pnlib.get_dependent_nodes(nodes)
+ instance.data["connections_in"] = connections_in
+ instance.data["connections_out"] = connections_out
+
+ # make label nicer
+ instance.data["label"] = "{0} ({1} nodes)".format(
+ bckn.name(), len(instance)-1)
+
+ instance.data["families"].append(instance.data["family"])
+
+ # Get frame range
+ handle_start = instance.context.data["handleStart"]
+ handle_end = instance.context.data["handleEnd"]
+ first_frame = int(nuke.root()["first_frame"].getValue())
+ last_frame = int(nuke.root()["last_frame"].getValue())
+
+ # get version
+ version = pype.get_version_from_path(nuke.root().name())
+ instance.data['version'] = version
+
+ # Add version data to instance
+ version_data = {
+ "handles": handle_start,
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
+ "frameStart": first_frame + handle_start,
+ "frameEnd": last_frame - handle_end,
+ "version": int(version),
+ "families": [instance.data["family"]] + instance.data["families"],
+ "subset": instance.data["subset"],
+ "fps": instance.context.data["fps"]
+ }
+
+ instance.data.update({
+ "versionData": version_data,
+ "frameStart": first_frame,
+ "frameEnd": last_frame
+ })
+ self.log.info("Backdrop content collected: `{}`".format(instance[:]))
+ self.log.info("Backdrop instance collected: `{}`".format(instance))
diff --git a/pype/plugins/nuke/publish/collect_gizmo.py b/pype/plugins/nuke/publish/collect_gizmo.py
new file mode 100644
index 0000000000..11e8c17a3f
--- /dev/null
+++ b/pype/plugins/nuke/publish/collect_gizmo.py
@@ -0,0 +1,56 @@
+import pyblish.api
+import pype.api as pype
+import nuke
+
+
+@pyblish.api.log
+class CollectGizmo(pyblish.api.InstancePlugin):
+ """Collect Gizmo (group) node instance and its content
+ """
+
+ order = pyblish.api.CollectorOrder + 0.22
+ label = "Collect Gizmo (Group)"
+ hosts = ["nuke"]
+ families = ["gizmo"]
+
+ def process(self, instance):
+
+ grpn = instance[0]
+
+ # add family to families
+ instance.data["families"].insert(0, instance.data["family"])
+ # make label nicer
+ instance.data["label"] = "{0} ({1} nodes)".format(
+ grpn.name(), len(instance) - 1)
+
+ # Get frame range
+ handle_start = instance.context.data["handleStart"]
+ handle_end = instance.context.data["handleEnd"]
+ first_frame = int(nuke.root()["first_frame"].getValue())
+ last_frame = int(nuke.root()["last_frame"].getValue())
+
+ # get version
+ version = pype.get_version_from_path(nuke.root().name())
+ instance.data['version'] = version
+
+ # Add version data to instance
+ version_data = {
+ "handles": handle_start,
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
+ "frameStart": first_frame + handle_start,
+ "frameEnd": last_frame - handle_end,
+ "colorspace": nuke.root().knob('workingSpaceLUT').value(),
+ "version": int(version),
+ "families": [instance.data["family"]] + instance.data["families"],
+ "subset": instance.data["subset"],
+ "fps": instance.context.data["fps"]
+ }
+
+ instance.data.update({
+ "versionData": version_data,
+ "frameStart": first_frame,
+ "frameEnd": last_frame
+ })
+ self.log.info("Gizmo content collected: `{}`".format(instance[:]))
+ self.log.info("Gizmo instance collected: `{}`".format(instance))
diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py
index 53a9383b39..5b123ed7b9 100644
--- a/pype/plugins/nuke/publish/collect_instances.py
+++ b/pype/plugins/nuke/publish/collect_instances.py
@@ -22,7 +22,8 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
self.log.debug("asset_data: {}".format(asset_data["data"]))
instances = []
- # creating instances per write node
+
+ root = nuke.root()
self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes()))
for node in nuke.allNodes():
@@ -32,11 +33,11 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
continue
except Exception as E:
self.log.warning(E)
- continue
+
# get data from avalon knob
self.log.debug("node[name]: {}".format(node['name'].value()))
- avalon_knob_data = get_avalon_knob_data(node)
+ avalon_knob_data = get_avalon_knob_data(node, ["avalon:", "ak:"])
self.log.debug("avalon_knob_data: {}".format(avalon_knob_data))
@@ -46,6 +47,14 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
if avalon_knob_data["id"] != "pyblish.avalon.instance":
continue
+ # establish families
+ family = avalon_knob_data["family"]
+ families = list()
+
+ # except disabled nodes but exclude backdrops in test
+ if ("nukenodes" not in family) and (node["disable"].value()):
+ continue
+
subset = avalon_knob_data.get(
"subset", None) or node["name"].value()
@@ -55,16 +64,47 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
# Add all nodes in group instances.
if node.Class() == "Group":
+ # only alter families for render family
+ if ("render" in family):
+ # check if node is not disabled
+ families.append(avalon_knob_data["families"])
+ if node["render"].value():
+ self.log.info("flagged for render")
+ add_family = "render.local"
+ # dealing with local/farm rendering
+ if node["render_farm"].value():
+ self.log.info("adding render farm family")
+ add_family = "render.farm"
+ instance.data["transfer"] = False
+ families.append(add_family)
+ else:
+ # add family into families
+ families.insert(0, family)
+
node.begin()
for i in nuke.allNodes():
instance.append(i)
node.end()
family = avalon_knob_data["family"]
- families = [avalon_knob_data["families"]]
-
+ families = list()
+ families_ak = avalon_knob_data.get("families")
+
+ if families_ak:
+ families.append(families_ak)
+ else:
+ families.append(family)
+
+ # Get format
+ format = root['format'].value()
+ resolution_width = format.width()
+ resolution_height = format.height()
+ pixel_aspect = format.pixelAspect()
+
if node.Class() not in "Read":
- if node["render"].value():
+ if "render" not in node.knobs().keys():
+ pass
+ elif node["render"].value():
self.log.info("flagged for render")
add_family = "render.local"
# dealing with local/farm rendering
@@ -88,7 +128,10 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
"avalonKnob": avalon_knob_data,
"publish": node.knob('publish').value(),
"step": 1,
- "fps": nuke.root()['fps'].value()
+ "fps": nuke.root()['fps'].value(),
+ "resolutionWidth": resolution_width,
+ "resolutionHeight": resolution_height,
+ "pixelAspect": pixel_aspect,
})
@@ -96,5 +139,4 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
instances.append(instance)
context.data["instances"] = instances
-
self.log.debug("context: {}".format(context))
diff --git a/pype/plugins/nuke/publish/collect_legacy_write.py b/pype/plugins/nuke/publish/collect_legacy_write.py
index 74280b743a..cfb0798434 100644
--- a/pype/plugins/nuke/publish/collect_legacy_write.py
+++ b/pype/plugins/nuke/publish/collect_legacy_write.py
@@ -24,7 +24,8 @@ class CollectWriteLegacy(pyblish.api.InstancePlugin):
self.log.info("render")
return
- instance.data.update(
- {"family": "write.legacy",
- "families": []}
- )
+ if "render" in node.knobs():
+ instance.data.update(
+ {"family": "write.legacy",
+ "families": []}
+ )
diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py
index ba8a0534b1..dd3049834d 100644
--- a/pype/plugins/nuke/publish/collect_writes.py
+++ b/pype/plugins/nuke/publish/collect_writes.py
@@ -11,7 +11,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Writes"
hosts = ["nuke", "nukeassist"]
- families = ["render", "render.local", "render.farm"]
+ families = ["write"]
def process(self, instance):
@@ -76,7 +76,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
}
try:
- collected_frames = os.listdir(output_dir)
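+ # keep only files matching the write node's extension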
+ collected_frames = [f for f in os.listdir(output_dir)
+ if ext in f]
if collected_frames:
representation['frameStart'] = "%0{}d".format(
len(str(last_frame))) % first_frame
@@ -95,11 +96,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"frameEnd": last_frame - handle_end,
"version": int(version),
"colorspace": node["colorspace"].value(),
- "families": [instance.data["family"]] + instance.data["families"],
+ "families": [instance.data["family"]],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
}
-
+ instance.data["family"] = "write"
group_node = [x for x in instance if x.Class() == "Group"][0]
deadlineChunkSize = 1
if "deadlineChunkSize" in group_node.knobs():
@@ -109,6 +110,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
if "deadlinePriority" in group_node.knobs():
deadlinePriority = group_node["deadlinePriority"].value()
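+ # drop "write" from families since it is set as the main family below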
+ families = [f for f in instance.data["families"] if "write" not in f]
instance.data.update({
"versionData": version_data,
"path": path,
@@ -119,10 +121,13 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"frameStart": first_frame,
"frameEnd": last_frame,
"outputType": output_type,
+ "family": "write",
+ "families": families,
"colorspace": node["colorspace"].value(),
"deadlineChunkSize": deadlineChunkSize,
"deadlinePriority": deadlinePriority,
"subsetGroup": "renders"
})
+
self.log.debug("instance.data: {}".format(instance.data))
diff --git a/pype/plugins/nuke/publish/extract_backdrop.py b/pype/plugins/nuke/publish/extract_backdrop.py
new file mode 100644
index 0000000000..7b01b5deac
--- /dev/null
+++ b/pype/plugins/nuke/publish/extract_backdrop.py
@@ -0,0 +1,103 @@
+import pyblish.api
+from avalon.nuke import lib as anlib
+from pype.nuke import lib as pnlib
+import nuke
+import os
+import pype
+reload(pnlib)
+
+class ExtractBackdropNode(pype.api.Extractor):
+ """Extracting content of backdrop nodes
+
+ Will create nuke script only with containing nodes.
+ Also it will solve Input and Output nodes.
+
+ """
+
+ order = pyblish.api.ExtractorOrder
+ label = "Extract Backdrop"
+ hosts = ["nuke"]
+ families = ["nukenodes"]
+
+ def process(self, instance):
+ tmp_nodes = list()
+ nodes = instance[1:]
+ # Define extract output file path
+ stagingdir = self.staging_dir(instance)
+ filename = "{0}.nk".format(instance.name)
+ path = os.path.join(stagingdir, filename)
+
+ # maintain selection
+ with anlib.maintained_selection():
+ # all connections outside of backdrop
+ connections_in = instance.data["connections_in"]
+ connections_out = instance.data["connections_out"]
+ self.log.debug("_ connections_in: `{}`".format(connections_in))
+ self.log.debug("_ connections_out: `{}`".format(connections_out))
+
+ # create input nodes and name them as passing node (*_INP)
+ for n, inputs in connections_in.items():
+ for i, input in inputs:
+ inpn = nuke.createNode("Input")
+ inpn["name"].setValue("{}_{}_INP".format(n.name(), i))
+ n.setInput(i, inpn)
+ inpn.setXYpos(input.xpos(), input.ypos())
+ nodes.append(inpn)
+ tmp_nodes.append(inpn)
+
+ anlib.reset_selection()
+
+ # connect output node
+ for n, output in connections_out.items():
+ opn = nuke.createNode("Output")
+ self.log.info(n.name())
+ self.log.info(output.name())
+ output.setInput(
+ next((i for i, d in enumerate(output.dependencies())
+ if d.name() in n.name()), 0), opn)
+ opn.setInput(0, n)
+ opn.autoplace()
+ nodes.append(opn)
+ tmp_nodes.append(opn)
+ anlib.reset_selection()
+
+ # select nodes to copy
+ anlib.reset_selection()
+ anlib.select_nodes(nodes)
+ # create tmp nk file
+ # save file to the path
+ nuke.nodeCopy(path)
+
+ # Clean up
+ for tn in tmp_nodes:
+ nuke.delete(tn)
+
+ # restore original connections
+ # reconnect input node
+ for n, inputs in connections_in.items():
+ for i, input in inputs:
+ n.setInput(i, input)
+
+ # reconnect output node
+ for n, output in connections_out.items():
+ output.setInput(
+ next((i for i, d in enumerate(output.dependencies())
+ if d.name() in n.name()), 0), n)
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ # create representation
+ representation = {
+ 'name': 'nk',
+ 'ext': 'nk',
+ 'files': filename,
+ "stagingDir": stagingdir
+ }
+ instance.data["representations"].append(representation)
+
+ self.log.info("Extracted instance '{}' to: {}".format(
+ instance.name, path))
+
+ self.log.info("Data {}".format(
+ instance.data))
diff --git a/pype/plugins/nuke/publish/extract_gizmo.py b/pype/plugins/nuke/publish/extract_gizmo.py
new file mode 100644
index 0000000000..36ef1d464c
--- /dev/null
+++ b/pype/plugins/nuke/publish/extract_gizmo.py
@@ -0,0 +1,95 @@
+import pyblish.api
+from avalon.nuke import lib as anlib
+from pype.nuke import lib as pnlib
+from pype.nuke import utils as pnutils
+import nuke
+import os
+import pype
+
+
+class ExtractGizmo(pype.api.Extractor):
+ """Extracting Gizmo (Group) node
+
+ Will create nuke script only with the Gizmo node.
+ """
+
+ order = pyblish.api.ExtractorOrder
+ label = "Extract Gizmo (Group)"
+ hosts = ["nuke"]
+ families = ["gizmo"]
+
+ def process(self, instance):
+ tmp_nodes = list()
+ orig_grpn = instance[0]
+ # Define extract output file path
+ stagingdir = self.staging_dir(instance)
+ filename = "{0}.nk".format(instance.name)
+ path = os.path.join(stagingdir, filename)
+
+ # maintain selection
+ with anlib.maintained_selection():
+ orig_grpn_name = orig_grpn.name()
+ tmp_grpn_name = orig_grpn_name + "_tmp"
+ # select original group node
+ anlib.select_nodes([orig_grpn])
+
+ # copy to clipboard
+ nuke.nodeCopy("%clipboard%")
+
+ # reset selection to none
+ anlib.reset_selection()
+
+ # paste clipboard
+ nuke.nodePaste("%clipboard%")
+
+ # assign pasted node
+ copy_grpn = nuke.selectedNode()
+ copy_grpn.setXYpos((orig_grpn.xpos() + 120), orig_grpn.ypos())
+
+ # convert gizmos to groups
+ pnutils.bake_gizmos_recursively(copy_grpn)
+
+ # remove avalonknobs
+ knobs = copy_grpn.knobs()
+ avalon_knobs = [k for k in knobs.keys()
+ for ak in ["avalon:", "ak:"]
+ if ak in k]
+ avalon_knobs.append("publish")
+ for ak in avalon_knobs:
+ copy_grpn.removeKnob(knobs[ak])
+
+ # add to temporary nodes
+ tmp_nodes.append(copy_grpn)
+
+ # swap names
+ orig_grpn.setName(tmp_grpn_name)
+ copy_grpn.setName(orig_grpn_name)
+
+ # create tmp nk file
+ # save file to the path
+ nuke.nodeCopy(path)
+
+ # Clean up
+ for tn in tmp_nodes:
+ nuke.delete(tn)
+
+ # rename back to original
+ orig_grpn.setName(orig_grpn_name)
+
+ if "representations" not in instance.data:
+ instance.data["representations"] = []
+
+ # create representation
+ representation = {
+ 'name': 'gizmo',
+ 'ext': 'nk',
+ 'files': filename,
+ "stagingDir": stagingdir
+ }
+ instance.data["representations"].append(representation)
+
+ self.log.info("Extracted instance '{}' to: {}".format(
+ instance.name, path))
+
+ self.log.info("Data {}".format(
+ instance.data))
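
For reference, the knob filter above removes any knob whose name contains "avalon:" or "ak:", plus the "publish" knob. A small, hypothetical example of what gets selected (knob names are made up for illustration):

    # hypothetical knob names on the copied group node
    knob_names = ["avalon:id", "ak:subset", "publish", "label"]
    removed = [k for k in knob_names
               for ak in ["avalon:", "ak:"]
               if ak in k]
    removed.append("publish")
    print(removed)  # ['avalon:id', 'ak:subset', 'publish'] -- 'label' is kept
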
diff --git a/pype/plugins/nuke/publish/extract_review_data.py b/pype/plugins/nuke/publish/extract_review_data.py
deleted file mode 100644
index 791b9d7969..0000000000
--- a/pype/plugins/nuke/publish/extract_review_data.py
+++ /dev/null
@@ -1,187 +0,0 @@
-import os
-import nuke
-import pyblish.api
-import pype
-
-class ExtractReviewData(pype.api.Extractor):
- """Extracts movie and thumbnail with baked in luts
-
- must be run after extract_render_local.py
-
- """
-
- order = pyblish.api.ExtractorOrder + 0.01
- label = "Extract Review Data"
-
- families = ["review"]
- hosts = ["nuke"]
-
- def process(self, instance):
-
- # Store selection
- selection = [i for i in nuke.allNodes() if i["selected"].getValue()]
- # Deselect all nodes to prevent external connections
- [i["selected"].setValue(False) for i in nuke.allNodes()]
- self.log.debug("creating staging dir:")
- self.staging_dir(instance)
-
- self.log.debug("instance: {}".format(instance))
- self.log.debug("instance.data[families]: {}".format(
- instance.data["families"]))
-
- if "still" not in instance.data["families"]:
- self.render_review_representation(instance,
- representation="mov")
- self.render_review_representation(instance,
- representation="jpeg")
- else:
- self.render_review_representation(instance, representation="jpeg")
-
- # Restore selection
- [i["selected"].setValue(False) for i in nuke.allNodes()]
- [i["selected"].setValue(True) for i in selection]
-
- def render_review_representation(self,
- instance,
- representation="mov"):
-
- assert instance.data['representations'][0]['files'], "Instance data files should't be empty!"
-
- temporary_nodes = []
- stagingDir = instance.data[
- 'representations'][0]["stagingDir"].replace("\\", "/")
- self.log.debug("StagingDir `{0}`...".format(stagingDir))
-
- collection = instance.data.get("collection", None)
-
- if collection:
- # get path
- fname = os.path.basename(collection.format(
- "{head}{padding}{tail}"))
- fhead = collection.format("{head}")
-
- # get first and last frame
- first_frame = min(collection.indexes)
- last_frame = max(collection.indexes)
- else:
- fname = os.path.basename(instance.data.get("path", None))
- fhead = os.path.splitext(fname)[0] + "."
- first_frame = instance.data.get("frameStart", None)
- last_frame = instance.data.get("frameEnd", None)
-
- rnode = nuke.createNode("Read")
-
- rnode["file"].setValue(
- os.path.join(stagingDir, fname).replace("\\", "/"))
-
- rnode["first"].setValue(first_frame)
- rnode["origfirst"].setValue(first_frame)
- rnode["last"].setValue(last_frame)
- rnode["origlast"].setValue(last_frame)
- temporary_nodes.append(rnode)
- previous_node = rnode
-
- # get input process and connect it to baking
- ipn = self.get_view_process_node()
- if ipn is not None:
- ipn.setInput(0, previous_node)
- previous_node = ipn
- temporary_nodes.append(ipn)
-
- reformat_node = nuke.createNode("Reformat")
-
- ref_node = self.nodes.get("Reformat", None)
- if ref_node:
- for k, v in ref_node:
- self.log.debug("k,v: {0}:{1}".format(k,v))
- if isinstance(v, unicode):
- v = str(v)
- reformat_node[k].setValue(v)
-
- reformat_node.setInput(0, previous_node)
- previous_node = reformat_node
- temporary_nodes.append(reformat_node)
-
- dag_node = nuke.createNode("OCIODisplay")
- dag_node.setInput(0, previous_node)
- previous_node = dag_node
- temporary_nodes.append(dag_node)
-
- # create write node
- write_node = nuke.createNode("Write")
-
- if representation in "mov":
- file = fhead + "baked.mov"
- name = "baked"
- path = os.path.join(stagingDir, file).replace("\\", "/")
- self.log.debug("Path: {}".format(path))
- instance.data["baked_colorspace_movie"] = path
- write_node["file"].setValue(path)
- write_node["file_type"].setValue("mov")
- write_node["raw"].setValue(1)
- write_node.setInput(0, previous_node)
- temporary_nodes.append(write_node)
- tags = ["review", "delete"]
-
- elif representation in "jpeg":
- file = fhead + "jpeg"
- name = "thumbnail"
- path = os.path.join(stagingDir, file).replace("\\", "/")
- instance.data["thumbnail"] = path
- write_node["file"].setValue(path)
- write_node["file_type"].setValue("jpeg")
- write_node["raw"].setValue(1)
- write_node.setInput(0, previous_node)
- temporary_nodes.append(write_node)
- tags = ["thumbnail"]
-
- # retime for
- first_frame = int(last_frame) / 2
- last_frame = int(last_frame) / 2
-
- repre = {
- 'name': name,
- 'ext': representation,
- 'files': file,
- "stagingDir": stagingDir,
- "frameStart": first_frame,
- "frameEnd": last_frame,
- "anatomy_template": "render",
- "tags": tags
- }
- instance.data["representations"].append(repre)
-
- # Render frames
- nuke.execute(write_node.name(), int(first_frame), int(last_frame))
-
- self.log.debug("representations: {}".format(instance.data["representations"]))
-
- # Clean up
- for node in temporary_nodes:
- nuke.delete(node)
-
- def get_view_process_node(self):
-
- # Select only the target node
- if nuke.selectedNodes():
- [n.setSelected(False) for n in nuke.selectedNodes()]
-
- ipn_orig = None
- for v in [n for n in nuke.allNodes()
- if "Viewer" in n.Class()]:
- ip = v['input_process'].getValue()
- ipn = v['input_process_node'].getValue()
- if "VIEWER_INPUT" not in ipn and ip:
- ipn_orig = nuke.toNode(ipn)
- ipn_orig.setSelected(True)
-
- if ipn_orig:
- nuke.nodeCopy('%clipboard%')
-
- [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
-
- nuke.nodePaste('%clipboard%')
-
- ipn = nuke.selectedNode()
-
- return ipn
diff --git a/pype/plugins/nuke/publish/extract_review_data_lut.py b/pype/plugins/nuke/publish/extract_review_data_lut.py
new file mode 100644
index 0000000000..4373309363
--- /dev/null
+++ b/pype/plugins/nuke/publish/extract_review_data_lut.py
@@ -0,0 +1,59 @@
+import os
+import pyblish.api
+from avalon.nuke import lib as anlib
+from pype.nuke import lib as pnlib
+import pype
+reload(pnlib)
+
+
+class ExtractReviewDataLut(pype.api.Extractor):
+ """Extracts movie and thumbnail with baked in luts
+
+ must be run after extract_render_local.py
+
+ """
+
+ order = pyblish.api.ExtractorOrder + 0.005
+ label = "Extract Review Data Lut"
+
+ families = ["review"]
+ hosts = ["nuke"]
+
+ def process(self, instance):
+ families = instance.data["families"]
+ self.log.info("Creating staging dir...")
+ if "representations" in instance.data:
+ staging_dir = instance.data[
+ "representations"][0]["stagingDir"].replace("\\", "/")
+ instance.data["stagingDir"] = staging_dir
+ instance.data["representations"][0]["tags"] = ["review"]
+ else:
+ instance.data["representations"] = []
+ # get output path
+ render_path = instance.data['path']
+ staging_dir = os.path.normpath(os.path.dirname(render_path))
+ instance.data["stagingDir"] = staging_dir
+
+ self.log.info(
+ "StagingDir `{0}`...".format(instance.data["stagingDir"]))
+
+ # generate data
+ with anlib.maintained_selection():
+ exporter = pnlib.ExporterReviewLut(
+ self, instance
+ )
+ data = exporter.generate_lut()
+
+ # assign to representations
+ instance.data["lutPath"] = os.path.join(
+ exporter.stagingDir, exporter.file).replace("\\", "/")
+ instance.data["representations"] += data["representations"]
+
+ if "render.farm" in families:
+ instance.data["families"].remove("review")
+ instance.data["families"].remove("ftrack")
+
+ self.log.debug(
+ "_ lutPath: {}".format(instance.data["lutPath"]))
+ self.log.debug(
+ "_ representations: {}".format(instance.data["representations"]))
diff --git a/pype/plugins/nuke/publish/extract_review_data_mov.py b/pype/plugins/nuke/publish/extract_review_data_mov.py
new file mode 100644
index 0000000000..333774bcd7
--- /dev/null
+++ b/pype/plugins/nuke/publish/extract_review_data_mov.py
@@ -0,0 +1,56 @@
+import os
+import pyblish.api
+from avalon.nuke import lib as anlib
+from pype.nuke import lib as pnlib
+import pype
+reload(pnlib)
+
+
+class ExtractReviewDataMov(pype.api.Extractor):
+ """Extracts movie and thumbnail with baked in luts
+
+ must be run after extract_render_local.py
+
+ """
+
+ order = pyblish.api.ExtractorOrder + 0.01
+ label = "Extract Review Data Mov"
+
+ families = ["review"]
+ hosts = ["nuke"]
+
+ def process(self, instance):
+ families = instance.data["families"]
+ self.log.info("Creating staging dir...")
+ if "representations" in instance.data:
+ staging_dir = instance.data[
+ "representations"][0]["stagingDir"].replace("\\", "/")
+ instance.data["stagingDir"] = staging_dir
+ instance.data["representations"][0]["tags"] = []
+ else:
+ instance.data["representations"] = []
+ # get output path
+ render_path = instance.data['path']
+ staging_dir = os.path.normpath(os.path.dirname(render_path))
+ instance.data["stagingDir"] = staging_dir
+
+ self.log.info(
+ "StagingDir `{0}`...".format(instance.data["stagingDir"]))
+
+ # generate data
+ with anlib.maintained_selection():
+ exporter = pnlib.ExporterReviewMov(
+ self, instance)
+
+ if "render.farm" in families:
+ instance.data["families"].remove("review")
+ instance.data["families"].remove("ftrack")
+ data = exporter.generate_mov(farm=True)
+ else:
+ data = exporter.generate_mov()
+
+ # assign to representations
+ instance.data["representations"] += data["representations"]
+
+ self.log.debug(
+ "_ representations: {}".format(instance.data["representations"]))
diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py
new file mode 100644
index 0000000000..450bb39928
--- /dev/null
+++ b/pype/plugins/nuke/publish/extract_thumbnail.py
@@ -0,0 +1,174 @@
+import os
+import nuke
+from avalon.nuke import lib as anlib
+import pyblish.api
+import pype
+
+
+class ExtractThumbnail(pype.api.Extractor):
+ """Extracts movie and thumbnail with baked in luts
+
+ must be run after extract_render_local.py
+
+ """
+
+ order = pyblish.api.ExtractorOrder + 0.01
+ label = "Extract Thumbnail"
+
+ families = ["review", "render.farm"]
+ hosts = ["nuke"]
+
+ def process(self, instance):
+
+ with anlib.maintained_selection():
+ self.log.debug("instance: {}".format(instance))
+ self.log.debug("instance.data[families]: {}".format(
+ instance.data["families"]))
+
+ self.render_thumbnail(instance)
+
+ def render_thumbnail(self, instance):
+ node = instance[0] # group node
+ self.log.info("Creating staging dir...")
+ if "representations" in instance.data:
+ staging_dir = instance.data[
+ "representations"][0]["stagingDir"].replace("\\", "/")
+ instance.data["stagingDir"] = staging_dir
+ instance.data["representations"][0]["tags"] = ["review"]
+ else:
+ instance.data["representations"] = []
+ # get output path
+ render_path = instance.data['path']
+ staging_dir = os.path.normpath(os.path.dirname(render_path))
+ instance.data["stagingDir"] = staging_dir
+
+ self.log.info(
+ "StagingDir `{0}`...".format(instance.data["stagingDir"]))
+
+ temporary_nodes = []
+ collection = instance.data.get("collection", None)
+
+ if collection:
+ # get path
+ fname = os.path.basename(collection.format(
+ "{head}{padding}{tail}"))
+ fhead = collection.format("{head}")
+
+ # get first and last frame
+ first_frame = min(collection.indexes)
+ last_frame = max(collection.indexes)
+ else:
+ fname = os.path.basename(instance.data.get("path", None))
+ fhead = os.path.splitext(fname)[0] + "."
+ first_frame = instance.data.get("frameStart", None)
+ last_frame = instance.data.get("frameEnd", None)
+
+ if "#" in fhead:
+ fhead = fhead.replace("#", "")[:-1]
+
+ path_render = os.path.join(staging_dir, fname).replace("\\", "/")
+ # check if file exist otherwise connect to write node
+ if os.path.isfile(path_render):
+ rnode = nuke.createNode("Read")
+
+ rnode["file"].setValue(path_render)
+
+ rnode["first"].setValue(first_frame)
+ rnode["origfirst"].setValue(first_frame)
+ rnode["last"].setValue(last_frame)
+ rnode["origlast"].setValue(last_frame)
+ temporary_nodes.append(rnode)
+ previous_node = rnode
+ else:
+ previous_node = node
+
+ # get input process and connect it to baking
+ ipn = self.get_view_process_node()
+ if ipn is not None:
+ ipn.setInput(0, previous_node)
+ previous_node = ipn
+ temporary_nodes.append(ipn)
+
+ reformat_node = nuke.createNode("Reformat")
+
+ ref_node = self.nodes.get("Reformat", None)
+ if ref_node:
+ for k, v in ref_node:
+ self.log.debug("k, v: {0}:{1}".format(k, v))
+ if isinstance(v, unicode):
+ v = str(v)
+ reformat_node[k].setValue(v)
+
+ reformat_node.setInput(0, previous_node)
+ previous_node = reformat_node
+ temporary_nodes.append(reformat_node)
+
+ dag_node = nuke.createNode("OCIODisplay")
+ dag_node.setInput(0, previous_node)
+ previous_node = dag_node
+ temporary_nodes.append(dag_node)
+
+ # create write node
+ write_node = nuke.createNode("Write")
+ file = fhead + "jpeg"
+ name = "thumbnail"
+ path = os.path.join(staging_dir, file).replace("\\", "/")
+ instance.data["thumbnail"] = path
+ write_node["file"].setValue(path)
+ write_node["file_type"].setValue("jpeg")
+ write_node["raw"].setValue(1)
+ write_node.setInput(0, previous_node)
+ temporary_nodes.append(write_node)
+ tags = ["thumbnail"]
+
+        # render only the middle frame as the thumbnail
+ first_frame = int(last_frame) / 2
+ last_frame = int(last_frame) / 2
+
+ repre = {
+ 'name': name,
+ 'ext': "jpeg",
+ 'files': file,
+ "stagingDir": staging_dir,
+ "frameStart": first_frame,
+ "frameEnd": last_frame,
+ "anatomy_template": "render",
+ "tags": tags
+ }
+ instance.data["representations"].append(repre)
+
+ # Render frames
+ nuke.execute(write_node.name(), int(first_frame), int(last_frame))
+
+ self.log.debug(
+ "representations: {}".format(instance.data["representations"]))
+
+ # Clean up
+ for node in temporary_nodes:
+ nuke.delete(node)
+
+ def get_view_process_node(self):
+
+ # Select only the target node
+ if nuke.selectedNodes():
+ [n.setSelected(False) for n in nuke.selectedNodes()]
+
+ ipn_orig = None
+ for v in [n for n in nuke.allNodes()
+ if "Viewer" in n.Class()]:
+ ip = v['input_process'].getValue()
+ ipn = v['input_process_node'].getValue()
+ if "VIEWER_INPUT" not in ipn and ip:
+ ipn_orig = nuke.toNode(ipn)
+ ipn_orig.setSelected(True)
+
+ if ipn_orig:
+ nuke.nodeCopy('%clipboard%')
+
+ [n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
+
+ nuke.nodePaste('%clipboard%')
+
+ ipn = nuke.selectedNode()
+
+ return ipn
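
To make the retime step above explicit: both frame variables end up at the sequence midpoint, so the write node renders a single frame. A worked example, assuming Python 2 integer division as used by Nuke at the time:

    # e.g. last_frame == 100
    first_frame = int(100) / 2   # 50
    last_frame = int(100) / 2    # 50
    # nuke.execute(write_node.name(), 50, 50) renders only frame 50
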
diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index 4044026b5e..d9207d2bfc 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -1,9 +1,7 @@
import os
import json
import getpass
-
-import nuke
-
+
from avalon import api
from avalon.vendor import requests
import re
@@ -27,40 +25,36 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
def process(self, instance):
- node = None
- for x in instance:
- if x.Class() == "Write":
- node = x
-
- if node is None:
- return
+ node = instance[0]
+ # for x in instance:
+ # if x.Class() == "Write":
+ # node = x
+ #
+ # if node is None:
+ # return
DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
context = instance.context
- workspace = os.path.dirname(context.data["currentFile"])
- filepath = None
- # get path
- path = nuke.filename(node)
- output_dir = instance.data['outputDir']
+ # get output path
+ render_path = instance.data['path']
+ render_dir = os.path.normpath(os.path.dirname(render_path))
- filepath = context.data["currentFile"]
+ script_path = context.data["currentFile"]
- self.log.debug(filepath)
-
- filename = os.path.basename(filepath)
+ script_name = os.path.basename(script_path)
comment = context.data.get("comment", "")
- dirname = os.path.join(workspace, "renders")
+
deadline_user = context.data.get("deadlineUser", getpass.getuser())
- jobname = "%s - %s" % (filename, instance.name)
+ jobname = "%s - %s" % (script_name, instance.name)
ver = re.search(r"\d+\.\d+", context.data.get("hostVersion"))
try:
# Ensure render folder exists
- os.makedirs(dirname)
+ os.makedirs(render_dir)
except OSError:
pass
@@ -71,7 +65,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
payload = {
"JobInfo": {
# Top-level group name
- "BatchName": filename,
+ "BatchName": script_name,
# Job name, as seen in Monitor
"Name": jobname,
@@ -95,20 +89,20 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
},
"PluginInfo": {
# Input
- "SceneFile": filepath,
+ "SceneFile": script_path,
# Output directory and filename
- "OutputFilePath": dirname.replace("\\", "/"),
+ "OutputFilePath": render_dir.replace("\\", "/"),
# "OutputFilePrefix": render_variables["filename_prefix"],
# Mandatory for Deadline
"Version": ver.group(),
# Resolve relative references
- "ProjectPath": workspace,
-
+ "ProjectPath": script_path,
+ "AWSAssetFile0": render_path,
# Only the specific write node is rendered.
- "WriteNode": instance[0].name()
+ "WriteNode": node.name()
},
# Mandatory for Deadline, may be empty
diff --git a/pype/plugins/nuke/publish/validate_backdrop.py b/pype/plugins/nuke/publish/validate_backdrop.py
new file mode 100644
index 0000000000..cf2d56087d
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_backdrop.py
@@ -0,0 +1,69 @@
+import pyblish
+from avalon.nuke import lib as anlib
+import nuke
+
+
+class SelectCenterInNodeGraph(pyblish.api.Action):
+ """
+    Center the failed instance node in the node graph.
+ """
+
+ label = "Center node in node graph"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+
+ # Get the errored instances
+ failed = []
+ for result in context.data["results"]:
+ if (result["error"] is not None and result["instance"] is not None
+ and result["instance"] not in failed):
+ failed.append(result["instance"])
+
+ # Apply pyblish.logic to get the instances for the plug-in
+ instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+ all_xC = list()
+ all_yC = list()
+
+ # maintain selection
+ with anlib.maintained_selection():
+ # collect all failed nodes xpos and ypos
+ for instance in instances:
+ bdn = instance[0]
+ xC = bdn.xpos() + bdn.screenWidth()/2
+ yC = bdn.ypos() + bdn.screenHeight()/2
+
+ all_xC.append(xC)
+ all_yC.append(yC)
+
+ self.log.info("all_xC: `{}`".format(all_xC))
+ self.log.info("all_yC: `{}`".format(all_yC))
+
+ # zoom to nodes in node graph
+ nuke.zoom(2, [min(all_xC), min(all_yC)])
+
+
+@pyblish.api.log
+class ValidateBackdrop(pyblish.api.InstancePlugin):
+ """Validate amount of nodes on backdrop node in case user
+ forgoten to add nodes above the publishing backdrop node"""
+
+ order = pyblish.api.ValidatorOrder
+ optional = True
+ families = ["nukenodes"]
+ label = "Validate Backdrop"
+ hosts = ["nuke"]
+ actions = [SelectCenterInNodeGraph]
+
+ def process(self, instance):
+ connections_out = instance.data["connections_out"]
+
+        msg_multiple_outputs = "Only one outgoing connection from \"{}\" is allowed".format(
+ instance.data["name"])
+ assert len(connections_out.keys()) <= 1, msg_multiple_outputs
+
+ msg_no_content = "No content on backdrop node: \"{}\"".format(
+ instance.data["name"])
+ assert len(instance) > 1, msg_no_content
diff --git a/pype/plugins/nuke/publish/validate_gizmo.py b/pype/plugins/nuke/publish/validate_gizmo.py
new file mode 100644
index 0000000000..9c94ea88ef
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_gizmo.py
@@ -0,0 +1,58 @@
+import pyblish
+from avalon.nuke import lib as anlib
+import nuke
+
+
+class OpenFailedGroupNode(pyblish.api.Action):
+ """
+    Open the failed group node in the node graph.
+ """
+
+ label = "Open Gizmo in Node Graph"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+
+ # Get the errored instances
+ failed = []
+ for result in context.data["results"]:
+ if (result["error"] is not None and result["instance"] is not None
+ and result["instance"] not in failed):
+ failed.append(result["instance"])
+
+ # Apply pyblish.logic to get the instances for the plug-in
+ instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+ # maintain selection
+ with anlib.maintained_selection():
+ # collect all failed nodes xpos and ypos
+ for instance in instances:
+ grpn = instance[0]
+ nuke.showDag(grpn)
+
+
+@pyblish.api.log
+class ValidateGizmo(pyblish.api.InstancePlugin):
+ """Validate amount of output nodes in gizmo (group) node"""
+
+ order = pyblish.api.ValidatorOrder
+ optional = True
+ families = ["gizmo"]
+ label = "Validate Gizmo (Group)"
+ hosts = ["nuke"]
+ actions = [OpenFailedGroupNode]
+
+ def process(self, instance):
+ grpn = instance[0]
+
+ with grpn:
+ connections_out = nuke.allNodes('Output')
+            msg_multiple_outputs = (
+                "Only one outgoing connection from \"{}\" is allowed".format(
+                    instance.data["name"]))
+            assert len(connections_out) <= 1, msg_multiple_outputs
+
+ connections_in = nuke.allNodes('Input')
+            msg_missing_inputs = (
+                "At least one Input node has to be used in: \"{}\"".format(
+                    instance.data["name"]))
+            assert len(connections_in) >= 1, msg_missing_inputs
diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py
index e244a9b4b6..3887b5d5b7 100644
--- a/pype/plugins/nuke/publish/validate_rendered_frames.py
+++ b/pype/plugins/nuke/publish/validate_rendered_frames.py
@@ -28,7 +28,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
""" Validates file output. """
order = pyblish.api.ValidatorOrder + 0.1
- families = ["render.no"]
+ families = ["render"]
label = "Validate rendered frame"
hosts = ["nuke", "nukestudio"]
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index 7a400909fd..0729f20957 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -4,7 +4,6 @@ from pyblish import api
import nuke
-
class CollectClips(api.ContextPlugin):
"""Collect all Track items selection."""
@@ -31,6 +30,7 @@ class CollectClips(api.ContextPlugin):
sub_items = video_track.subTrackItems()
for item in items:
+ data = dict()
# compare with selection or if disabled
if item not in selection or not item.isEnabled():
continue
@@ -83,9 +83,12 @@ class CollectClips(api.ContextPlugin):
except Exception:
source_first_frame = 0
- data = {"name": "{0}_{1}".format(track.name(), item.name()),
+ data.update({
+ "name": "{0}_{1}".format(track.name(), item.name()),
"item": item,
"source": source,
+ "timecodeStart": str(source.timecodeStart()),
+ "timelineTimecodeStart": str(sequence.timecodeStart()),
"sourcePath": source_path,
"track": track.name(),
"trackIndex": track_index,
@@ -93,19 +96,24 @@ class CollectClips(api.ContextPlugin):
"effects": effects,
"sourceIn": int(item.sourceIn()),
"sourceOut": int(item.sourceOut()),
+ "mediaDuration": (int(item.sourceOut()) -
+ int(item.sourceIn())) + 1,
"clipIn": int(item.timelineIn()),
"clipOut": int(item.timelineOut()),
+ "clipDuration": (int(item.timelineOut()) -
+ int(item.timelineIn())) + 1,
"asset": asset,
"family": "clip",
"families": [],
"handles": 0,
"handleStart": projectdata.get("handles", 0),
"handleEnd": projectdata.get("handles", 0),
- "version": int(version)}
+ "version": int(version)})
instance = context.create_instance(**data)
self.log.info("Created instance: {}".format(instance))
+ self.log.info("Created instance.data: {}".format(instance.data))
self.log.debug(">> effects: {}".format(instance.data["effects"]))
context.data["assetsShared"][asset] = dict()
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index f9eb126772..be448931c8 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -234,8 +234,9 @@ class CollectPlatesData(api.InstancePlugin):
'stagingDir': staging_dir,
'name': ext,
'ext': ext,
- "frameStart": frame_start,
"frameEnd": frame_end,
+ "frameStart": "%0{}d".format(
+ len(str(frame_end))) % frame_start
}
instance.data["representations"].append(plates_representation)
diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index ad2e59fc96..d5bc2594a4 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -1,5 +1,7 @@
import os
import datetime
+import subprocess
+import json
import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
from pypeapp.lib import config
from pype import api as pype
@@ -9,6 +11,53 @@ from pype import api as pype
log = pype.Logger().get_logger("BurninWrapper", "burninwrap")
+ffmpeg_path = os.environ.get("FFMPEG_PATH")
+if ffmpeg_path and os.path.exists(ffmpeg_path):
+    # add trailing path separator so the executable name can be appended
+ ffmpeg_path += os.path.sep
+else:
+ ffmpeg_path = ""
+
+FFMPEG = (
+ '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
+).format(os.path.normpath(ffmpeg_path + "ffmpeg"))
+FFPROBE = (
+ '{} -v quiet -print_format json -show_format -show_streams %(source)s'
+).format(os.path.normpath(ffmpeg_path + "ffprobe"))
+
+
+def _streams(source):
+ """Reimplemented from otio burnins to be able use full path to ffprobe
+ :param str source: source media file
+ :rtype: [{}, ...]
+ """
+ command = FFPROBE % {'source': source}
+ proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
+ out = proc.communicate()[0]
+ if proc.returncode != 0:
+ raise RuntimeError("Failed to run: %s" % command)
+ return json.loads(out)['streams']
+
+
+def get_fps(str_value):
+ if str_value == "0/0":
+ print("Source has \"r_frame_rate\" value set to \"0/0\".")
+ return "Unknown"
+
+ items = str_value.split("/")
+ if len(items) == 1:
+ fps = float(items[0])
+
+    elif len(items) == 2:
+        fps = float(items[0]) / float(items[1])
+    else:
+        return "Unknown"
+
+ # Check if fps is integer or float number
+ if int(fps) == fps:
+ fps = int(fps)
+
+ return str(fps)
+
+
class ModifiedBurnins(ffmpeg_burnins.Burnins):
'''
This is modification of OTIO FFmpeg Burnin adapter.
@@ -61,7 +110,11 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
}
def __init__(self, source, streams=None, options_init=None):
+ if not streams:
+ streams = _streams(source)
+
super().__init__(source, streams)
+
if options_init:
self.options_init.update(options_init)
@@ -91,7 +144,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
text = today.strftime(date_format)
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
- def add_frame_numbers(self, align, options=None, start_frame=None):
+ def add_frame_numbers(
+ self, align, options=None, start_frame=None, text=None
+ ):
"""
Convenience method to create the frame number expression.
@@ -103,8 +158,14 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if start_frame:
options['frame_offset'] = start_frame
- options['expression'] = r'%%{eif\:n+%d\:d}' % options['frame_offset']
- text = str(int(self.end_frame + options['frame_offset']))
+ expr = r'%%{eif\:n+%d\:d}' % options['frame_offset']
+ _text = str(int(self.end_frame + options['frame_offset']))
+ if text and isinstance(text, str):
+ text = r"{}".format(text)
+ expr = text.replace("{current_frame}", expr)
+ text = text.replace("{current_frame}", _text)
+
+ options['expression'] = expr
self._add_burnin(text, align, options, ffmpeg_burnins.DRAWTEXT)
def add_timecode(self, align, options=None, start_frame=None):
@@ -121,7 +182,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
timecode = ffmpeg_burnins._frames_to_timecode(
options['frame_offset'],
- self.frame_rate
+ self.frame_rate
)
options = options.copy()
if not options.get('fps'):
@@ -180,7 +241,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
if self.filter_string:
filters = '-vf "{}"'.format(self.filter_string)
- return (ffmpeg_burnins.FFMPEG % {
+ return (FFMPEG % {
'input': self.source,
'output': output,
'args': '%s ' % args if args else '',
@@ -213,13 +274,15 @@ def example(input_path, output_path):
burnin.render(output_path, overwrite=True)
-def burnins_from_data(input_path, output_path, data, overwrite=True):
+def burnins_from_data(input_path, codec_data, output_path, data, overwrite=True):
'''
This method adds burnins to video/image file based on presets setting.
Extension of output MUST be same as input. (mov -> mov, avi -> avi,...)
:param input_path: full path to input file where burnins should be add
:type input_path: str
+    :param codec_data: all codec related arguments as a list
+    :type codec_data: list
:param output_path: full path to output file where output will be rendered
:type output_path: str
:param data: data required for burnin settings (more info below)
@@ -284,8 +347,19 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
burnin = ModifiedBurnins(input_path, options_init=options_init)
- start_frame = data.get("start_frame")
- start_frame_tc = data.get('start_frame_tc', start_frame)
+ frame_start = data.get("frame_start")
+ frame_start_tc = data.get('frame_start_tc', frame_start)
+
+ stream = burnin._streams[0]
+ if "resolution_width" not in data:
+ data["resolution_width"] = stream.get("width", "Unknown")
+
+ if "resolution_height" not in data:
+ data["resolution_height"] = stream.get("height", "Unknown")
+
+ if "fps" not in data:
+ data["fps"] = get_fps(stream.get("r_frame_rate", "0/0"))
+
for align_text, preset in presets.get('burnins', {}).items():
align = None
if align_text == 'TOP_LEFT':
@@ -311,7 +385,7 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
if (
bi_func in ['frame_numbers', 'timecode'] and
- start_frame is None
+ frame_start is None
):
log.error(
'start_frame is not set in entered data!'
@@ -320,15 +394,34 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
return
if bi_func == 'frame_numbers':
- burnin.add_frame_numbers(align, start_frame=start_frame)
+ current_frame_identifier = "{current_frame}"
+ text = preset.get('text') or current_frame_identifier
+
+ if current_frame_identifier not in text:
+ log.warning((
+                    'Text for frame numbers does not contain the '
+                    '`{current_frame}` placeholder!'
+ ))
+
+ text_items = []
+ split_items = text.split(current_frame_identifier)
+ for item in split_items:
+ text_items.append(item.format(**data))
+
+ text = "{current_frame}".join(text_items)
+
+ burnin.add_frame_numbers(align, start_frame=frame_start, text=text)
+
elif bi_func == 'timecode':
- burnin.add_timecode(align, start_frame=start_frame_tc)
+ burnin.add_timecode(align, start_frame=frame_start_tc)
+
elif bi_func == 'text':
if not preset.get('text'):
log.error('Text is not set for text function burnin!')
return
text = preset['text'].format(**data)
burnin.add_text(text, align)
+
elif bi_func == "datetime":
date_format = preset["format"]
burnin.add_datetime(date_format, align)
@@ -339,11 +432,20 @@ def burnins_from_data(input_path, output_path, data, overwrite=True):
)
return
- burnin.render(output_path, overwrite=overwrite)
+ codec_args = ''
+    if codec_data:
+ codec_args = " ".join(codec_data)
+
+ burnin.render(output_path, args=codec_args, overwrite=overwrite)
if __name__ == '__main__':
import sys
import json
data = json.loads(sys.argv[-1])
- burnins_from_data(data['input'], data['output'], data['burnin_data'])
+ burnins_from_data(
+ data['input'],
+ data['codec'],
+ data['output'],
+ data['burnin_data']
+ )
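
A small sketch of the new fps helper and of the extended command-line payload consumed by the __main__ block; the file paths and codec arguments are placeholders, not values from the presets:

    print(get_fps("25/1"))         # "25"
    print(get_fps("24000/1001"))   # "23.976023976023978" (non-integer fps stays a float string)
    print(get_fps("0/0"))          # "Unknown"

    # payload passed as the last argv item to otio_burnin.py
    payload = {
        "input": "/path/to/input.mov",       # placeholder path
        "codec": ["-codec:v", "libx264"],    # example codec args, joined by spaces
        "output": "/path/to/output.mov",     # placeholder path
        "burnin_data": {"frame_start": 1001, "frame_start_tc": 1001}
    }
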
diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py
index 25ed4135c3..5517cfeb4c 100644
--- a/pype/scripts/publish_filesequence.py
+++ b/pype/scripts/publish_filesequence.py
@@ -4,6 +4,16 @@ import os
import logging
import subprocess
import platform
+try:
+ from shutil import which
+except ImportError:
+ # we are in python < 3.3
+ def which(command):
+ path = os.getenv('PATH')
+ for p in path.split(os.path.pathsep):
+ p = os.path.join(p, command)
+ if os.path.exists(p) and os.access(p, os.X_OK):
+ return p
handler = logging.basicConfig()
log = logging.getLogger("Publish Image Sequences")
@@ -35,22 +45,32 @@ def __main__():
auto_pype_root = os.path.abspath(auto_pype_root + "../../../../..")
auto_pype_root = os.environ.get('PYPE_ROOT') or auto_pype_root
- if kwargs.pype:
- pype_root = kwargs.pype
- else:
- # if pype argument not specified, lets assume it is set in PATH
- pype_root = ""
-
- print("Set pype root to: {}".format(pype_root))
- print("Paths: {}".format(kwargs.paths or [os.getcwd()]))
-
- paths = kwargs.paths or [os.getcwd()]
pype_command = "pype.ps1"
if platform.system().lower() == "linux":
pype_command = "pype"
elif platform.system().lower() == "windows":
pype_command = "pype.bat"
+ if kwargs.pype:
+ pype_root = kwargs.pype
+ else:
+ # test if pype.bat / pype is in the PATH
+ # if it is, which() will return its path and we use that.
+        # if not, we fall back to auto_pype_root. The caveat is that it
+        # can be a UNC path, which will not work on Windows.
+
+ pype_path = which(pype_command)
+
+ if pype_path:
+ pype_root = os.path.dirname(pype_path)
+ else:
+ pype_root = auto_pype_root
+
+ print("Set pype root to: {}".format(pype_root))
+ print("Paths: {}".format(kwargs.paths or [os.getcwd()]))
+
+ paths = kwargs.paths or [os.getcwd()]
+
args = [
os.path.join(pype_root, pype_command),
"publish",
@@ -60,9 +80,11 @@ def __main__():
print("Pype command: {}".format(" ".join(args)))
# Forcing forwaring the environment because environment inheritance does
# not always work.
- exit_code = subprocess.call(args, env=os.environ)
+ # Cast all values in environment to str to be safe
+ env = {k: str(v) for k, v in os.environ.items()}
+ exit_code = subprocess.call(args, env=env)
if exit_code != 0:
- raise ValueError("Publishing failed.")
+ raise RuntimeError("Publishing failed.")
if __name__ == '__main__':
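
To summarise the fallback order implemented above (the path shown is hypothetical):

    # 1. the --pype command line argument wins when given
    # 2. otherwise the pype launcher found on PATH:
    pype_path = which("pype.bat")   # e.g. "C:\\pype\\pype.bat", or None if not on PATH
    # 3. otherwise the auto-detected PYPE_ROOT (may be a UNC path on Windows)
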
diff --git a/pype/services/idle_manager/idle_manager.py b/pype/services/idle_manager/idle_manager.py
index 64cafcd193..0897245049 100644
--- a/pype/services/idle_manager/idle_manager.py
+++ b/pype/services/idle_manager/idle_manager.py
@@ -1,6 +1,6 @@
import time
import collections
-from Qt import QtCore, QtGui, QtWidgets
+from Qt import QtCore
from pynput import mouse, keyboard
from pypeapp import Logger
@@ -29,6 +29,13 @@ class IdleManager(QtCore.QThread):
def tray_start(self):
self.start()
+ def tray_exit(self):
+ self.stop()
+ try:
+ self.time_signals = {}
+ except Exception:
+ pass
+
def add_time_signal(self, emit_time, signal):
""" If any module want to use IdleManager, need to use add_time_signal
:param emit_time: time when signal will be emitted
diff --git a/pype/setdress_api.py b/pype/setdress_api.py
index 63b3967828..707a5b713f 100644
--- a/pype/setdress_api.py
+++ b/pype/setdress_api.py
@@ -7,8 +7,7 @@ import copy
from maya import cmds
-from avalon import api
-import avalon.io as io
+from avalon import api, io
from avalon.maya.lib import unique_namespace
from pype.maya.lib import matrix_equals
diff --git a/pype/standalonepublish/resources/menu.png b/pype/standalonepublish/resources/menu.png
new file mode 100644
index 0000000000..da83b45244
Binary files /dev/null and b/pype/standalonepublish/resources/menu.png differ
diff --git a/pype/standalonepublish/resources/menu.svg b/pype/standalonepublish/resources/menu.svg
deleted file mode 100644
index ac1e728011..0000000000
--- a/pype/standalonepublish/resources/menu.svg
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-
diff --git a/pype/standalonepublish/resources/menu_disabled.png b/pype/standalonepublish/resources/menu_disabled.png
new file mode 100644
index 0000000000..e4758f0b19
Binary files /dev/null and b/pype/standalonepublish/resources/menu_disabled.png differ
diff --git a/pype/standalonepublish/resources/menu_hover.png b/pype/standalonepublish/resources/menu_hover.png
new file mode 100644
index 0000000000..dfe8ed53b2
Binary files /dev/null and b/pype/standalonepublish/resources/menu_hover.png differ
diff --git a/pype/standalonepublish/resources/menu_pressed.png b/pype/standalonepublish/resources/menu_pressed.png
new file mode 100644
index 0000000000..a5f931b2c4
Binary files /dev/null and b/pype/standalonepublish/resources/menu_pressed.png differ
diff --git a/pype/standalonepublish/resources/menu_pressed_hover.png b/pype/standalonepublish/resources/menu_pressed_hover.png
new file mode 100644
index 0000000000..51503add0f
Binary files /dev/null and b/pype/standalonepublish/resources/menu_pressed_hover.png differ
diff --git a/pype/standalonepublish/resources/preview.svg b/pype/standalonepublish/resources/preview.svg
deleted file mode 100644
index 4a9810c1d5..0000000000
--- a/pype/standalonepublish/resources/preview.svg
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
diff --git a/pype/standalonepublish/resources/thumbnail.svg b/pype/standalonepublish/resources/thumbnail.svg
deleted file mode 100644
index dbc228f8c8..0000000000
--- a/pype/standalonepublish/resources/thumbnail.svg
+++ /dev/null
@@ -1,19 +0,0 @@
-
-
diff --git a/pype/standalonepublish/resources/trash.png b/pype/standalonepublish/resources/trash.png
new file mode 100644
index 0000000000..8d12d5f8e0
Binary files /dev/null and b/pype/standalonepublish/resources/trash.png differ
diff --git a/pype/standalonepublish/resources/trash.svg b/pype/standalonepublish/resources/trash.svg
deleted file mode 100644
index 07905024c0..0000000000
--- a/pype/standalonepublish/resources/trash.svg
+++ /dev/null
@@ -1,23 +0,0 @@
-
-
-
-
diff --git a/pype/standalonepublish/resources/trash_disabled.png b/pype/standalonepublish/resources/trash_disabled.png
new file mode 100644
index 0000000000..06f5ae5276
Binary files /dev/null and b/pype/standalonepublish/resources/trash_disabled.png differ
diff --git a/pype/standalonepublish/resources/trash_hover.png b/pype/standalonepublish/resources/trash_hover.png
new file mode 100644
index 0000000000..4725c0f8ab
Binary files /dev/null and b/pype/standalonepublish/resources/trash_hover.png differ
diff --git a/pype/standalonepublish/resources/trash_pressed.png b/pype/standalonepublish/resources/trash_pressed.png
new file mode 100644
index 0000000000..901b0e6d35
Binary files /dev/null and b/pype/standalonepublish/resources/trash_pressed.png differ
diff --git a/pype/standalonepublish/resources/trash_pressed_hover.png b/pype/standalonepublish/resources/trash_pressed_hover.png
new file mode 100644
index 0000000000..076ced260f
Binary files /dev/null and b/pype/standalonepublish/resources/trash_pressed_hover.png differ
diff --git a/pype/standalonepublish/widgets/widget_component_item.py b/pype/standalonepublish/widgets/widget_component_item.py
index 9631fed258..6275238412 100644
--- a/pype/standalonepublish/widgets/widget_component_item.py
+++ b/pype/standalonepublish/widgets/widget_component_item.py
@@ -1,21 +1,19 @@
import os
from . import QtCore, QtGui, QtWidgets
-from . import SvgButton
from . import get_resource
-from avalon import style
+from pypeapp import style
class ComponentItem(QtWidgets.QFrame):
- C_NORMAL = '#777777'
- C_HOVER = '#ffffff'
- C_ACTIVE = '#4BB543'
- C_ACTIVE_HOVER = '#4BF543'
signal_remove = QtCore.Signal(object)
signal_thumbnail = QtCore.Signal(object)
signal_preview = QtCore.Signal(object)
signal_repre_change = QtCore.Signal(object, object)
+ preview_text = "PREVIEW"
+ thumbnail_text = "THUMBNAIL"
+
def __init__(self, parent, main_parent):
super().__init__()
self.has_valid_repre = True
@@ -55,10 +53,8 @@ class ComponentItem(QtWidgets.QFrame):
self.icon.setText("")
self.icon.setScaledContents(True)
- self.btn_action_menu = SvgButton(
- get_resource('menu.svg'), 22, 22,
- [self.C_NORMAL, self.C_HOVER],
- frame_image_info, False
+ self.btn_action_menu = PngButton(
+ name="menu", size=QtCore.QSize(22, 22)
)
self.action_menu = QtWidgets.QMenu()
@@ -85,7 +81,9 @@ class ComponentItem(QtWidgets.QFrame):
self.file_info.setStyleSheet('padding-left:3px;')
- expanding_sizePolicy.setHeightForWidth(self.name.sizePolicy().hasHeightForWidth())
+ expanding_sizePolicy.setHeightForWidth(
+ self.name.sizePolicy().hasHeightForWidth()
+ )
frame_name_repre = QtWidgets.QFrame(frame)
@@ -101,7 +99,8 @@ class ComponentItem(QtWidgets.QFrame):
layout.addWidget(self.ext, alignment=QtCore.Qt.AlignRight)
frame_name_repre.setSizePolicy(
- QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding
+ QtWidgets.QSizePolicy.MinimumExpanding,
+ QtWidgets.QSizePolicy.MinimumExpanding
)
# Repre + icons
@@ -124,17 +123,8 @@ class ComponentItem(QtWidgets.QFrame):
frame_icons = QtWidgets.QFrame(frame_repre_icons)
- self.preview = SvgButton(
- get_resource('preview.svg'), 64, 18,
- [self.C_NORMAL, self.C_HOVER, self.C_ACTIVE, self.C_ACTIVE_HOVER],
- frame_icons
- )
-
- self.thumbnail = SvgButton(
- get_resource('thumbnail.svg'), 84, 18,
- [self.C_NORMAL, self.C_HOVER, self.C_ACTIVE, self.C_ACTIVE_HOVER],
- frame_icons
- )
+ self.preview = LightingButton(self.preview_text)
+ self.thumbnail = LightingButton(self.thumbnail_text)
layout = QtWidgets.QHBoxLayout(frame_icons)
layout.setSpacing(6)
@@ -162,12 +152,7 @@ class ComponentItem(QtWidgets.QFrame):
layout_main.addWidget(frame_middle)
- self.remove = SvgButton(
- get_resource('trash.svg'), 22, 22,
- [self.C_NORMAL, self.C_HOVER],
- frame, False
- )
-
+ self.remove = PngButton(name="trash", size=QtCore.QSize(22, 22))
layout_main.addWidget(self.remove)
layout = QtWidgets.QVBoxLayout(self)
@@ -272,16 +257,16 @@ class ComponentItem(QtWidgets.QFrame):
self.signal_repre_change.emit(self, repre_name)
def is_thumbnail(self):
- return self.thumbnail.checked
+ return self.thumbnail.isChecked()
def change_thumbnail(self, hover=True):
- self.thumbnail.change_checked(hover)
+ self.thumbnail.setChecked(hover)
def is_preview(self):
- return self.preview.checked
+ return self.preview.isChecked()
def change_preview(self, hover=True):
- self.preview.change_checked(hover)
+ self.preview.setChecked(hover)
def collect_data(self):
in_files = self.in_data['files']
@@ -309,3 +294,229 @@ class ComponentItem(QtWidgets.QFrame):
data['fps'] = self.in_data['fps']
return data
+
+
+class LightingButton(QtWidgets.QPushButton):
+ lightingbtnstyle = """
+ QPushButton {
+ font: %(font_size_pt)spt;
+ text-align: center;
+ color: #777777;
+ background-color: transparent;
+ border-width: 1px;
+ border-color: #777777;
+ border-style: solid;
+ padding-top: 0px;
+ padding-bottom: 0px;
+ padding-left: 3px;
+ padding-right: 3px;
+ border-radius: 3px;
+ }
+
+ QPushButton:hover {
+ border-color: #cccccc;
+ color: #cccccc;
+ }
+
+ QPushButton:pressed {
+ border-color: #ffffff;
+ color: #ffffff;
+ }
+
+ QPushButton:disabled {
+ border-color: #3A3939;
+ color: #3A3939;
+ }
+
+ QPushButton:checked {
+ border-color: #4BB543;
+ color: #4BB543;
+ }
+
+ QPushButton:checked:hover {
+ border-color: #4Bd543;
+ color: #4Bd543;
+ }
+
+ QPushButton:checked:pressed {
+ border-color: #4BF543;
+ color: #4BF543;
+ }
+ """
+
+ def __init__(self, text, font_size_pt=8, *args, **kwargs):
+ super(LightingButton, self).__init__(text, *args, **kwargs)
+ self.setStyleSheet(self.lightingbtnstyle % {
+ "font_size_pt": font_size_pt
+ })
+ self.setCheckable(True)
+
+
+class PngFactory:
+ png_names = {
+ "trash": {
+ "normal": QtGui.QIcon(get_resource("trash.png")),
+ "hover": QtGui.QIcon(get_resource("trash_hover.png")),
+ "pressed": QtGui.QIcon(get_resource("trash_pressed.png")),
+ "pressed_hover": QtGui.QIcon(
+ get_resource("trash_pressed_hover.png")
+ ),
+ "disabled": QtGui.QIcon(get_resource("trash_disabled.png"))
+ },
+
+ "menu": {
+ "normal": QtGui.QIcon(get_resource("menu.png")),
+ "hover": QtGui.QIcon(get_resource("menu_hover.png")),
+ "pressed": QtGui.QIcon(get_resource("menu_pressed.png")),
+ "pressed_hover": QtGui.QIcon(
+ get_resource("menu_pressed_hover.png")
+ ),
+ "disabled": QtGui.QIcon(get_resource("menu_disabled.png"))
+ }
+ }
+
+
+class PngButton(QtWidgets.QPushButton):
+ png_button_style = """
+ QPushButton {
+ border: none;
+ background-color: transparent;
+ padding-top: 0px;
+ padding-bottom: 0px;
+ padding-left: 0px;
+ padding-right: 0px;
+ }
+ QPushButton:hover {}
+ QPushButton:pressed {}
+ QPushButton:disabled {}
+ QPushButton:checked {}
+ QPushButton:checked:hover {}
+ QPushButton:checked:pressed {}
+ """
+
+ def __init__(
+ self, name=None, path=None, hover_path=None, pressed_path=None,
+ hover_pressed_path=None, disabled_path=None,
+ size=None, *args, **kwargs
+ ):
+ self._hovered = False
+ self._pressed = False
+ super(PngButton, self).__init__(*args, **kwargs)
+ self.setStyleSheet(self.png_button_style)
+
+ png_dict = {}
+ if name:
+ png_dict = PngFactory.png_names.get(name) or {}
+ if not png_dict:
+ print((
+ "WARNING: There is not set icon with name \"{}\""
+ "in PngFactory!"
+ ).format(name))
+
+ ico_normal = png_dict.get("normal")
+ ico_hover = png_dict.get("hover")
+ ico_pressed = png_dict.get("pressed")
+ ico_hover_pressed = png_dict.get("pressed_hover")
+ ico_disabled = png_dict.get("disabled")
+
+ if path:
+ ico_normal = QtGui.QIcon(path)
+ if hover_path:
+ ico_hover = QtGui.QIcon(hover_path)
+
+ if pressed_path:
+            ico_pressed = QtGui.QIcon(pressed_path)
+
+ if hover_pressed_path:
+ ico_hover_pressed = QtGui.QIcon(hover_pressed_path)
+
+ if disabled_path:
+ ico_disabled = QtGui.QIcon(disabled_path)
+
+ self.setIcon(ico_normal)
+ if size:
+ self.setIconSize(size)
+ self.setMaximumSize(size)
+
+ self.ico_normal = ico_normal
+ self.ico_hover = ico_hover
+ self.ico_pressed = ico_pressed
+ self.ico_hover_pressed = ico_hover_pressed
+ self.ico_disabled = ico_disabled
+
+ def setDisabled(self, in_bool):
+ super(PngButton, self).setDisabled(in_bool)
+ icon = self.ico_normal
+ if in_bool and self.ico_disabled:
+ icon = self.ico_disabled
+ self.setIcon(icon)
+
+ def enterEvent(self, event):
+ self._hovered = True
+ if not self.isEnabled():
+ return
+ icon = self.ico_normal
+ if self.ico_hover:
+ icon = self.ico_hover
+
+ if self._pressed and self.ico_hover_pressed:
+ icon = self.ico_hover_pressed
+
+ if self.icon() != icon:
+ self.setIcon(icon)
+
+ def mouseMoveEvent(self, event):
+ super(PngButton, self).mouseMoveEvent(event)
+ if self._pressed:
+ mouse_pos = event.pos()
+ hovering = self.rect().contains(mouse_pos)
+ if hovering and not self._hovered:
+ self.enterEvent(event)
+ elif not hovering and self._hovered:
+ self.leaveEvent(event)
+
+ def leaveEvent(self, event):
+ self._hovered = False
+ if not self.isEnabled():
+ return
+ icon = self.ico_normal
+ if self._pressed and self.ico_pressed:
+ icon = self.ico_pressed
+
+ if self.icon() != icon:
+ self.setIcon(icon)
+
+ def mousePressEvent(self, event):
+ self._pressed = True
+ if not self.isEnabled():
+ return
+ icon = self.ico_hover
+ if self.ico_pressed:
+ icon = self.ico_pressed
+
+ if self.ico_hover_pressed:
+ mouse_pos = event.pos()
+ if self.rect().contains(mouse_pos):
+ icon = self.ico_hover_pressed
+
+ if icon is None:
+ icon = self.ico_normal
+
+ if self.icon() != icon:
+ self.setIcon(icon)
+
+ def mouseReleaseEvent(self, event):
+ if not self.isEnabled():
+ return
+ if self._pressed:
+ self._pressed = False
+ mouse_pos = event.pos()
+ if self.rect().contains(mouse_pos):
+ self.clicked.emit()
+
+ icon = self.ico_normal
+ if self._hovered and self.ico_hover:
+ icon = self.ico_hover
+
+ if self.icon() != icon:
+ self.setIcon(icon)
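
A minimal usage sketch of the two new button classes that replace SvgButton; it assumes a running QApplication and that the "menu"/"trash" PNG resources resolve via get_resource:

    menu_btn = PngButton(name="menu", size=QtCore.QSize(22, 22))
    thumb_btn = LightingButton("THUMBNAIL")
    thumb_btn.setChecked(True)      # green "checked" styling from the stylesheet
    print(thumb_btn.isChecked())    # True
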
diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py
index ba8ab44cf8..73b9f0e179 100644
--- a/pype/standalonepublish/widgets/widget_drop_frame.py
+++ b/pype/standalonepublish/widgets/widget_drop_frame.py
@@ -92,28 +92,32 @@ class DropDataFrame(QtWidgets.QFrame):
self._refresh_view()
def _set_thumbnail(self, in_item):
+ current_state = in_item.is_thumbnail()
+ in_item.change_thumbnail(not current_state)
+
checked_item = None
for item in self.components_list.widgets():
if item.is_thumbnail():
checked_item = item
break
- if checked_item is None or checked_item == in_item:
- in_item.change_thumbnail()
- else:
+ if checked_item is not None and checked_item != in_item:
checked_item.change_thumbnail(False)
- in_item.change_thumbnail()
+
+ in_item.change_thumbnail(current_state)
def _set_preview(self, in_item):
+ current_state = in_item.is_preview()
+ in_item.change_preview(not current_state)
+
checked_item = None
for item in self.components_list.widgets():
if item.is_preview():
checked_item = item
break
- if checked_item is None or checked_item == in_item:
- in_item.change_preview()
- else:
+ if checked_item is not None and checked_item != in_item:
checked_item.change_preview(False)
- in_item.change_preview()
+
+ in_item.change_preview(current_state)
def _remove_item(self, in_item):
valid_repre = in_item.has_valid_repre is True
diff --git a/pype/tools/assetcreator/widget.py b/pype/tools/assetcreator/widget.py
index 75e793479a..1e9e4ab624 100644
--- a/pype/tools/assetcreator/widget.py
+++ b/pype/tools/assetcreator/widget.py
@@ -3,9 +3,8 @@ import contextlib
import collections
from avalon.vendor import qtawesome
-from avalon.vendor.Qt import QtWidgets, QtCore, QtGui
-from avalon import io
-from avalon import style
+from Qt import QtWidgets, QtCore, QtGui
+from avalon import style, io
from .model import (
TreeModel,
diff --git a/res/app_icons/blender.png b/res/app_icons/blender.png
new file mode 100644
index 0000000000..6070a51fae
Binary files /dev/null and b/res/app_icons/blender.png differ
diff --git a/res/ftrack/action_icons/Delivery.svg b/res/ftrack/action_icons/Delivery.svg
new file mode 100644
index 0000000000..3380487c31
--- /dev/null
+++ b/res/ftrack/action_icons/Delivery.svg
@@ -0,0 +1,34 @@
+
+
diff --git a/setup/blender/init.py b/setup/blender/init.py
new file mode 100644
index 0000000000..05c15eaeb2
--- /dev/null
+++ b/setup/blender/init.py
@@ -0,0 +1,3 @@
+from pype import blender
+
+blender.install()