diff --git a/pype/__init__.py b/pype/__init__.py
index a5858f49e7..bcbedc9a90 100644
--- a/pype/__init__.py
+++ b/pype/__init__.py
@@ -3,6 +3,8 @@ import os
from pyblish import api as pyblish
from avalon import api as avalon
from .lib import filter_pyblish_plugins
+from pypeapp import config
+
import logging
log = logging.getLogger(__name__)
@@ -16,6 +18,51 @@ PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "global", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "global", "load")
+# We are monkey-patching `avalon.api.discover()` to allow us to load
+# plugin presets for plugins being discovered by avalon. A little bit
+# of hacking, but it allows us to add our own features without the
+# need to modify upstream code.
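+#
+# Illustrative preset layout this code reads (the host and plugin
+# names below are examples only, not part of this change):
+#
+#   {"plugins": {"nuke": {"load": {"SomeLoader": {"enabled": false}}}}}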
+
+_original_discover = avalon.discover
+
+
+def patched_discover(superclass):
+ """
+    Monkey-patched version of :func:`avalon.api.discover()`. It allows
+    us to apply presets to plugins as they are discovered.
+ """
+ # run original discover and get plugins
+ plugins = _original_discover(superclass)
+
+ # determine host application to use for finding presets
+ host = avalon.registered_host().__name__.split(".")[-1]
+
+    # map plugin superclass to preset json. Currently supported are load
+    # and create (avalon.api.Loader and avalon.api.Creator)
+ plugin_type = "undefined"
+ if superclass.__name__.split(".")[-1] == "Loader":
+ plugin_type = "load"
+ elif superclass.__name__.split(".")[-1] == "Creator":
+ plugin_type = "create"
+
+ print(">>> trying to find presets for {}:{} ...".format(host, plugin_type))
+ try:
+ config_data = config.get_presets()['plugins'][host][plugin_type]
+ except KeyError:
+ print("*** no presets found.")
+ else:
+ for plugin in plugins:
+ if plugin.__name__ in config_data:
+ print(">>> We have preset for {}".format(plugin.__name__))
+ for option, value in config_data[plugin.__name__].items():
+ if option == "enabled" and value is False:
+ setattr(plugin, "active", False)
+ print(" - is disabled by preset")
+ else:
+ setattr(plugin, option, value)
+ print(" - setting `{}`: `{}`".format(option, value))
+ return plugins
+
def install():
log.info("Registering global plug-ins..")
@@ -23,6 +70,9 @@ def install():
pyblish.register_discovery_filter(filter_pyblish_plugins)
avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
+ # apply monkey patched discover to original one
+ avalon.discover = patched_discover
+
def uninstall():
log.info("Deregistering global plug-ins..")
@@ -30,3 +80,6 @@ def uninstall():
pyblish.deregister_discovery_filter(filter_pyblish_plugins)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
log.info("Global plug-ins unregistred")
+
+ # restore original discover
+ avalon.discover = _original_discover
diff --git a/pype/api.py b/pype/api.py
index 2c626f36a3..2c227b5b4b 100644
--- a/pype/api.py
+++ b/pype/api.py
@@ -23,6 +23,7 @@ from .lib import (
get_asset,
get_project,
get_hierarchy,
+ get_subsets,
get_version_from_path,
modified_environ,
add_tool_to_environment
@@ -53,6 +54,7 @@ __all__ = [
"get_project",
"get_hierarchy",
"get_asset",
+ "get_subsets",
"get_version_from_path",
"modified_environ",
"add_tool_to_environment",
diff --git a/pype/clockify/__init__.py b/pype/clockify/__init__.py
index 063f88db73..aab0d048de 100644
--- a/pype/clockify/__init__.py
+++ b/pype/clockify/__init__.py
@@ -1,11 +1,13 @@
from .clockify_api import ClockifyAPI
from .widget_settings import ClockifySettings
+from .widget_message import MessageWidget
from .clockify import ClockifyModule
__all__ = [
- 'ClockifyAPI',
- 'ClockifySettings',
- 'ClockifyModule'
+ "ClockifyAPI",
+ "ClockifySettings",
+ "ClockifyModule",
+ "MessageWidget"
]
def tray_init(tray_widget, main_widget):
diff --git a/pype/clockify/clockify.py b/pype/clockify/clockify.py
index 1476c3d488..5e6cfec778 100644
--- a/pype/clockify/clockify.py
+++ b/pype/clockify/clockify.py
@@ -1,16 +1,19 @@
import os
import threading
-from pypeapp import style
+from pypeapp import style, Logger
from Qt import QtWidgets
-from pype.clockify import ClockifySettings, ClockifyAPI
+from . import ClockifySettings, ClockifyAPI, MessageWidget
class ClockifyModule:
def __init__(self, main_parent=None, parent=None):
+ self.log = Logger().get_logger(self.__class__.__name__, "PypeTray")
+
self.main_parent = main_parent
self.parent = parent
self.clockapi = ClockifyAPI()
+ self.message_widget = None
self.widget_settings = ClockifySettings(main_parent, self)
self.widget_settings_required = None
@@ -74,6 +77,7 @@ class ClockifyModule:
self.timer_manager.start_timers(data)
def timer_stopped(self):
+ self.bool_timer_run = False
if hasattr(self, 'timer_manager'):
self.timer_manager.stop_timers()
@@ -102,26 +106,37 @@ class ClockifyModule:
if self.bool_timer_run != bool_timer_run:
if self.bool_timer_run is True:
self.timer_stopped()
- else:
+ elif self.bool_timer_run is False:
actual_timer = self.clockapi.get_in_progress()
if not actual_timer:
continue
- actual_project_id = actual_timer["projectId"]
- project = self.clockapi.get_project_by_id(
- actual_project_id
- )
+ actual_proj_id = actual_timer["projectId"]
+ if not actual_proj_id:
+ continue
+
+ project = self.clockapi.get_project_by_id(actual_proj_id)
+ if project and project.get("code") == 501:
+ continue
+
project_name = project["name"]
actual_timer_hierarchy = actual_timer["description"]
hierarchy_items = actual_timer_hierarchy.split("/")
+ # Each pype timer must have at least 2 items!
+ if len(hierarchy_items) < 2:
+ continue
task_name = hierarchy_items[-1]
hierarchy = hierarchy_items[:-1]
+ task_type = None
+ if len(actual_timer.get("tags", [])) > 0:
+ task_type = actual_timer["tags"][0].get("name")
data = {
"task_name": task_name,
"hierarchy": hierarchy,
- "project_name": project_name
+ "project_name": project_name,
+ "task_type": task_type
}
self.timer_started(data)
@@ -134,9 +149,23 @@ class ClockifyModule:
self.clockapi.finish_time_entry()
if self.bool_timer_run:
self.timer_stopped()
- self.bool_timer_run = False
+
+ def signed_in(self):
+ if hasattr(self, 'timer_manager'):
+ if not self.timer_manager:
+ return
+
+ if not self.timer_manager.last_task:
+ return
+
+ if self.timer_manager.is_running:
+ self.start_timer_manager(self.timer_manager.last_task)
def start_timer(self, input_data):
+        # Skip if no API key is entered
+ if not self.clockapi.get_api_key():
+ return
+
actual_timer = self.clockapi.get_in_progress()
actual_timer_hierarchy = None
actual_project_id = None
@@ -144,11 +173,31 @@ class ClockifyModule:
actual_timer_hierarchy = actual_timer.get("description")
actual_project_id = actual_timer.get("projectId")
+ # Concatenate hierarchy and task to get description
desc_items = [val for val in input_data.get("hierarchy", [])]
desc_items.append(input_data["task_name"])
description = "/".join(desc_items)
- project_id = self.clockapi.get_project_id(input_data["project_name"])
+ # Check project existence
+ project_name = input_data["project_name"]
+ project_id = self.clockapi.get_project_id(project_name)
+ if not project_id:
+ self.log.warning((
+ "Project \"{}\" was not found in Clockify. Timer won't start."
+ ).format(project_name))
+
+ msg = (
+ "Project \"{}\" is not in Clockify Workspace \"{}\"."
+                "<br>Please inform your Project Manager."
+ ).format(project_name, str(self.clockapi.workspace))
+
+ self.message_widget = MessageWidget(
+ self.main_parent, msg, "Clockify - Info Message"
+ )
+ self.message_widget.closed.connect(self.on_message_widget_close)
+ self.message_widget.show()
+
+ return
if (
actual_timer is not None and
@@ -158,7 +207,7 @@ class ClockifyModule:
return
tag_ids = []
- task_tag_id = self.clockapi.get_tag_id(input_data["task_name"])
+ task_tag_id = self.clockapi.get_tag_id(input_data["task_type"])
if task_tag_id is not None:
tag_ids.append(task_tag_id)
@@ -166,6 +215,9 @@ class ClockifyModule:
description, project_id, tag_ids=tag_ids
)
+ def on_message_widget_close(self):
+ self.message_widget = None
+
# Definition of Tray menu
def tray_menu(self, parent_menu):
# Menu for Tray App
diff --git a/pype/clockify/clockify_api.py b/pype/clockify/clockify_api.py
index ed932eedce..f012efc002 100644
--- a/pype/clockify/clockify_api.py
+++ b/pype/clockify/clockify_api.py
@@ -25,6 +25,7 @@ class ClockifyAPI(metaclass=Singleton):
fpath = os.path.join(app_dir, file_name)
admin_permission_names = ['WORKSPACE_OWN', 'WORKSPACE_ADMIN']
master_parent = None
+ workspace = None
workspace_id = None
def set_master(self, master_parent):
@@ -43,6 +44,8 @@ class ClockifyAPI(metaclass=Singleton):
if api_key is not None and self.validate_api_key(api_key) is True:
self.headers["X-Api-Key"] = api_key
self.set_workspace()
+ if self.master_parent:
+ self.master_parent.signed_in()
return True
return False
diff --git a/pype/clockify/ftrack_actions/action_clockify_start.py b/pype/clockify/ftrack_actions/action_clockify_start.py
deleted file mode 100644
index 5b54476297..0000000000
--- a/pype/clockify/ftrack_actions/action_clockify_start.py
+++ /dev/null
@@ -1,69 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-
-from pype.vendor import ftrack_api
-from pype.ftrack import BaseAction
-from pype.clockify import ClockifyAPI
-
-
-class StartClockify(BaseAction):
- '''Starts timer on clockify.'''
-
- #: Action identifier.
- identifier = 'clockify.start.timer'
- #: Action label.
- label = 'Clockify - Start timer'
- #: Action description.
- description = 'Starts timer on clockify'
- #: roles that are allowed to register this action
- icon = '{}/app_icons/clockify.png'.format(
- os.environ.get('PYPE_STATICS_SERVER', '')
- )
- #: Clockify api
- clockapi = ClockifyAPI()
-
- def discover(self, session, entities, event):
- if len(entities) != 1:
- return False
- if entities[0].entity_type.lower() != 'task':
- return False
- if self.clockapi.workspace_id is None:
- return False
- return True
-
- def launch(self, session, entities, event):
- task = entities[0]
- task_name = task['type']['name']
- project_name = task['project']['full_name']
-
- def get_parents(entity):
- output = []
- if entity.entity_type.lower() == 'project':
- return output
- output.extend(get_parents(entity['parent']))
- output.append(entity['name'])
-
- return output
-
- desc_items = get_parents(task['parent'])
- desc_items.append(task['name'])
- description = '/'.join(desc_items)
- project_id = self.clockapi.get_project_id(project_name)
- tag_ids = []
- tag_ids.append(self.clockapi.get_tag_id(task_name))
- self.clockapi.start_time_entry(
- description, project_id, tag_ids=tag_ids
- )
-
- return True
-
-
-def register(session, **kw):
- '''Register plugin. Called when used as an plugin.'''
-
- if not isinstance(session, ftrack_api.session.Session):
- return
-
- StartClockify(session).register()
diff --git a/pype/clockify/ftrack_actions/action_clockify_sync.py b/pype/clockify/ftrack_actions/action_clockify_sync.py
index 695f7581c0..e679894d0e 100644
--- a/pype/clockify/ftrack_actions/action_clockify_sync.py
+++ b/pype/clockify/ftrack_actions/action_clockify_sync.py
@@ -17,10 +17,8 @@ class SyncClocify(BaseAction):
label = 'Sync To Clockify'
#: Action description.
description = 'Synchronise data to Clockify workspace'
- #: priority
- priority = 100
#: roles that are allowed to register this action
- role_list = ['Pypeclub', 'Administrator']
+ role_list = ["Pypeclub", "Administrator", "project Manager"]
#: icon
icon = '{}/app_icons/clockify-white.png'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
@@ -28,16 +26,22 @@ class SyncClocify(BaseAction):
#: CLockifyApi
clockapi = ClockifyAPI()
- def register(self):
+ def preregister(self):
if self.clockapi.workspace_id is None:
- raise ValueError('Clockify Workspace or API key are not set!')
+ return "Clockify Workspace or API key are not set!"
if self.clockapi.validate_workspace_perm() is False:
raise MissingPermision('Clockify')
- super().register()
+
+ return True
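+        # Returning a message string here (instead of True) is treated
+        # by the base handler as a failed pre-register; see the
+        # preregister handling in ftrack_base_handler.py in this diff.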
def discover(self, session, entities, event):
''' Validation '''
+ if len(entities) != 1:
+ return False
+
+ if entities[0].entity_type.lower() != "project":
+ return False
return True
def launch(self, session, entities, event):
diff --git a/pype/clockify/widget_message.py b/pype/clockify/widget_message.py
new file mode 100644
index 0000000000..349875b9e5
--- /dev/null
+++ b/pype/clockify/widget_message.py
@@ -0,0 +1,91 @@
+from Qt import QtCore, QtGui, QtWidgets
+from pypeapp import style
+
+
+class MessageWidget(QtWidgets.QWidget):
+
+ SIZE_W = 300
+ SIZE_H = 130
+
+ closed = QtCore.Signal()
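+    # Emitted from close() so the owner can drop its reference to this
+    # widget (see ClockifyModule.on_message_widget_close in this diff).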
+
+ def __init__(self, parent=None, messages=[], title="Message"):
+
+ super(MessageWidget, self).__init__()
+
+ self._parent = parent
+
+ # Icon
+ if parent and hasattr(parent, 'icon'):
+ self.setWindowIcon(parent.icon)
+ else:
+ from pypeapp.resources import get_resource
+ self.setWindowIcon(QtGui.QIcon(get_resource('icon.png')))
+
+ self.setWindowFlags(
+ QtCore.Qt.WindowCloseButtonHint |
+ QtCore.Qt.WindowMinimizeButtonHint
+ )
+
+ # Font
+ self.font = QtGui.QFont()
+ self.font.setFamily("DejaVu Sans Condensed")
+ self.font.setPointSize(9)
+ self.font.setBold(True)
+ self.font.setWeight(50)
+ self.font.setKerning(True)
+
+ # Size setting
+ self.resize(self.SIZE_W, self.SIZE_H)
+ self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
+ self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100))
+
+ # Style
+ self.setStyleSheet(style.load_stylesheet())
+
+ self.setLayout(self._ui_layout(messages))
+ self.setWindowTitle(title)
+
+ def _ui_layout(self, messages):
+ if not messages:
+ messages = ["*Misssing messages (This is a bug)*", ]
+
+ elif not isinstance(messages, (tuple, list)):
+ messages = [messages, ]
+
+ main_layout = QtWidgets.QVBoxLayout(self)
+
+ labels = []
+ for message in messages:
+ label = QtWidgets.QLabel(message)
+ label.setFont(self.font)
+ label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
+ label.setTextFormat(QtCore.Qt.RichText)
+ label.setWordWrap(True)
+
+ labels.append(label)
+ main_layout.addWidget(label)
+
+ btn_close = QtWidgets.QPushButton("Close")
+ btn_close.setToolTip('Close this window')
+ btn_close.clicked.connect(self.on_close_clicked)
+
+ btn_group = QtWidgets.QHBoxLayout()
+ btn_group.addStretch(1)
+ btn_group.addWidget(btn_close)
+
+ main_layout.addLayout(btn_group)
+
+ self.labels = labels
+ self.btn_group = btn_group
+ self.btn_close = btn_close
+ self.main_layout = main_layout
+
+ return main_layout
+
+ def on_close_clicked(self):
+ self.close()
+
+ def close(self, *args, **kwargs):
+ self.closed.emit()
+ super(MessageWidget, self).close(*args, **kwargs)
diff --git a/pype/ftrack/actions/action_attributes_remapper.py b/pype/ftrack/actions/action_attributes_remapper.py
index db33fd1365..2c4899410d 100644
--- a/pype/ftrack/actions/action_attributes_remapper.py
+++ b/pype/ftrack/actions/action_attributes_remapper.py
@@ -2,7 +2,7 @@ import os
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
class AttributesRemapper(BaseAction):
@@ -275,7 +275,7 @@ class AttributesRemapper(BaseAction):
         message = {'type': 'label', 'value': '<p>{}</p>'.format(value)}
items.append(message)
- self.show_interface(event, items, title)
+ self.show_interface(items=items, title=title, event=event)
def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
diff --git a/pype/ftrack/actions/action_create_folders.py b/pype/ftrack/actions/action_create_folders.py
index b9e10f7c30..269316e052 100644
--- a/pype/ftrack/actions/action_create_folders.py
+++ b/pype/ftrack/actions/action_create_folders.py
@@ -7,7 +7,7 @@ import re
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from avalon import lib as avalonlib
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
from pypeapp import config, Anatomy
diff --git a/pype/ftrack/actions/action_cust_attr_doctor.py b/pype/ftrack/actions/action_cust_attr_doctor.py
index 1b8f250e5b..b875f52ab8 100644
--- a/pype/ftrack/actions/action_cust_attr_doctor.py
+++ b/pype/ftrack/actions/action_cust_attr_doctor.py
@@ -23,10 +23,10 @@ class CustomAttributeDoctor(BaseAction):
icon = '{}/ftrack/action_icons/PypeDoctor.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
- hierarchical_ca = ['handle_start', 'handle_end', 'fstart', 'fend']
+ hierarchical_ca = ['handleStart', 'handleEnd', 'frameStart', 'frameEnd']
hierarchical_alternatives = {
- 'handle_start': 'handles',
- 'handle_end': 'handles'
+ 'handleStart': 'handles',
+ 'handleEnd': 'handles'
}
# Roles for new custom attributes
@@ -34,22 +34,22 @@ class CustomAttributeDoctor(BaseAction):
write_roles = ['ALL',]
data_ca = {
- 'handle_start': {
+ 'handleStart': {
'label': 'Frame handles start',
'type': 'number',
'config': json.dumps({'isdecimal': False})
},
- 'handle_end': {
+ 'handleEnd': {
'label': 'Frame handles end',
'type': 'number',
'config': json.dumps({'isdecimal': False})
},
- 'fstart': {
+ 'frameStart': {
'label': 'Frame start',
'type': 'number',
'config': json.dumps({'isdecimal': False})
},
- 'fend': {
+ 'frameEnd': {
'label': 'Frame end',
'type': 'number',
'config': json.dumps({'isdecimal': False})
diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py
index a408de45b2..1b1e7fc905 100644
--- a/pype/ftrack/actions/action_delete_asset.py
+++ b/pype/ftrack/actions/action_delete_asset.py
@@ -5,7 +5,7 @@ from bson.objectid import ObjectId
import argparse
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
class DeleteAsset(BaseAction):
diff --git a/pype/ftrack/actions/action_delete_asset_byname.py b/pype/ftrack/actions/action_delete_asset_byname.py
index 4f2a0e515c..2431b2311e 100644
--- a/pype/ftrack/actions/action_delete_asset_byname.py
+++ b/pype/ftrack/actions/action_delete_asset_byname.py
@@ -4,7 +4,7 @@ import logging
import argparse
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
class AssetsRemover(BaseAction):
diff --git a/pype/ftrack/actions/action_prepare_project.py b/pype/ftrack/actions/action_prepare_project.py
index 60a7435907..e914fa74f0 100644
--- a/pype/ftrack/actions/action_prepare_project.py
+++ b/pype/ftrack/actions/action_prepare_project.py
@@ -1,11 +1,14 @@
import os
import json
+from ruamel import yaml
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from pypeapp import config
from pype.ftrack.lib import get_avalon_attr
+from pype.vendor.ftrack_api import session as fa_session
+
class PrepareProject(BaseAction):
'''Edit meta data action.'''
@@ -22,6 +25,9 @@ class PrepareProject(BaseAction):
os.environ.get('PYPE_STATICS_SERVER', '')
)
+    # Key to store info about triggering create folder structure
+ create_project_structure_key = "create_folder_structure"
+
def discover(self, session, entities, event):
''' Validation '''
if len(entities) != 1:
@@ -41,9 +47,9 @@ class PrepareProject(BaseAction):
self.log.debug("Loading custom attributes")
cust_attrs, hier_cust_attrs = get_avalon_attr(session, True)
- project_defaults = config.get_presets().get("ftrack", {}).get(
- "project_defaults", {}
- )
+ project_defaults = config.get_presets(
+ entities[0]["full_name"]
+ ).get("ftrack", {}).get("project_defaults", {})
self.log.debug("Preparing data which will be shown")
attributes_to_set = {}
@@ -74,8 +80,29 @@ class PrepareProject(BaseAction):
str([key for key in attributes_to_set])
))
- title = "Set Attribute values"
+ item_splitter = {'type': 'label', 'value': '---'}
+ title = "Prepare Project"
items = []
+
+ # Ask if want to trigger Action Create Folder Structure
+ items.append({
+ "type": "label",
+ "value": "Want to create basic Folder Structure?
"
+ })
+
+ items.append({
+ "name": self.create_project_structure_key,
+ "type": "boolean",
+ "value": False,
+ "label": "Check if Yes"
+ })
+
+ items.append(item_splitter)
+ items.append({
+ "type": "label",
+ "value": "Set basic Attributes:
"
+ })
+
multiselect_enumerators = []
# This item will be last (before enumerators)
@@ -88,8 +115,6 @@ class PrepareProject(BaseAction):
"label": "AutoSync to Avalon"
}
- item_splitter = {'type': 'label', 'value': '---'}
-
for key, in_data in attributes_to_set.items():
attr = in_data["object"]
@@ -195,6 +220,10 @@ class PrepareProject(BaseAction):
return
in_data = event['data']['values']
+
+ # pop out info about creating project structure
+ create_proj_struct = in_data.pop(self.create_project_structure_key)
+
# Find hidden items for multiselect enumerators
keys_to_process = []
for key in in_data:
@@ -228,8 +257,117 @@ class PrepareProject(BaseAction):
session.commit()
+ # Create project structure
+ self.create_project_specific_config(entities[0]["full_name"], in_data)
+
+ # Trigger Create Project Structure action
+ if create_proj_struct is True:
+ self.trigger_action("create.project.structure", event)
+
return True
+ def create_project_specific_config(self, project_name, json_data):
+ self.log.debug("*** Creating project specifig configs ***")
+
+ path_proj_configs = os.environ.get('PYPE_PROJECT_CONFIGS', "")
+
+ # Skip if PYPE_PROJECT_CONFIGS is not set
+ # TODO show user OS message
+ if not path_proj_configs:
+ self.log.warning((
+ "Environment variable \"PYPE_PROJECT_CONFIGS\" is not set."
+ " Project specific config can't be set."
+ ))
+ return
+
+ path_proj_configs = os.path.normpath(path_proj_configs)
+ # Skip if path does not exist
+ # TODO create if not exist?!!!
+ if not os.path.exists(path_proj_configs):
+ self.log.warning((
+ "Path set in Environment variable \"PYPE_PROJECT_CONFIGS\""
+ " Does not exist."
+ ))
+ return
+
+ project_specific_path = os.path.normpath(
+ os.path.join(path_proj_configs, project_name)
+ )
+ if not os.path.exists(project_specific_path):
+ os.makedirs(project_specific_path)
+ self.log.debug((
+ "Project specific config folder for project \"{}\" created."
+ ).format(project_name))
+
+ # Anatomy ####################################
+ self.log.debug("--- Processing Anatomy Begins: ---")
+
+ anatomy_dir = os.path.normpath(os.path.join(
+ project_specific_path, "anatomy"
+ ))
+ anatomy_path = os.path.normpath(os.path.join(
+ anatomy_dir, "default.yaml"
+ ))
+
+ anatomy = None
+ if os.path.exists(anatomy_path):
+ self.log.debug(
+ "Anatomy file already exist. Trying to read: \"{}\"".format(
+ anatomy_path
+ )
+ )
+ # Try to load data
+ with open(anatomy_path, 'r') as file_stream:
+ try:
+ anatomy = yaml.load(file_stream, Loader=yaml.loader.Loader)
+ self.log.debug("Reading Anatomy file was successful")
+ except yaml.YAMLError as exc:
+ self.log.warning(
+ "Reading Yaml file failed: \"{}\"".format(anatomy_path),
+ exc_info=True
+ )
+
+ if not anatomy:
+ self.log.debug("Anatomy is not set. Duplicating default.")
+ # Create Anatomy folder
+ if not os.path.exists(anatomy_dir):
+ self.log.debug(
+ "Creating Anatomy folder: \"{}\"".format(anatomy_dir)
+ )
+ os.makedirs(anatomy_dir)
+
+ source_items = [
+ os.environ["PYPE_CONFIG"], "anatomy", "default.yaml"
+ ]
+
+ source_path = os.path.normpath(os.path.join(*source_items))
+ with open(source_path, 'r') as file_stream:
+ source_data = file_stream.read()
+
+ with open(anatomy_path, 'w') as file_stream:
+ file_stream.write(source_data)
+
+ # Presets ####################################
+ self.log.debug("--- Processing Presets Begins: ---")
+
+ project_defaults_dir = os.path.normpath(os.path.join(*[
+ project_specific_path, "presets", "ftrack"
+ ]))
+ project_defaults_path = os.path.normpath(os.path.join(*[
+ project_defaults_dir, "project_defaults.json"
+ ]))
+ # Create folder if not exist
+ if not os.path.exists(project_defaults_dir):
+ self.log.debug("Creating Ftrack Presets folder: \"{}\"".format(
+ project_defaults_dir
+ ))
+ os.makedirs(project_defaults_dir)
+
+ with open(project_defaults_path, 'w') as file_stream:
+ json.dump(json_data, file_stream, indent=4)
+
+ self.log.debug("*** Creating project specifig configs Finished ***")
+
def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
diff --git a/pype/ftrack/actions/action_start_timer.py b/pype/ftrack/actions/action_start_timer.py
index ad83edfc9e..36752a1edc 100644
--- a/pype/ftrack/actions/action_start_timer.py
+++ b/pype/ftrack/actions/action_start_timer.py
@@ -19,55 +19,19 @@ class StartTimer(BaseAction):
entity = entities[0]
if entity.entity_type.lower() != 'task':
return
- self.start_ftrack_timer(entity)
- try:
- self.start_clockify_timer(entity)
- except Exception:
- self.log.warning(
- 'Failed starting Clockify timer for task: ' + entity['name']
- )
- return
- def start_ftrack_timer(self, task):
- user_query = 'User where username is "{}"'.format(self.session.api_user)
- user = self.session.query(user_query).one()
- self.log.info('Starting Ftrack timer for task: ' + task['name'])
- user.start_timer(task, force=True)
+ user = self.session.query(
+ "User where username is \"{}\"".format(self.session.api_user)
+ ).one()
+
+ user.start_timer(entity, force=True)
self.session.commit()
-
- def start_clockify_timer(self, task):
- # Validate Clockify settings if Clockify is required
- clockify_timer = os.environ.get('CLOCKIFY_WORKSPACE', None)
- if clockify_timer is None:
- return
-
- from pype.clockify import ClockifyAPI
- clockapi = ClockifyAPI()
- if clockapi.verify_api() is False:
- return
- task_type = task['type']['name']
- project_name = task['project']['full_name']
-
- def get_parents(entity):
- output = []
- if entity.entity_type.lower() == 'project':
- return output
- output.extend(get_parents(entity['parent']))
- output.append(entity['name'])
-
- return output
-
- desc_items = get_parents(task['parent'])
- desc_items.append(task['name'])
- description = '/'.join(desc_items)
-
- project_id = clockapi.get_project_id(project_name)
- tag_ids = []
- tag_ids.append(clockapi.get_tag_id(task_type))
- clockapi.start_time_entry(
- description, project_id, tag_ids=tag_ids
+
+ self.log.info(
+ "Starting Ftrack timer for task: {}".format(entity['name'])
)
- self.log.info('Starting Clockify timer for task: ' + task['name'])
+
+ return
def register(session, plugins_presets={}):
diff --git a/pype/ftrack/actions/action_sync_hier_attrs_local.py b/pype/ftrack/actions/action_sync_hier_attrs_local.py
index 01434470f3..05a70461a1 100644
--- a/pype/ftrack/actions/action_sync_hier_attrs_local.py
+++ b/pype/ftrack/actions/action_sync_hier_attrs_local.py
@@ -7,7 +7,7 @@ import collections
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, lib
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
from bson.objectid import ObjectId
diff --git a/pype/ftrack/actions/action_sync_to_avalon_local.py b/pype/ftrack/actions/action_sync_to_avalon_local.py
index ad39b0ca12..6a43688026 100644
--- a/pype/ftrack/actions/action_sync_to_avalon_local.py
+++ b/pype/ftrack/actions/action_sync_to_avalon_local.py
@@ -178,17 +178,7 @@ class SyncToAvalon(BaseAction):
job['status'] = 'failed'
session.commit()
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier='sync.hierarchical.attrs.local',
- selection=event['data']['selection']
- ),
- source=dict(
- user=event['source']['user']
- )
- )
- session.event_hub.publish(event, on_error='ignore')
+ self.trigger_action("sync.hierarchical.attrs.local", event)
if len(message) > 0:
message = "Unable to sync: {}".format(message)
diff --git a/pype/ftrack/actions/action_where_run_ask.py b/pype/ftrack/actions/action_where_run_ask.py
index 95bbf1fdd7..0351c09909 100644
--- a/pype/ftrack/actions/action_where_run_ask.py
+++ b/pype/ftrack/actions/action_where_run_ask.py
@@ -29,18 +29,10 @@ class ActionAskWhereIRun(BaseAction):
return True
def launch(self, session, entities, event):
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier="show.where.i.run",
- selection=event["data"]["selection"],
- event_hub_id=session.event_hub.id
- ),
- source=dict(
- user=dict(username=session.api_user)
- )
+ more_data = {"event_hub_id": session.event_hub.id}
+ self.trigger_action(
+ "show.where.i.run", event, additional_event_data=more_data
)
- session.event_hub.publish(event, on_error='ignore')
return True
diff --git a/pype/ftrack/events/action_sync_hier_attrs.py b/pype/ftrack/events/action_sync_hier_attrs.py
index 22ad7bf5aa..f8ecb9e3cd 100644
--- a/pype/ftrack/events/action_sync_hier_attrs.py
+++ b/pype/ftrack/events/action_sync_hier_attrs.py
@@ -8,7 +8,7 @@ import collections
from pypeapp import config
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, lib
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
from bson.objectid import ObjectId
@@ -222,7 +222,11 @@ class SyncHierarchicalAttrs(BaseAction):
session.commit()
if self.interface_messages:
- self.show_interface_from_dict(self.interface_messages, event)
+ self.show_interface_from_dict(
+ messages=self.interface_messages,
+ title="something went wrong",
+ event=event
+ )
return True
diff --git a/pype/ftrack/events/action_sync_to_avalon.py b/pype/ftrack/events/action_sync_to_avalon.py
index 5628554c85..91cdfbdc42 100644
--- a/pype/ftrack/events/action_sync_to_avalon.py
+++ b/pype/ftrack/events/action_sync_to_avalon.py
@@ -10,7 +10,7 @@ from pype.ftrack import BaseAction, lib
from pype.vendor.ftrack_api import session as fa_session
-class Sync_To_Avalon(BaseAction):
+class SyncToAvalon(BaseAction):
'''
Synchronizing data action - from Ftrack to Avalon DB
@@ -207,18 +207,8 @@ class Sync_To_Avalon(BaseAction):
job['status'] = 'failed'
session.commit()
-
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier='sync.hierarchical.attrs',
- selection=event['data']['selection']
- ),
- source=dict(
- user=event['source']['user']
- )
- )
- session.event_hub.publish(event, on_error='ignore')
+
+ self.trigger_action("sync.hierarchical.attrs", event)
if len(message) > 0:
message = "Unable to sync: {}".format(message)
diff --git a/pype/ftrack/events/event_sync_hier_attr.py b/pype/ftrack/events/event_sync_hier_attr.py
index 7c5c4b820b..24d6d9ced4 100644
--- a/pype/ftrack/events/event_sync_hier_attr.py
+++ b/pype/ftrack/events/event_sync_hier_attr.py
@@ -1,7 +1,7 @@
import os
import sys
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent, lib
@@ -23,7 +23,10 @@ class SyncHierarchicalAttrs(BaseEvent):
if not keys:
continue
- entity = session.get(ent['entity_type'], ent['entityId'])
+        if ent['entityType'] not in ['task', 'show']:
+ continue
+
+ entity = session.get(self._get_entity_type(ent), ent['entityId'])
processable.append(ent)
processable_ent[ent['entityId']] = entity
diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py
index 3e250b988a..e98b1519b2 100644
--- a/pype/ftrack/events/event_user_assigment.py
+++ b/pype/ftrack/events/event_user_assigment.py
@@ -1,6 +1,6 @@
from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent, lib
-from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+from pype.ftrack.lib.io_nonsingleton import DbConnector
from bson.objectid import ObjectId
from pypeapp import config
from pypeapp import Anatomy
diff --git a/pype/ftrack/ftrack_server/ftrack_server.py b/pype/ftrack/ftrack_server/ftrack_server.py
index 2a58c12d09..e1c13cda32 100644
--- a/pype/ftrack/ftrack_server/ftrack_server.py
+++ b/pype/ftrack/ftrack_server/ftrack_server.py
@@ -97,7 +97,7 @@ class FtrackServer:
msg = 'Loading of file "{}" failed ({})'.format(
file, str(e)
)
- log.warning(msg)
+ log.warning(msg, exc_info=e)
if len(register_functions_dict) < 1:
raise Exception
diff --git a/pype/ftrack/lib/ftrack_base_handler.py b/pype/ftrack/lib/ftrack_base_handler.py
index c6aa176363..9eda74f0f3 100644
--- a/pype/ftrack/lib/ftrack_base_handler.py
+++ b/pype/ftrack/lib/ftrack_base_handler.py
@@ -94,13 +94,12 @@ class BaseHandler(object):
def launch_log(self, func):
@functools.wraps(func)
def wrapper_launch(*args, **kwargs):
+ label = self.__class__.__name__
if hasattr(self, 'label'):
+ label = self.label
if hasattr(self, 'variant'):
- label = '{} {}'.format(self.label, self.variant)
- else:
- label = self.label
- else:
- label = self.__class__.__name__
+ if self.variant is not None:
+ label = '{} {}'.format(self.label, self.variant)
self.log.info(('{} "{}": Launched').format(self.type, label))
try:
@@ -141,6 +140,13 @@ class BaseHandler(object):
# Custom validations
result = self.preregister()
+ if result is None:
+ self.log.debug((
+ "\"{}\" 'preregister' method returned 'None'. Expected it"
+ " didn't fail and continue as preregister returned True."
+ ).format(self.__class__.__name__))
+ return
+
if result is True:
return
msg = "Pre-register conditions were not met"
@@ -321,30 +327,13 @@ class BaseHandler(object):
# Launch preactions
for preaction in self.preactions:
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier=preaction,
- selection=selection
- ),
- source=dict(
- user=dict(username=session.api_user)
- )
- )
- session.event_hub.publish(event, on_error='ignore')
+ self.trigger_action(preaction, event)
+
# Relaunch this action
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier=self.identifier,
- selection=selection,
- preactions_launched=True
- ),
- source=dict(
- user=dict(username=session.api_user)
- )
+ additional_data = {"preactions_launched": True}
+ self.trigger_action(
+ self.identifier, event, additional_event_data=additional_data
)
- session.event_hub.publish(event, on_error='ignore')
return False
@@ -505,7 +494,8 @@ class BaseHandler(object):
)
def show_interface_from_dict(
- self, messages, title="", event=None, user=None, username=None, user_id=None
+ self, messages, title="", event=None,
+ user=None, username=None, user_id=None
):
if not messages:
self.log.debug("No messages to show! (messages dict is empty)")
@@ -532,3 +522,60 @@ class BaseHandler(object):
items.append(message)
self.show_interface(items, title, event, user, username, user_id)
+
+ def trigger_action(
+ self, action_name, event=None, session=None,
+ selection=None, user_data=None,
+ topic="ftrack.action.launch", additional_event_data={},
+ on_error="ignore"
+ ):
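+        """Publish an event that launches another action.
+
+        Example, as used by the sync actions in this diff:
+            self.trigger_action("sync.hierarchical.attrs", event)
+        """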
+ self.log.debug("Triggering action \"{}\" Begins".format(action_name))
+
+ if not session:
+ session = self.session
+
+ # Getting selection and user data
+ _selection = None
+ _user_data = None
+
+ if event:
+ _selection = event.get("data", {}).get("selection")
+ _user_data = event.get("source", {}).get("user")
+
+ if selection is not None:
+ _selection = selection
+
+ if user_data is not None:
+ _user_data = user_data
+
+ # Without selection and user data skip triggering
+ msg = "Can't trigger \"{}\" action without {}."
+ if _selection is None:
+ self.log.error(msg.format(action_name, "selection"))
+ return
+
+ if _user_data is None:
+ self.log.error(msg.format(action_name, "user data"))
+ return
+
+ _event_data = {
+ "actionIdentifier": action_name,
+ "selection": _selection
+ }
+
+ # Add additional data
+ if additional_event_data:
+ _event_data.update(additional_event_data)
+
+ # Create and trigger event
+ session.event_hub.publish(
+ fa_session.ftrack_api.event.base.Event(
+ topic=topic,
+ data=_event_data,
+ source=dict(user=_user_data)
+ ),
+ on_error=on_error
+ )
+ self.log.debug(
+ "Action \"{}\" Triggered successfully".format(action_name)
+ )
diff --git a/pype/ftrack/lib/io_nonsingleton.py b/pype/ftrack/lib/io_nonsingleton.py
new file mode 100644
index 0000000000..702cc93a90
--- /dev/null
+++ b/pype/ftrack/lib/io_nonsingleton.py
@@ -0,0 +1,433 @@
+"""
+Wrapper around interactions with the database
+
+Copy of io module in avalon-core.
+ - In this case not working as singleton with api.Session!
+"""
+
+import os
+import time
+import errno
+import shutil
+import logging
+import tempfile
+import functools
+import contextlib
+
+from avalon import schema
+from avalon.vendor import requests
+
+# Third-party dependencies
+import pymongo
+
+
+def auto_reconnect(func):
+ """Handling auto reconnect in 3 retry times"""
+ @functools.wraps(func)
+ def decorated(*args, **kwargs):
+ object = args[0]
+ for retry in range(3):
+ try:
+ return func(*args, **kwargs)
+ except pymongo.errors.AutoReconnect:
+ object.log.error("Reconnecting..")
+ time.sleep(0.1)
+ else:
+ raise
+
+ return decorated
+
+
+class DbConnector(object):
+
+ log = logging.getLogger(__name__)
+
+ def __init__(self):
+ self.Session = {}
+ self._mongo_client = None
+ self._sentry_client = None
+ self._sentry_logging_handler = None
+ self._database = None
+ self._is_installed = False
+
+ def install(self):
+ """Establish a persistent connection to the database"""
+ if self._is_installed:
+ return
+
+ logging.basicConfig()
+ self.Session.update(self._from_environment())
+
+ timeout = int(self.Session["AVALON_TIMEOUT"])
+ self._mongo_client = pymongo.MongoClient(
+ self.Session["AVALON_MONGO"], serverSelectionTimeoutMS=timeout)
+
+ for retry in range(3):
+ try:
+ t1 = time.time()
+ self._mongo_client.server_info()
+
+ except Exception:
+ self.log.error("Retrying..")
+ time.sleep(1)
+ timeout *= 1.5
+
+ else:
+ break
+
+ else:
+ raise IOError(
+ "ERROR: Couldn't connect to %s in "
+ "less than %.3f ms" % (self.Session["AVALON_MONGO"], timeout))
+
+ self.log.info("Connected to %s, delay %.3f s" % (
+ self.Session["AVALON_MONGO"], time.time() - t1))
+
+ self._install_sentry()
+
+ self._database = self._mongo_client[self.Session["AVALON_DB"]]
+ self._is_installed = True
+
+ def _install_sentry(self):
+ if "AVALON_SENTRY" not in self.Session:
+ return
+
+ try:
+ from raven import Client
+ from raven.handlers.logging import SentryHandler
+ from raven.conf import setup_logging
+ except ImportError:
+ # Note: There was a Sentry address in this Session
+ return self.log.warning("Sentry disabled, raven not installed")
+
+ client = Client(self.Session["AVALON_SENTRY"])
+
+ # Transmit log messages to Sentry
+ handler = SentryHandler(client)
+ handler.setLevel(logging.WARNING)
+
+ setup_logging(handler)
+
+ self._sentry_client = client
+ self._sentry_logging_handler = handler
+ self.log.info(
+ "Connected to Sentry @ %s" % self.Session["AVALON_SENTRY"]
+ )
+
+ def _from_environment(self):
+ Session = {
+ item[0]: os.getenv(item[0], item[1])
+ for item in (
+ # Root directory of projects on disk
+ ("AVALON_PROJECTS", None),
+
+ # Name of current Project
+ ("AVALON_PROJECT", ""),
+
+ # Name of current Asset
+ ("AVALON_ASSET", ""),
+
+ # Name of current silo
+ ("AVALON_SILO", ""),
+
+ # Name of current task
+ ("AVALON_TASK", None),
+
+ # Name of current app
+ ("AVALON_APP", None),
+
+ # Path to working directory
+ ("AVALON_WORKDIR", None),
+
+ # Name of current Config
+ # TODO(marcus): Establish a suitable default config
+ ("AVALON_CONFIG", "no_config"),
+
+ # Name of Avalon in graphical user interfaces
+ # Use this to customise the visual appearance of Avalon
+ # to better integrate with your surrounding pipeline
+ ("AVALON_LABEL", "Avalon"),
+
+ # Used during any connections to the outside world
+ ("AVALON_TIMEOUT", "1000"),
+
+ # Address to Asset Database
+ ("AVALON_MONGO", "mongodb://localhost:27017"),
+
+ # Name of database used in MongoDB
+ ("AVALON_DB", "avalon"),
+
+ # Address to Sentry
+ ("AVALON_SENTRY", None),
+
+ # Address to Deadline Web Service
+ # E.g. http://192.167.0.1:8082
+ ("AVALON_DEADLINE", None),
+
+ # Enable features not necessarily stable. The user's own risk
+ ("AVALON_EARLY_ADOPTER", None),
+
+ # Address of central asset repository, contains
+ # the following interface:
+ # /upload
+ # /download
+ # /manager (optional)
+ ("AVALON_LOCATION", "http://127.0.0.1"),
+
+ # Boolean of whether to upload published material
+ # to central asset repository
+ ("AVALON_UPLOAD", None),
+
+ # Generic username and password
+ ("AVALON_USERNAME", "avalon"),
+ ("AVALON_PASSWORD", "secret"),
+
+ # Unique identifier for instances in working files
+ ("AVALON_INSTANCE_ID", "avalon.instance"),
+ ("AVALON_CONTAINER_ID", "avalon.container"),
+
+ # Enable debugging
+ ("AVALON_DEBUG", None),
+
+ ) if os.getenv(item[0], item[1]) is not None
+ }
+
+ Session["schema"] = "avalon-core:session-1.0"
+ try:
+ schema.validate(Session)
+ except schema.ValidationError as e:
+ # TODO(marcus): Make this mandatory
+ self.log.warning(e)
+
+ return Session
+
+ def uninstall(self):
+ """Close any connection to the database"""
+ try:
+ self._mongo_client.close()
+ except AttributeError:
+ pass
+
+ self._mongo_client = None
+ self._database = None
+ self._is_installed = False
+
+ def active_project(self):
+ """Return the name of the active project"""
+ return self.Session["AVALON_PROJECT"]
+
+ def activate_project(self, project_name):
+ self.Session["AVALON_PROJECT"] = project_name
+
+ def projects(self):
+ """List available projects
+
+ Returns:
+ list of project documents
+
+ """
+
+ collection_names = self.collections()
+ for project in collection_names:
+ if project in ("system.indexes",):
+ continue
+
+ # Each collection will have exactly one project document
+ document = self.find_project(project)
+
+ if document is not None:
+ yield document
+
+ def locate(self, path):
+ """Traverse a hierarchy from top-to-bottom
+
+ Example:
+ representation = locate(["hulk", "Bruce", "modelDefault", 1, "ma"])
+
+ Returns:
+ representation (ObjectId)
+
+ """
+
+ components = zip(
+ ("project", "asset", "subset", "version", "representation"),
+ path
+ )
+
+ parent = None
+ for type_, name in components:
+ latest = (type_ == "version") and name in (None, -1)
+
+ try:
+ if latest:
+ parent = self.find_one(
+ filter={
+ "type": type_,
+ "parent": parent
+ },
+ projection={"_id": 1},
+ sort=[("name", -1)]
+ )["_id"]
+ else:
+ parent = self.find_one(
+ filter={
+ "type": type_,
+ "name": name,
+ "parent": parent
+ },
+ projection={"_id": 1},
+ )["_id"]
+
+ except TypeError:
+ return None
+
+ return parent
+
+ @auto_reconnect
+ def collections(self):
+ return self._database.collection_names()
+
+ @auto_reconnect
+ def find_project(self, project):
+ return self._database[project].find_one({"type": "project"})
+
+ @auto_reconnect
+ def insert_one(self, item):
+ assert isinstance(item, dict), "item must be of type "
+ schema.validate(item)
+ return self._database[self.Session["AVALON_PROJECT"]].insert_one(item)
+
+ @auto_reconnect
+ def insert_many(self, items, ordered=True):
+ # check if all items are valid
+        assert isinstance(items, list), "`items` must be of type <list>"
+ for item in items:
+            assert isinstance(item, dict), "`item` must be of type <dict>"
+ schema.validate(item)
+
+ return self._database[self.Session["AVALON_PROJECT"]].insert_many(
+ items,
+ ordered=ordered)
+
+ @auto_reconnect
+ def find(self, filter, projection=None, sort=None):
+ return self._database[self.Session["AVALON_PROJECT"]].find(
+ filter=filter,
+ projection=projection,
+ sort=sort
+ )
+
+ @auto_reconnect
+ def find_one(self, filter, projection=None, sort=None):
+        assert isinstance(filter, dict), "filter must be <dict>"
+
+ return self._database[self.Session["AVALON_PROJECT"]].find_one(
+ filter=filter,
+ projection=projection,
+ sort=sort
+ )
+
+ @auto_reconnect
+ def save(self, *args, **kwargs):
+ return self._database[self.Session["AVALON_PROJECT"]].save(
+ *args, **kwargs)
+
+ @auto_reconnect
+ def replace_one(self, filter, replacement):
+ return self._database[self.Session["AVALON_PROJECT"]].replace_one(
+ filter, replacement)
+
+ @auto_reconnect
+ def update_many(self, filter, update):
+ return self._database[self.Session["AVALON_PROJECT"]].update_many(
+ filter, update)
+
+ @auto_reconnect
+ def distinct(self, *args, **kwargs):
+ return self._database[self.Session["AVALON_PROJECT"]].distinct(
+ *args, **kwargs)
+
+ @auto_reconnect
+ def drop(self, *args, **kwargs):
+ return self._database[self.Session["AVALON_PROJECT"]].drop(
+ *args, **kwargs)
+
+ @auto_reconnect
+ def delete_many(self, *args, **kwargs):
+ return self._database[self.Session["AVALON_PROJECT"]].delete_many(
+ *args, **kwargs)
+
+ def parenthood(self, document):
+ assert document is not None, "This is a bug"
+
+ parents = list()
+
+ while document.get("parent") is not None:
+ document = self.find_one({"_id": document["parent"]})
+
+ if document is None:
+ break
+
+ parents.append(document)
+
+ return parents
+
+ @contextlib.contextmanager
+ def tempdir(self):
+ tempdir = tempfile.mkdtemp()
+ try:
+ yield tempdir
+ finally:
+ shutil.rmtree(tempdir)
+
+ def download(self, src, dst):
+ """Download `src` to `dst`
+
+ Arguments:
+ src (str): URL to source file
+ dst (str): Absolute path to destination file
+
+ Yields tuple (progress, error):
+ progress (int): Between 0-100
+ error (Exception): Any exception raised when first making connection
+
+ """
+
+ try:
+ response = requests.get(
+ src,
+ stream=True,
+ auth=requests.auth.HTTPBasicAuth(
+ self.Session["AVALON_USERNAME"],
+ self.Session["AVALON_PASSWORD"]
+ )
+ )
+ except requests.ConnectionError as e:
+ yield None, e
+ return
+
+ with self.tempdir() as dirname:
+ tmp = os.path.join(dirname, os.path.basename(src))
+
+ with open(tmp, "wb") as f:
+ total_length = response.headers.get("content-length")
+
+ if total_length is None: # no content length header
+ f.write(response.content)
+ else:
+ downloaded = 0
+ total_length = int(total_length)
+ for data in response.iter_content(chunk_size=4096):
+ downloaded += len(data)
+ f.write(data)
+
+ yield int(100.0 * downloaded / total_length), None
+
+ try:
+ os.makedirs(os.path.dirname(dst))
+ except OSError as e:
+ # An already existing destination directory is fine.
+ if e.errno != errno.EEXIST:
+ raise
+
+ shutil.copy(tmp, dst)
diff --git a/pype/lib.py b/pype/lib.py
index 6eee38f6d8..6f6895085e 100644
--- a/pype/lib.py
+++ b/pype/lib.py
@@ -5,6 +5,7 @@ import importlib
import itertools
import contextlib
import subprocess
+import inspect
from .vendor import pather
from .vendor.pather.error import ParseError
@@ -31,7 +32,9 @@ def _subprocess(args):
output = proc.communicate()[0]
if proc.returncode != 0:
+ log.error(output)
raise ValueError("\"{}\" was not successful: {}".format(args, output))
+ return output
def get_hierarchy(asset_name=None):
@@ -421,7 +424,7 @@ def get_version_from_path(file):
v: version number in string ('001')
"""
- pattern = re.compile(r"[\._]v([0-9]*)")
+ pattern = re.compile(r"[\._]v([0-9]+)")
try:
return pattern.findall(file)[0]
except IndexError:
@@ -467,9 +470,7 @@ def filter_pyblish_plugins(plugins):
host = api.current_host()
- presets = config.get_presets().get('plugins', {}).get(host, {}).get(
- "publish", {}
- )
+ presets = config.get_presets().get('plugins', {})
# iterate over plugins
for plugin in plugins[:]:
@@ -477,10 +478,20 @@ def filter_pyblish_plugins(plugins):
if not presets:
continue
+    file = os.path.normpath(inspect.getsourcefile(plugin))
+
+    # host and plugin kind are determined from the plugin's file path
+    host_from_file = file.split(os.path.sep)[-3]
+    plugin_kind = file.split(os.path.sep)[-2]
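+    # e.g. a plugin loaded from ".../plugins/nuke/publish/collect_foo.py"
+    # yields host_from_file == "nuke" and plugin_kind == "publish"
+    # (the file name here is illustrative)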
+
try:
- config_data = presets[plugin.__name__] # noqa: E501
+ config_data = presets[host]["publish"][plugin.__name__]
except KeyError:
- continue
+ try:
+ config_data = presets[host_from_file][plugin_kind][plugin.__name__] # noqa: E501
+ except KeyError:
+ continue
for option, value in config_data.items():
if option == "enabled" and value is False:
@@ -492,6 +503,72 @@ def filter_pyblish_plugins(plugins):
setattr(plugin, option, value)
- # Remove already processed plugins from dictionary
- # WARNING Requires plugins with unique names
- presets.pop(plugin.__name__)
+
+def get_subsets(asset_name,
+ regex_filter=None,
+ version=None,
+ representations=["exr", "dpx"]):
+ """
+    Query subsets of an asset, with optional filtering by name.
+
+    The method returns all found subsets together with their version and
+    representations. The latest version is used unless a version number
+    is given. Representations can be filtered as well.
+
+    Arguments:
+        asset_name (str): asset (shot) name
+        regex_filter (raw): raw string with filter pattern
+        version (int): version number; the latest version is used if
+            not set
+        representations (list): representation names to filter by
+
+    Returns:
+        dict: subset names mapped to their version and representations
+ """
+ from avalon import io
+
+ # query asset from db
+ asset_io = io.find_one({"type": "asset",
+ "name": asset_name})
+
+ # check if anything returned
+ assert asset_io, "Asset not existing. \
+ Check correct name: `{}`".format(asset_name)
+
+ # create subsets query filter
+ filter_query = {"type": "subset", "parent": asset_io["_id"]}
+
+    # add regex filter string into query filter
+ if regex_filter:
+ filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}})
+ else:
+ filter_query.update({"name": {"$regex": r'.*'}})
+
+    # query all subsets
+ subsets = [s for s in io.find(filter_query)]
+
+ assert subsets, "No subsets found. Check correct filter. Try this for start `r'.*'`: asset: `{}`".format(asset_name)
+
+ output_dict = {}
+ # Process subsets
+ for subset in subsets:
+ if not version:
+ version_sel = io.find_one({"type": "version",
+ "parent": subset["_id"]},
+ sort=[("name", -1)])
+ else:
+ assert isinstance(version, int), "version needs to be `int` type"
+ version_sel = io.find_one({"type": "version",
+ "parent": subset["_id"],
+ "name": int(version)})
+
+ find_dict = {"type": "representation",
+ "parent": version_sel["_id"]}
+
+ filter_repr = {"$or": [{"name": repr} for repr in representations]}
+
+ find_dict.update(filter_repr)
+ repres_out = [i for i in io.find(find_dict)]
+
+ if len(repres_out) > 0:
+ output_dict[subset["name"]] = {"version": version_sel,
+                                           "representations": repres_out}
+
+ return output_dict
diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py
index 0c4cdc10ab..42ca633e40 100644
--- a/pype/nuke/__init__.py
+++ b/pype/nuke/__init__.py
@@ -104,7 +104,7 @@ def install():
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
-
+ workfile_settings = lib.WorkfileSettings()
# Disable all families except for the ones we explicitly want to see
family_states = [
"write",
@@ -121,7 +121,7 @@ def install():
nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")
# Set context settings.
- nuke.addOnCreate(lib.set_context_settings, nodeClass="Root")
+ nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
menu.install()
diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index 82244afdb5..f182088457 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -1,10 +1,12 @@
import os
import sys
+import getpass
from collections import OrderedDict
from pprint import pprint
from avalon import api, io, lib
import avalon.nuke
import pype.api as pype
+
import nuke
from .templates import (
get_colorspace_preset,
@@ -12,6 +14,11 @@ from .templates import (
get_node_colorspace_preset
)
+from .templates import (
+ get_anatomy
+)
+# TODO: remove get_anatomy and import Anatomy() directly here
+
from pypeapp import Logger
log = Logger().get_logger(__name__, "nuke")
@@ -159,11 +166,6 @@ def format_anatomy(data):
'''
# TODO: perhaps should be nonPublic
- from .templates import (
- get_anatomy
- )
- # TODO: remove get_anatomy and import directly Anatomy() here
-
anatomy = get_anatomy()
log.debug("__ anatomy.templates: {}".format(anatomy.templates))
@@ -195,6 +197,7 @@ def script_name():
'''
return nuke.root().knob('name').value()
+
def add_button_write_to_read(node):
name = "createReadNode"
label = "Create Read"
@@ -203,6 +206,7 @@ def add_button_write_to_read(node):
k.setFlag(0x1000)
node.addKnob(k)
+
def create_write_node(name, data, prenodes=None):
''' Creating write node which is group node
@@ -311,7 +315,6 @@ def create_write_node(name, data, prenodes=None):
else:
prev_node = nuke.createNode("Input", "name rgba")
-
# creating write node
now_node = avalon.nuke.lib.add_write_node("inside_{}".format(name),
**_data
@@ -331,7 +334,6 @@ def create_write_node(name, data, prenodes=None):
# imprinting group node
GN = avalon.nuke.imprint(GN, data["avalon"])
-
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@@ -339,7 +341,7 @@ def create_write_node(name, data, prenodes=None):
# adding write to read button
add_button_write_to_read(GN)
-
+
divider = nuke.Text_Knob('')
GN.addKnob(divider)
@@ -347,13 +349,15 @@ def create_write_node(name, data, prenodes=None):
tile_color = _data.get("tile_color", "0xff0000ff")
GN["tile_color"].setValue(tile_color)
-
# add render button
lnk = nuke.Link_Knob("Render")
lnk.makeLink(write_node.name(), "Render")
lnk.setName("Render")
GN.addKnob(lnk)
+ # Deadline tab.
+ add_deadline_tab(GN)
+
return GN
@@ -378,301 +382,391 @@ def add_rendering_knobs(node):
return node
-def set_viewers_colorspace(viewer):
- ''' Adds correct colorspace to viewer
+def add_deadline_tab(node):
+ node.addKnob(nuke.Tab_Knob("Deadline"))
+
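+    # Chunk size is meant to be picked up by the render farm submission
+    # step later on; a value of 1 means one frame per Deadline task.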
+ knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
+ knob.setValue(1)
+ node.addKnob(knob)
+
+
+def create_backdrop(label="", color=None, layer=0,
+ nodes=None):
+ """
+ Create Backdrop node
Arguments:
- viewer (obj): nuke viewer node object to be fixed
+ color (str): nuke compatible string with color code
+        layer (int): z-order of the node, usually (self.pos_layer - 1)
+ label (str): the message
+ nodes (list): list of nodes to be wrapped into backdrop
- '''
- assert isinstance(viewer, dict), log.error(
- "set_viewers_colorspace(): argument should be dictionary")
+ """
+ assert isinstance(nodes, list), "`nodes` should be a list of nodes"
- filter_knobs = [
- "viewerProcess",
- "wipe_position"
- ]
- viewers = [n for n in nuke.allNodes() if n.Class() == 'Viewer']
- erased_viewers = []
+ # Calculate bounds for the backdrop node.
+ bdX = min([node.xpos() for node in nodes])
+ bdY = min([node.ypos() for node in nodes])
+ bdW = max([node.xpos() + node.screenWidth() for node in nodes]) - bdX
+ bdH = max([node.ypos() + node.screenHeight() for node in nodes]) - bdY
- for v in viewers:
- v['viewerProcess'].setValue(str(viewer["viewerProcess"]))
- if str(viewer["viewerProcess"]) not in v['viewerProcess'].value():
- copy_inputs = v.dependencies()
- copy_knobs = {k: v[k].value() for k in v.knobs()
- if k not in filter_knobs}
- pprint(copy_knobs)
- # delete viewer with wrong settings
- erased_viewers.append(v['name'].value())
- nuke.delete(v)
+ # Expand the bounds to leave a little border. Elements are offsets
+ # for left, top, right and bottom edges respectively
+ left, top, right, bottom = (-20, -65, 20, 60)
+ bdX += left
+ bdY += top
+ bdW += (right - left)
+ bdH += (bottom - top)
- # create new viewer
- nv = nuke.createNode("Viewer")
+ bdn = nuke.createNode("BackdropNode")
+ bdn["z_order"].setValue(layer)
- # connect to original inputs
- for i, n in enumerate(copy_inputs):
- nv.setInput(i, n)
+ if color:
+ bdn["tile_color"].setValue(int(color, 16))
- # set coppied knobs
- for k, v in copy_knobs.items():
- print(k, v)
- nv[k].setValue(v)
+ bdn["xpos"].setValue(bdX)
+ bdn["ypos"].setValue(bdY)
+ bdn["bdwidth"].setValue(bdW)
+ bdn["bdheight"].setValue(bdH)
- # set viewerProcess
- nv['viewerProcess'].setValue(str(viewer["viewerProcess"]))
+ if label:
+ bdn["label"].setValue(label)
- if erased_viewers:
- log.warning(
- "Attention! Viewer nodes {} were erased."
- "It had wrong color profile".format(erased_viewers))
+ bdn["note_font_size"].setValue(20)
+ return bdn
-def set_root_colorspace(root_dict):
- ''' Adds correct colorspace to root
+class WorkfileSettings(object):
+ """
+    Set all settings for the workfile
+
+    This object applies all available root settings to the workfile,
+    including colorspace, frame ranges and resolution format. It can
+    apply them to the Root node or to any given nodes.
Arguments:
- root_dict (dict): nuke root node as dictionary
+ root (node): nuke's root node
+ nodes (list): list of nuke's nodes
+ nodes_filter (list): filtering classes for nodes
- '''
- assert isinstance(root_dict, dict), log.error(
- "set_root_colorspace(): argument should be dictionary")
+ """
- # first set OCIO
- if nuke.root()["colorManagement"].value() not in str(root_dict["colorManagement"]):
- nuke.root()["colorManagement"].setValue(
- str(root_dict["colorManagement"]))
+ def __init__(self,
+ root_node=None,
+ nodes=None,
+ **kwargs):
+ self._project = kwargs.get(
+ "project") or io.find_one({"type": "project"})
+ self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"]
+ self._asset_entity = pype.get_asset(self._asset)
+ self._root_node = root_node or nuke.root()
+ self._nodes = self.get_nodes(nodes=nodes)
- # second set ocio version
- if nuke.root()["OCIO_config"].value() not in str(root_dict["OCIO_config"]):
- nuke.root()["OCIO_config"].setValue(str(root_dict["OCIO_config"]))
+ self.data = kwargs
- # then set the rest
- for knob, value in root_dict.items():
- if nuke.root()[knob].value() not in value:
- nuke.root()[knob].setValue(str(value))
- log.debug("nuke.root()['{}'] changed to: {}".format(knob, value))
+ def get_nodes(self, nodes=None, nodes_filter=None):
+        # collect nodes, optionally narrowed down by class names
+        # given in `nodes_filter`
+        if not isinstance(nodes, list) and not isinstance(nodes_filter, list):
+            return [n for n in nuke.allNodes()]
+        elif not isinstance(nodes, list) and isinstance(nodes_filter, list):
+            nodes = list()
+            for node_filter in nodes_filter:
+                nodes.extend(nuke.allNodes(filter=node_filter))
+            return nodes
+        elif isinstance(nodes, list) and not isinstance(nodes_filter, list):
+            return nodes
+        elif isinstance(nodes, list) and isinstance(nodes_filter, list):
+            return [n for n in nodes
+                    for node_filter in nodes_filter
+                    if node_filter in n.Class()]
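+
+    # Example (hedged): `get_nodes(nodes_filter=["Viewer"])` returns all
+    # Viewer nodes in the script; with no arguments every node is returned.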
-def set_writes_colorspace(write_dict):
- ''' Adds correct colorspace to write node dict
+ def set_viewers_colorspace(self, viewer_dict):
+ ''' Adds correct colorspace to viewer
- Arguments:
- write_dict (dict): nuke write node as dictionary
+ Arguments:
+ viewer_dict (dict): adjustments from presets
- '''
- # TODO: complete this function so any write node in scene will have fixed colorspace following presets for the project
- assert isinstance(write_dict, dict), log.error(
- "set_root_colorspace(): argument should be dictionary")
+ '''
+ assert isinstance(viewer_dict, dict), log.error(
+ "set_viewers_colorspace(): argument should be dictionary")
- log.debug("__ set_writes_colorspace(): {}".format(write_dict))
+ filter_knobs = [
+ "viewerProcess",
+ "wipe_position"
+ ]
+ erased_viewers = []
+ for v in [n for n in self._nodes
+ if "Viewer" in n.Class()]:
+ v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
+ if str(viewer_dict["viewerProcess"]) \
+ not in v['viewerProcess'].value():
+ copy_inputs = v.dependencies()
+ copy_knobs = {k: v[k].value() for k in v.knobs()
+ if k not in filter_knobs}
-def set_colorspace():
- ''' Setting colorpace following presets
- '''
- nuke_colorspace = get_colorspace_preset().get("nuke", None)
+ # delete viewer with wrong settings
+ erased_viewers.append(v['name'].value())
+ nuke.delete(v)
- try:
- set_root_colorspace(nuke_colorspace["root"])
- except AttributeError:
- log.error(
- "set_colorspace(): missing `root` settings in template")
- try:
- set_viewers_colorspace(nuke_colorspace["viewer"])
- except AttributeError:
- log.error(
- "set_colorspace(): missing `viewer` settings in template")
- try:
- set_writes_colorspace(nuke_colorspace["write"])
- except AttributeError:
- log.error(
- "set_colorspace(): missing `write` settings in template")
+ # create new viewer
+ nv = nuke.createNode("Viewer")
- try:
- for key in nuke_colorspace:
- log.debug("Preset's colorspace key: {}".format(key))
- except TypeError:
- log.error("Nuke is not in templates! \n\n\n"
- "contact your supervisor!")
+ # connect to original inputs
+ for i, n in enumerate(copy_inputs):
+ nv.setInput(i, n)
+            # set copied knobs
+ for k, v in copy_knobs.items():
+ print(k, v)
+ nv[k].setValue(v)
-def reset_frame_range_handles():
- """Set frame range to current asset"""
+ # set viewerProcess
+ nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
- root = nuke.root()
- name = api.Session["AVALON_ASSET"]
- asset_entity = pype.get_asset(name)
+ if erased_viewers:
+ log.warning(
+ "Attention! Viewer nodes {} were erased."
+ "It had wrong color profile".format(erased_viewers))
- if "data" not in asset_entity:
- msg = "Asset {} don't have set any 'data'".format(name)
- log.warning(msg)
- nuke.message(msg)
- return
- data = asset_entity["data"]
+ def set_root_colorspace(self, root_dict):
+ ''' Adds correct colorspace to root
- missing_cols = []
- check_cols = ["fps", "frameStart", "frameEnd", "handleStart", "handleEnd"]
+ Arguments:
+            root_dict (dict): adjustments from presets
- for col in check_cols:
- if col not in data:
- missing_cols.append(col)
+ '''
+ assert isinstance(root_dict, dict), log.error(
+ "set_root_colorspace(): argument should be dictionary")
- if len(missing_cols) > 0:
- missing = ", ".join(missing_cols)
- msg = "'{}' are not set for asset '{}'!".format(missing, name)
- log.warning(msg)
- nuke.message(msg)
- return
+ # first set OCIO
+ if self._root_node["colorManagement"].value() \
+ not in str(root_dict["colorManagement"]):
+ self._root_node["colorManagement"].setValue(
+ str(root_dict["colorManagement"]))
- # get handles values
- handle_start = asset_entity["data"]["handleStart"]
- handle_end = asset_entity["data"]["handleEnd"]
+ # second set ocio version
+ if self._root_node["OCIO_config"].value() \
+ not in str(root_dict["OCIO_config"]):
+ self._root_node["OCIO_config"].setValue(
+ str(root_dict["OCIO_config"]))
- fps = asset_entity["data"]["fps"]
- frame_start = int(asset_entity["data"]["frameStart"]) - handle_start
- frame_end = int(asset_entity["data"]["frameEnd"]) + handle_end
+ # then set the rest
+ for knob, value in root_dict.items():
+ if self._root_node[knob].value() not in value:
+ self._root_node[knob].setValue(str(value))
+ log.debug("nuke.root()['{}'] changed to: {}".format(
+ knob, value))
- root["fps"].setValue(fps)
- root["first_frame"].setValue(frame_start)
- root["last_frame"].setValue(frame_end)
+ def set_writes_colorspace(self, write_dict):
+ ''' Adds correct colorspace to write node dict
- # setting active viewers
- nuke.frame(int(asset_entity["data"]["frameStart"]))
+ Arguments:
+ write_dict (dict): nuke write node as dictionary
- range = '{0}-{1}'.format(
- int(asset_entity["data"]["frameStart"]),
- int(asset_entity["data"]["frameEnd"]))
+ '''
+ # TODO: complete this function so any write node in
+ # scene will have fixed colorspace following presets for the project
+        assert isinstance(write_dict, dict), log.error(
+            "set_writes_colorspace(): argument should be dictionary")
- for node in nuke.allNodes(filter="Viewer"):
- node['frame_range'].setValue(range)
- node['frame_range_lock'].setValue(True)
- node['frame_range'].setValue(range)
- node['frame_range_lock'].setValue(True)
+ log.debug("__ set_writes_colorspace(): {}".format(write_dict))
- # adding handle_start/end to root avalon knob
- if not avalon.nuke.imprint(root, {
- "handleStart": int(handle_start),
- "handleEnd": int(handle_end)
- }):
- log.warning("Cannot set Avalon knob to Root node!")
+ def set_colorspace(self):
+        ''' Set colorspace following presets
+ '''
+ nuke_colorspace = get_colorspace_preset().get("nuke", None)
-
-def reset_resolution():
- """Set resolution to project resolution."""
- log.info("Reseting resolution")
- project = io.find_one({"type": "project"})
- asset = api.Session["AVALON_ASSET"]
- asset = io.find_one({"name": asset, "type": "asset"})
-
- width = asset.get('data', {}).get("resolutionWidth")
- height = asset.get('data', {}).get("resolutionHeight")
- pixel_aspect = asset.get('data', {}).get("pixelAspect")
-
- if any(not x for x in [width, height, pixel_aspect]):
- log.error("Missing set shot attributes in DB. \nContact your supervisor!. \n\nWidth: `{0}` \nHeight: `{1}` \nPixel Asspect: `{2}`".format(
- width, height, pixel_aspect))
- return
-
- bbox = asset.get('data', {}).get('crop')
-
- if bbox:
try:
- x, y, r, t = bbox.split(".")
+ self.set_root_colorspace(nuke_colorspace["root"])
+ except AttributeError:
+ log.error(
+ "set_colorspace(): missing `root` settings in template")
+ try:
+ self.set_viewers_colorspace(nuke_colorspace["viewer"])
+ except AttributeError:
+ log.error(
+ "set_colorspace(): missing `viewer` settings in template")
+ try:
+ self.set_writes_colorspace(nuke_colorspace["write"])
+ except AttributeError:
+ log.error(
+ "set_colorspace(): missing `write` settings in template")
+
+ try:
+ for key in nuke_colorspace:
+ log.debug("Preset's colorspace key: {}".format(key))
+ except TypeError:
+ log.error("Nuke is not in templates! \n\n\n"
+ "contact your supervisor!")
+
+ def reset_frame_range_handles(self):
+ """Set frame range to current asset"""
+
+ if "data" not in self._asset_entity:
+ msg = "Asset {} don't have set any 'data'".format(self._asset)
+ log.warning(msg)
+ nuke.message(msg)
+ return
+ data = self._asset_entity["data"]
+
+ missing_cols = []
+ check_cols = ["fps", "frameStart", "frameEnd",
+ "handleStart", "handleEnd"]
+
+ for col in check_cols:
+ if col not in data:
+ missing_cols.append(col)
+
+ if len(missing_cols) > 0:
+ missing = ", ".join(missing_cols)
+ msg = "'{}' are not set for asset '{}'!".format(
+ missing, self._asset)
+ log.warning(msg)
+ nuke.message(msg)
+ return
+
+ # get handles values
+ handle_start = data["handleStart"]
+ handle_end = data["handleEnd"]
+
+ fps = data["fps"]
+ frame_start = int(data["frameStart"]) - handle_start
+ frame_end = int(data["frameEnd"]) + handle_end
+
+ self._root_node["fps"].setValue(fps)
+ self._root_node["first_frame"].setValue(frame_start)
+ self._root_node["last_frame"].setValue(frame_end)
+
+ # setting active viewers
+ try:
+ nuke.frame(int(data["frameStart"]))
except Exception as e:
- bbox = None
- log.error("{}: {} \nFormat:Crop need to be set with dots, example: "
- "0.0.1920.1080, /nSetting to default".format(__name__, e))
+ log.warning("no viewer in scene: `{}`".format(e))
- used_formats = list()
- for f in nuke.formats():
- if project["name"] in str(f.name()):
- used_formats.append(f)
- else:
- format_name = project["name"] + "_1"
+        frame_range = '{0}-{1}'.format(
+            int(data["frameStart"]),
+            int(data["frameEnd"]))
- crnt_fmt_str = ""
- if used_formats:
- check_format = used_formats[-1]
- format_name = "{}_{}".format(
- project["name"],
- int(used_formats[-1].name()[-1]) + 1
- )
- log.info(
- "Format exists: {}. "
- "Will create new: {}...".format(
- used_formats[-1].name(),
- format_name)
- )
- crnt_fmt_kargs = {
- "width": (check_format.width()),
- "height": (check_format.height()),
- "pixelAspect": float(check_format.pixelAspect())
+        for node in nuke.allNodes(filter="Viewer"):
+            node['frame_range'].setValue(frame_range)
+            node['frame_range_lock'].setValue(True)
+
+ # adding handle_start/end to root avalon knob
+ if not avalon.nuke.imprint(self._root_node, {
+ "handleStart": int(handle_start),
+ "handleEnd": int(handle_end)
+ }):
+ log.warning("Cannot set Avalon knob to Root node!")
+
+ def reset_resolution(self):
+ """Set resolution to project resolution."""
+ log.info("Reseting resolution")
+ project = io.find_one({"type": "project"})
+ asset = api.Session["AVALON_ASSET"]
+ asset = io.find_one({"name": asset, "type": "asset"})
+ asset_data = asset.get('data', {})
+
+ data = {
+ "width": int(asset_data.get(
+ 'resolutionWidth',
+ asset_data.get('resolution_width'))),
+ "height": int(asset_data.get(
+ 'resolutionHeight',
+ asset_data.get('resolution_height'))),
+ "pixel_aspect": asset_data.get(
+ 'pixelAspect',
+ asset_data.get('pixel_aspect', 1)),
+ "name": project["name"]
}
+
+        if any(x is None for x in data.values()):
+            log.error(
+                "Missing shot attributes in DB."
+                "\nContact your supervisor!"
+                "\n\nWidth: `{width}`"
+                "\nHeight: `{height}`"
+                "\nPixel Aspect: `{pixel_aspect}`".format(**data)
+            )
+            return
+
+ bbox = self._asset_entity.get('data', {}).get('crop')
+
if bbox:
- crnt_fmt_kargs.update({
- "x": int(check_format.x()),
- "y": int(check_format.y()),
- "r": int(check_format.r()),
- "t": int(check_format.t()),
- })
- crnt_fmt_str = make_format_string(**crnt_fmt_kargs)
+ try:
+ x, y, r, t = bbox.split(".")
+ data.update(
+ {
+ "x": int(x),
+ "y": int(y),
+ "r": int(r),
+ "t": int(t),
+ }
+ )
+ except Exception as e:
+ bbox = None
+                log.error(
+                    "{}: {} \nFormat: Crop needs to be set with dots, "
+                    "example: 0.0.1920.1080\nSetting to default".format(
+                        __name__, e)
+                )
- new_fmt_kargs = {
- "width": int(width),
- "height": int(height),
- "pixelAspect": float(pixel_aspect),
- "project_name": format_name
- }
- if bbox:
- new_fmt_kargs.update({
- "x": int(x),
- "y": int(y),
- "r": int(r),
- "t": int(t),
- })
+    existing_format = None
+    for nuke_format in nuke.formats():
+        if data["name"] == nuke_format.name():
+            existing_format = nuke_format
+            break
- new_fmt_str = make_format_string(**new_fmt_kargs)
+ if existing_format:
+ # Enforce existing format to be correct.
+ existing_format.setWidth(data["width"])
+ existing_format.setHeight(data["height"])
+ existing_format.setPixelAspect(data["pixel_aspect"])
- if new_fmt_str not in crnt_fmt_str:
- make_format(frm_str=new_fmt_str,
- project_name=new_fmt_kargs["project_name"])
+ if bbox:
+ existing_format.setX(data["x"])
+ existing_format.setY(data["y"])
+ existing_format.setR(data["r"])
+ existing_format.setT(data["t"])
+ else:
+ format_string = self.make_format_string(**data)
+ log.info("Creating new format: {}".format(format_string))
+ nuke.addFormat(format_string)
- log.info("Format is set")
+ nuke.root()["format"].setValue(data["name"])
+ log.info("Format is set.")
+ def make_format_string(self, **kwargs):
+ if kwargs.get("r"):
+ return (
+ "{width} "
+ "{height} "
+ "{x} "
+ "{y} "
+ "{r} "
+ "{t} "
+ "{pixel_aspect:.2f} "
+ "{name}".format(**kwargs)
+ )
+ else:
+ return (
+ "{width} "
+ "{height} "
+ "{pixel_aspect:.2f} "
+ "{name}".format(**kwargs)
+ )
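+
+    # Example (hedged): make_format_string(width=1920, height=1080,
+    # pixel_aspect=1.0, name="myProject") returns
+    # "1920 1080 1.00 myProject", the string nuke.addFormat() expects.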
-def make_format_string(**args):
- if args.get("r"):
- return (
- "{width} "
- "{height} "
- "{x} "
- "{y} "
- "{r} "
- "{t} "
- "{pixelAspect:.2f}".format(**args)
- )
- else:
- return (
- "{width} "
- "{height} "
- "{pixelAspect:.2f}".format(**args)
- )
-
-
-def make_format(**args):
- log.info("Format does't exist, will create: \n{}".format(args))
- nuke.addFormat("{frm_str} "
- "{project_name}".format(**args))
- nuke.root()["format"].setValue("{project_name}".format(**args))
-
-
-def set_context_settings():
- # replace reset resolution from avalon core to pype's
- reset_resolution()
- # replace reset resolution from avalon core to pype's
- reset_frame_range_handles()
- # add colorspace menu item
- set_colorspace()
+ def set_context_settings(self):
+ # replace reset resolution from avalon core to pype's
+ self.reset_resolution()
+ # replace reset resolution from avalon core to pype's
+ self.reset_frame_range_handles()
+ # add colorspace menu item
+ self.set_colorspace()
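+
+
+# Example (hedged sketch): applying all workfile settings at once from an
+# open Nuke session; assumes a valid Avalon session (project/asset set):
+#
+#   settings = WorkfileSettings()      # defaults to nuke.root()
+#   settings.set_context_settings()    # resolution, ranges, colorspace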
def get_hierarchical_attr(entity, attr, default=None):
@@ -730,3 +824,297 @@ def get_write_node_template_attr(node):
# fix badly encoded data
return avalon.nuke.lib.fix_data_for_node_create(correct_data)
+
+
+class BuildWorkfile(WorkfileSettings):
+ """
+ Building first version of workfile.
+
+    Settings are taken from presets and from the db. It adds all subsets
+    in their last version for the defined representations.
+
+ """
+ xpos = 0
+ ypos = 0
+ xpos_size = 80
+ ypos_size = 90
+ xpos_gap = 50
+ ypos_gap = 50
+ pos_layer = 10
+
+ def __init__(self,
+ root_path=None,
+ root_node=None,
+ nodes=None,
+ to_script=None,
+ **kwargs):
+ """
+        Initialize the builder and resolve the workfile directory and
+        file name from anatomy presets.
+
+        Arguments:
+            root_path (str): projects root; defaults to
+                api.Session["AVALON_PROJECTS"]
+            root_node (nuke.Node): root node; defaults to nuke.root()
+            nodes (list): list of nuke.Node
+            to_script (str): when set, saving the script is skipped
+            nodes_effects (dict, in kwargs): dictionary with subsets
+
+ Example:
+ nodes_effects = {
+ "plateMain": {
+ "nodes": [
+ [("Class", "Reformat"),
+ ("resize", "distort"),
+ ("flip", True)],
+
+ [("Class", "Grade"),
+ ("blackpoint", 0.5),
+ ("multiply", 0.4)]
+ ]
+ },
+ }
+
+ """
+
+ WorkfileSettings.__init__(self,
+ root_node=root_node,
+ nodes=nodes,
+ **kwargs)
+ self.to_script = to_script
+        # collect data for formatting
+ data = {
+ "root": root_path or api.Session["AVALON_PROJECTS"],
+ "project": {"name": self._project["name"],
+ "code": self._project["data"].get("code", '')},
+ "asset": self._asset or os.environ["AVALON_ASSET"],
+ "task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(),
+ "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
+ "version": kwargs.get("version", {}).get("name", 1),
+ "user": getpass.getuser(),
+ "comment": "firstBuild"
+ }
+
+ # get presets from anatomy
+ anatomy = get_anatomy()
+ # format anatomy
+ anatomy_filled = anatomy.format(data)
+
+ # get dir and file for workfile
+ self.work_dir = anatomy_filled["avalon"]["work"]
+ self.work_file = anatomy_filled["avalon"]["workfile"] + ".nk"
+
+ def save_script_as(self, path=None):
+ # first clear anything in open window
+ nuke.scriptClear()
+
+        if not path:
+            dir_path = self.work_dir
+            path = os.path.join(
+                self.work_dir,
+                self.work_file).replace("\\", "/")
+        else:
+            dir_path = os.path.dirname(path)
+
+        # make sure the destination folder exists
+        if not os.path.exists(dir_path):
+            os.makedirs(dir_path)
+
+ # save script to path
+ nuke.scriptSaveAs(path)
+
+ def process(self,
+ regex_filter=None,
+ version=None,
+ representations=["exr", "dpx", "lutJson"]):
+ """
+        Build the workfile: create a render write node and viewer, then
+        load all matching subsets in their last (or given) version and
+        wrap them into backdrops.
+
+        Args:
+            regex_filter (raw string): regex pattern to filter out subsets
+            version (int): particular version; None gets the last one
+            representations (list): representation names to load
+
+ """
+
+ if not self.to_script:
+ # save the script
+ self.save_script_as()
+
+ # create viewer and reset frame range
+ viewer = self.get_nodes(nodes_filter=["Viewer"])
+ if not viewer:
+ vn = nuke.createNode("Viewer")
+ vn["xpos"].setValue(self.xpos)
+ vn["ypos"].setValue(self.ypos)
+ else:
+ vn = viewer[-1]
+
+ # move position
+ self.position_up()
+
+ wn = self.write_create()
+ wn["xpos"].setValue(self.xpos)
+ wn["ypos"].setValue(self.ypos)
+ wn["render"].setValue(True)
+ vn.setInput(0, wn)
+
+ bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
+ color='0xcc1102ff', layer=-1,
+ nodes=[wn])
+
+ # move position
+ self.position_up(4)
+
+ # set frame range for new viewer
+ self.reset_frame_range_handles()
+
+ # get all available representations
+ subsets = pype.get_subsets(self._asset,
+ regex_filter=regex_filter,
+ version=version,
+ representations=representations)
+
+ nodes_backdrop = list()
+
+ for name, subset in subsets.items():
+ if "lut" in name:
+ continue
+ log.info("Building Loader to: `{}`".format(name))
+ version = subset["version"]
+ log.info("Version to: `{}`".format(version["name"]))
+ representations = subset["representaions"]
+ for repr in representations:
+ rn = self.read_loader(repr)
+ rn["xpos"].setValue(self.xpos)
+ rn["ypos"].setValue(self.ypos)
+ wn.setInput(0, rn)
+
+            # get additional nodes
+ lut_subset = [s for n, s in subsets.items()
+ if "lut{}".format(name.lower()) in n.lower()]
+ log.debug(">> lut_subset: `{}`".format(lut_subset))
+
+ if len(lut_subset) > 0:
+ lsub = lut_subset[0]
+ fxn = self.effect_loader(lsub["representaions"][-1])
+ fxn_ypos = fxn["ypos"].value()
+ fxn["ypos"].setValue(fxn_ypos - 100)
+ nodes_backdrop.append(fxn)
+
+ nodes_backdrop.append(rn)
+ # move position
+ self.position_right()
+
+ bdn = self.create_backdrop(label="Loaded Reads",
+ color='0x2d7702ff', layer=-1,
+ nodes=nodes_backdrop)
+
+ def read_loader(self, representation):
+ """
+ Gets Loader plugin for image sequence or mov
+
+ Arguments:
+ representation (dict): avalon db entity
+
+ """
+ context = representation["context"]
+
+ loader_name = "LoadSequence"
+ if "mov" in context["representation"]:
+ loader_name = "LoadMov"
+
+ loader_plugin = None
+ for Loader in api.discover(api.Loader):
+ if Loader.__name__ != loader_name:
+ continue
+
+ loader_plugin = Loader
+
+ return api.load(Loader=loader_plugin,
+ representation=representation["_id"])
+
+ def effect_loader(self, representation):
+ """
+ Gets Loader plugin for effects
+
+ Arguments:
+ representation (dict): avalon db entity
+
+ """
+ context = representation["context"]
+
+ loader_name = "LoadLuts"
+
+ loader_plugin = None
+ for Loader in api.discover(api.Loader):
+ if Loader.__name__ != loader_name:
+ continue
+
+ loader_plugin = Loader
+
+ return api.load(Loader=loader_plugin,
+ representation=representation["_id"])
+
+    def write_create(self):
+        """
+        Create the render write node via the `CreateWriteRender` creator.
+        """
+
+        creator_name = "CreateWriteRender"
+
+        creator_plugin = None
+        for Creator in api.discover(api.Creator):
+            if Creator.__name__ != creator_name:
+                continue
+
+            creator_plugin = Creator
+            break
+
+        return creator_plugin("render_writeMain", self._asset).process()
+
+ def create_backdrop(self, label="", color=None, layer=0,
+ nodes=None):
+ """
+ Create Backdrop node
+
+ Arguments:
+ color (str): nuke compatible string with color code
+ layer (int): layer of node usually used (self.pos_layer - 1)
+ label (str): the message
+ nodes (list): list of nodes to be wrapped into backdrop
+
+ """
+ assert isinstance(nodes, list), "`nodes` should be a list of nodes"
+ layer = self.pos_layer + layer
+
+        return create_backdrop(label=label, color=color,
+                               layer=layer, nodes=nodes)
+
+ def position_reset(self, xpos=0, ypos=0):
+ self.xpos = xpos
+ self.ypos = ypos
+
+ def position_right(self, multiply=1):
+ self.xpos += (self.xpos_size * multiply) + self.xpos_gap
+
+ def position_left(self, multiply=1):
+ self.xpos -= (self.xpos_size * multiply) + self.xpos_gap
+
+    def position_down(self, multiply=1):
+        self.ypos += (self.ypos_size * multiply) + self.ypos_gap
+
+ def position_up(self, multiply=1):
+ self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
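+
+    # Example (hedged): Nuke's DAG y axis grows downward, so position_up()
+    # decreases `ypos` while position_down() increases it; for instance
+    # position_right(2) shifts `xpos` by 2 * xpos_size + xpos_gap = 210.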
diff --git a/pype/nuke/menu.py b/pype/nuke/menu.py
index 4f5410f8fd..56111674a8 100644
--- a/pype/nuke/menu.py
+++ b/pype/nuke/menu.py
@@ -9,7 +9,7 @@ log = Logger().get_logger(__name__, "nuke")
def install():
menubar = nuke.menu("Nuke")
menu = menubar.findItem(Session["AVALON_LABEL"])
-
+ workfile_settings = lib.WorkfileSettings()
# replace reset resolution from avalon core to pype's
name = "Reset Resolution"
new_name = "Set Resolution"
@@ -20,7 +20,7 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
- menu.addCommand(new_name, lib.reset_resolution, index=(rm_item[0]))
+    menu.addCommand(new_name, workfile_settings.reset_resolution,
+                    index=(rm_item[0]))
# replace reset frame range from avalon core to pype's
name = "Reset Frame Range"
@@ -31,20 +31,28 @@ def install():
log.debug("Changing Item: {}".format(rm_item))
# rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
- menu.addCommand(new_name, lib.reset_frame_range_handles, index=(rm_item[0]))
+    menu.addCommand(new_name,
+                    workfile_settings.reset_frame_range_handles,
+                    index=(rm_item[0]))
# add colorspace menu item
name = "Set colorspace"
menu.addCommand(
- name, lib.set_colorspace,
+ name, workfile_settings.set_colorspace,
index=(rm_item[0]+2)
)
log.debug("Adding menu item: {}".format(name))
+ # add workfile builder menu item
+ name = "Build First Workfile.."
+ menu.addCommand(
+ name, lib.BuildWorkfile().process,
+ index=(rm_item[0]+7)
+ )
+ log.debug("Adding menu item: {}".format(name))
+
# add item that applies all setting above
name = "Apply all settings"
menu.addCommand(
- name, lib.set_context_settings, index=(rm_item[0]+3)
+ name, workfile_settings.set_context_settings, index=(rm_item[0]+3)
)
log.debug("Adding menu item: {}".format(name))
diff --git a/pype/nukestudio/__init__.py b/pype/nukestudio/__init__.py
index c900848a93..9283e732af 100644
--- a/pype/nukestudio/__init__.py
+++ b/pype/nukestudio/__init__.py
@@ -1,7 +1,6 @@
import os
from pypeapp import Logger
import hiero
-from avalon.tools import workfiles
from avalon import api as avalon
from pyblish import api as pyblish
diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py
index 6674e8a3aa..81b48f294d 100644
--- a/pype/nukestudio/lib.py
+++ b/pype/nukestudio/lib.py
@@ -189,7 +189,7 @@ def add_submission():
class PublishAction(QtWidgets.QAction):
"""
- Action with is showing as menu item
+    Action which is shown as a menu item
"""
def __init__(self):
@@ -287,3 +287,59 @@ def _show_no_gui():
messagebox.setStandardButtons(messagebox.Ok)
messagebox.exec_()
+
+
+def CreateNukeWorkfile(nodes=None,
+ nodes_effects=None,
+ to_timeline=False,
+ **kwargs):
+    ''' Create a nuke workfile of a particular version from given nodes.
+    It also creates timeline track items as precomps.
+
+    Arguments:
+        nodes (list of dict): nodes to create; each dict key is a knob
+            and their order is important
+        to_timeline (bool): will build trackItem with metadata
+
+    Returns:
+        bool: True if done
+
+    Raises:
+        Exception: with traceback
+
+    '''
+ import hiero.core
+ from avalon.nuke import imprint
+ from pype.nuke import (
+ lib as nklib
+ )
+
+    # `filepath` and `representations` are expected in kwargs; this is an
+    # assumption, they are not formal arguments of this function yet
+    filepath = kwargs.get("filepath")
+    representations = kwargs.get("representations", [])
+
+    # check if the file exists, if it does then raise "File exists!"
+    # (RuntimeError is used since FileExistsError is Python 3 only)
+    if filepath and os.path.exists(filepath):
+        raise RuntimeError("File already exists: `{}`".format(filepath))
+
+    # if no representations are matching then
+    # raise "no representations to be built"
+    if not representations:
+        raise AttributeError("Missing list of `representations`")
+
+    # check nodes input
+    if not nodes:
+        log.warning("Missing list of `nodes`")
+
+ # create temp nk file
+ nuke_script = hiero.core.nuke.ScriptWriter()
+
+ # create root node and save all metadata
+ root_node = hiero.core.nuke.RootNode()
+
+ root_path = os.environ["AVALON_PROJECTS"]
+
+ nuke_script.addNode(root_node)
+
+ # here to call pype.nuke.lib.BuildWorkfile
+ script_builder = nklib.BuildWorkfile(
+ root_node=root_node,
+ root_path=root_path,
+ nodes=nuke_script.getNodes(),
+ **kwargs
+ )
diff --git a/pype/nukestudio/workio.py b/pype/nukestudio/workio.py
index 7fbd85a708..eadd8322cd 100644
--- a/pype/nukestudio/workio.py
+++ b/pype/nukestudio/workio.py
@@ -1,19 +1,22 @@
-"""Host API required Work Files tool"""
import os
+
import hiero
+from avalon import api
+
def file_extensions():
return [".hrox"]
def has_unsaved_changes():
- return hiero.core.projects()[-1]
+ # There are no methods for querying unsaved changes to a project, so
+ # enforcing to always save.
+ return True
def save(filepath):
project = hiero.core.projects()[-1]
-
if project:
project.saveAs(filepath)
else:
@@ -22,40 +25,20 @@ def save(filepath):
def open(filepath):
- try:
- hiero.core.openProject(filepath)
- return True
- except Exception as e:
- try:
- from PySide.QtGui import *
- from PySide.QtCore import *
- except:
- from PySide2.QtGui import *
- from PySide2.QtWidgets import *
- from PySide2.QtCore import *
-
- prompt = "Cannot open the selected file: `{}`".format(e)
- hiero.core.log.error(prompt)
- dialog = QMessageBox.critical(
- hiero.ui.mainWindow(), "Error", unicode(prompt))
+ hiero.core.openProject(filepath)
+ return True
def current_file():
- import os
- import hiero
-
current_file = hiero.core.projects()[-1].path()
normalised = os.path.normpath(current_file)
# Unsaved current file
- if normalised is '':
- return "NOT SAVED"
+ if normalised == "":
+ return None
return normalised
-
def work_root():
- from avalon import api
-
return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/")
diff --git a/pype/plugin.py b/pype/plugin.py
index c77b9927e1..a3460e693e 100644
--- a/pype/plugin.py
+++ b/pype/plugin.py
@@ -2,13 +2,54 @@ import tempfile
import os
import pyblish.api
+from pypeapp import config
+import inspect
+
ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05
ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1
ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2
ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3
-class Extractor(pyblish.api.InstancePlugin):
+def imprint_attributes(plugin):
+ """
+ Load presets by class and set them as attributes (if found)
+
+ :param plugin: plugin instance
+ :type plugin: instance
+ """
+ file = inspect.getfile(plugin.__class__)
+ file = os.path.normpath(file)
+    plugin_kind = file.split(os.path.sep)[-2]
+    plugin_host = file.split(os.path.sep)[-3]
+ plugin_name = type(plugin).__name__
+ try:
+ config_data = config.get_presets()['plugins'][plugin_host][plugin_kind][plugin_name] # noqa: E501
+ except KeyError:
+ print("preset not found")
+ return
+
+ for option, value in config_data.items():
+ if option == "enabled" and value is False:
+ setattr(plugin, "active", False)
+ else:
+ setattr(plugin, option, value)
+ print("setting {}: {} on {}".format(option, value, plugin_name))
+
+
+class ContextPlugin(pyblish.api.ContextPlugin):
+ def process(cls, *args, **kwargs):
+ imprint_attributes(cls)
+ super(ContextPlugin, cls).process(cls, *args, **kwargs)
+
+
+class InstancePlugin(pyblish.api.InstancePlugin):
+ def process(cls, *args, **kwargs):
+ imprint_attributes(cls)
+        super(InstancePlugin, cls).process(cls, *args, **kwargs)
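+
+
+# Example (hedged): for a hypothetical plugin class `ExtractThumbnail` in
+# `pype/plugins/global/publish/extract_thumbnail.py`, imprint_attributes()
+# looks up presets['plugins']['global']['publish']['ExtractThumbnail'];
+# a value of {"enabled": false} deactivates the plugin, any other key is
+# set as a class attribute.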
+
+
+class Extractor(InstancePlugin):
"""Extractor base class.
The extractor base class implements a "staging_dir" function used to
diff --git a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py
index 5f0516c593..25c641c168 100644
--- a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py
+++ b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py
@@ -106,11 +106,11 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
tasks_to_create = []
for child in entity['children']:
if child.entity_type.lower() == 'task':
- existing_tasks.append(child['name'])
+ existing_tasks.append(child['name'].lower())
# existing_tasks.append(child['type']['name'])
for task in tasks:
- if task in existing_tasks:
+ if task.lower() in existing_tasks:
print("Task {} already exists".format(task))
continue
tasks_to_create.append(task)
diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index 1ffda4fdfa..b1569aaa45 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -94,7 +94,8 @@ class ExtractBurnin(pype.api.Extractor):
args = [executable, scriptpath, json_data]
self.log.debug("Executing: {}".format(args))
- pype.api.subprocess(args)
+ output = pype.api.subprocess(args)
+ self.log.debug("Output: {}".format(output))
repre_update = {
"files": movieFileBurnin,
diff --git a/pype/plugins/global/publish/integrate_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py
similarity index 95%
rename from pype/plugins/global/publish/integrate_hierarchy_avalon.py
rename to pype/plugins/global/publish/extract_hierarchy_avalon.py
index c01cb2d26a..778263f29a 100644
--- a/pype/plugins/global/publish/integrate_hierarchy_avalon.py
+++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py
@@ -2,11 +2,11 @@ import pyblish.api
from avalon import io
-class IntegrateHierarchyToAvalon(pyblish.api.ContextPlugin):
+class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
"""Create entities in Avalon based on collected data."""
- order = pyblish.api.IntegratorOrder - 0.1
- label = "Integrate Hierarchy To Avalon"
+ order = pyblish.api.ExtractorOrder - 0.01
+ label = "Extract Hierarchy To Avalon"
families = ["clip", "shot"]
def process(self, context):
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index bf53fa87d4..7e67ef7bab 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -170,8 +170,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
subprcs_cmd = " ".join(mov_args)
# run subprocess
- self.log.debug("{}".format(subprcs_cmd))
- pype.api.subprocess(subprcs_cmd)
+ self.log.debug("Executing: {}".format(subprcs_cmd))
+ output = pype.api.subprocess(subprcs_cmd)
+ self.log.debug("Output: {}".format(output))
# create representation data
repre_new.update({
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index c416cf3fc7..e5d8007d70 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -63,6 +63,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
+ "lut",
"audio"
]
exclude_families = ["clip"]
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index 19989d7d8f..8d352b8872 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -231,28 +231,24 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"""
# Get a submission job
data = instance.data.copy()
- job = instance.data.get("deadlineSubmissionJob")
+ render_job = data.pop("deadlineSubmissionJob")
submission_type = "deadline"
- if not job:
+ if not render_job:
# No deadline job. Try Muster: musterSubmissionJob
- job = data.pop("musterSubmissionJob")
+ render_job = data.pop("musterSubmissionJob")
submission_type = "muster"
- if not job:
+ if not render_job:
raise RuntimeError("Can't continue without valid Deadline "
"or Muster submission prior to this "
"plug-in.")
if submission_type == "deadline":
- render_job = data.pop("deadlineSubmissionJob")
self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
- self._submit_deadline_post_job(instance, job)
-
- if submission_type == "muster":
- render_job = data.pop("musterSubmissionJob")
+ self._submit_deadline_post_job(instance, render_job)
asset = data.get("asset") or api.Session["AVALON_ASSET"]
subset = data["subset"]
diff --git a/pype/plugins/maya/publish/submit_maya_muster.py b/pype/plugins/maya/publish/submit_maya_muster.py
index 0099cbcaeb..84ad890de1 100644
--- a/pype/plugins/maya/publish/submit_maya_muster.py
+++ b/pype/plugins/maya/publish/submit_maya_muster.py
@@ -7,15 +7,30 @@ import pyblish.api
import pype.maya.lib as lib
import appdirs
import platform
+from pypeapp import config
-# mapping between Maya rendere names and Muster template names
-muster_maya_mapping = {
- "arnold": "Maya Arnold",
- "mentalray": "Maya Mr",
- "renderman": "Maya Renderman",
- "redshift": "Maya Redshift"
-}
+# mapping between Maya renderer names and Muster template ids
+def _get_template_id(renderer):
+ """
+ Return muster template ID based on renderer name.
+
+ :param renderer: renderer name
+ :type renderer: str
+ :returns: muster template id
+ :rtype: int
+ """
+
+ templates = config.get_presets()["muster"]["templates_mapping"]
+ if not templates:
+ raise RuntimeError(("Muster template mapping missing in pype-config "
+ "`presets/muster/templates_mapping.json`"))
+ try:
+ template_id = templates[renderer]
+ except KeyError:
+ raise RuntimeError("Unmapped renderer - missing template id")
+
+ return template_id
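+
+
+# Example (hedged): `templates_mapping.json` maps renderer names to integer
+# Muster template ids, e.g. {"arnold": 46, "redshift": 55}; these ids are
+# illustrative only and must match your Muster dispatcher configuration.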
def _get_script():
@@ -213,12 +228,10 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
:rtype: int
:raises: Exception if template ID isn't found
"""
- try:
- self.log.info("Trying to find template for [{}]".format(renderer))
- mapped = muster_maya_mapping.get(renderer)
- return self._templates.get(mapped)
- except ValueError:
- raise Exception('Unimplemented renderer {}'.format(renderer))
+ self.log.info("Trying to find template for [{}]".format(renderer))
+ mapped = _get_template_id(renderer)
+ self.log.info("got id [{}]".format(mapped))
+ return self._templates.get(mapped)
def _submit(self, payload):
"""
@@ -253,15 +266,15 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")
if self.MUSTER_REST_URL is None:
- self.log.debug(
+ self.log.error(
"\"MUSTER_REST_URL\" is not found. Skipping "
- "\"{}\".".format(instance)
+ "[{}]".format(instance)
)
- return
+ raise RuntimeError("MUSTER_REST_URL not set")
self._load_credentials()
self._authenticate()
- self._get_templates()
+ # self._get_templates()
context = instance.context
workspace = context.data["workspaceDir"]
@@ -349,7 +362,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
"platform": 0,
"job": {
"jobName": jobname,
- "templateId": self._resolve_template(
+ "templateId": self._get_template_id(
instance.data["renderer"]),
"chunksInterleave": 2,
"chunksPriority": "0",
diff --git a/pype/plugins/maya/publish/validate_attributes.py b/pype/plugins/maya/publish/validate_attributes.py
index 654df1ce72..6ecebfa107 100644
--- a/pype/plugins/maya/publish/validate_attributes.py
+++ b/pype/plugins/maya/publish/validate_attributes.py
@@ -20,6 +20,7 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
label = "Attributes"
hosts = ["maya"]
actions = [pype.api.RepairContextAction]
+ optional = True
def process(self, context):
# Check for preset existence.
@@ -74,8 +75,12 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
presets_to_validate = attributes[name]
for attribute in node.listAttr():
- if attribute.attrName() in presets_to_validate:
- expected = presets_to_validate[attribute.attrName()]
+ names = [attribute.shortName(), attribute.longName()]
+ attribute_name = list(
+ set(names) & set(presets_to_validate.keys())
+ )
+ if attribute_name:
+ expected = presets_to_validate[attribute_name[0]]
if attribute.get() != expected:
invalid_attributes.append(
{
diff --git a/pype/plugins/maya/publish/validate_look_sets.py b/pype/plugins/maya/publish/validate_look_sets.py
index cfa499c763..ebc39bd3ba 100644
--- a/pype/plugins/maya/publish/validate_look_sets.py
+++ b/pype/plugins/maya/publish/validate_look_sets.py
@@ -75,11 +75,11 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
if missing_sets:
for set in missing_sets:
if '_SET' not in set:
- # A set of this node is not coming along, this is wrong!
- cls.log.error("Missing sets '{}' for node "
- "'{}'".format(missing_sets, node))
- invalid.append(node)
- continue
+ # A set of this node is not coming along, this is wrong!
+ cls.log.error("Missing sets '{}' for node "
+ "'{}'".format(missing_sets, node))
+ invalid.append(node)
+ continue
# Ensure the node is in the sets that are collected
for shaderset, data in relationships.items():
diff --git a/pype/plugins/maya/publish/validate_scene_set_workspace.py b/pype/plugins/maya/publish/validate_scene_set_workspace.py
index 778c7eae86..bda397cf2a 100644
--- a/pype/plugins/maya/publish/validate_scene_set_workspace.py
+++ b/pype/plugins/maya/publish/validate_scene_set_workspace.py
@@ -12,7 +12,7 @@ def is_subdir(path, root_dir):
root_dir = os.path.realpath(root_dir)
# If not on same drive
- if os.path.splitdrive(path)[0] != os.path.splitdrive(root_dir)[0]:
+ if os.path.splitdrive(path)[0].lower() != os.path.splitdrive(root_dir)[0].lower(): # noqa: E501
return False
# Get 'relative path' (can contain ../ which means going up)
diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py
index 03107238b5..c3da555259 100644
--- a/pype/plugins/nuke/create/create_write.py
+++ b/pype/plugins/nuke/create/create_write.py
@@ -69,9 +69,7 @@ class CreateWriteRender(avalon.nuke.Creator):
write_data.update({
"fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
- create_write_node(self.data["subset"], write_data)
-
- return
+ return create_write_node(self.data["subset"], write_data)
class CreateWritePrerender(avalon.nuke.Creator):
diff --git a/pype/plugins/nuke/load/load_luts.py b/pype/plugins/nuke/load/load_luts.py
new file mode 100644
index 0000000000..7e1302fffe
--- /dev/null
+++ b/pype/plugins/nuke/load/load_luts.py
@@ -0,0 +1,317 @@
+from avalon import api, style, io
+import nuke
+import json
+from collections import OrderedDict
+
+
+class LoadLuts(api.Loader):
+ """Loading colorspace soft effect exported from nukestudio"""
+
+ representations = ["lutJson"]
+ families = ["lut"]
+
+ label = "Load Luts - nodes"
+ order = 0
+ icon = "cc"
+ color = style.colors.light
+
+ def load(self, context, name, namespace, data):
+ """
+ Loading function to get the soft effects to particular read node
+
+ Arguments:
+ context (dict): context of version
+ name (str): name of the version
+ namespace (str): asset name
+ data (dict): compulsory attribute > not used
+
+ Returns:
+ nuke node: containerised nuke node object
+ """
+ # import dependencies
+ from avalon.nuke import containerise
+
+ # get main variables
+ version = context['version']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = namespace or context['asset']['name']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ # prepare data for imprinting
+ # add additional metadata from the version to imprint to Avalon knob
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # getting file path
+ file = self.fname.replace("\\", "/")
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ GN = nuke.createNode("Group")
+
+ GN["name"].setValue(object_name)
+
+ # adding content to the group node
+ with GN:
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 4:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to find parent read node
+ self.connect_read_node(GN, namespace, json_f["assignTo"])
+
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
+
+ return containerise(
+ node=GN,
+ name=name,
+ namespace=namespace,
+ context=context,
+ loader=self.__class__.__name__,
+ data=data_imprint)
+
+ def update(self, container, representation):
+ """Update the Loader's path
+
+ Nuke automatically tries to reset some variables when changing
+ the loader's path to a new file. These automatic changes are to its
+ inputs:
+
+ """
+
+ from avalon.nuke import (
+ update_container
+ )
+ # get main variables
+ # Get version from io
+ version = io.find_one({
+ "type": "version",
+ "_id": representation["parent"]
+ })
+ # get corresponding node
+ GN = nuke.toNode(container['objectName'])
+
+ file = api.get_representation_path(representation).replace("\\", "/")
+ name = container['name']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = container['namespace']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"representation": str(representation["_id"]),
+ "frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # Update the imprinted representation
+ update_container(
+ GN,
+ data_imprint
+ )
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ # adding content to the group node
+ with GN:
+ # first remove all nodes
+ [nuke.delete(n) for n in nuke.allNodes()]
+
+ # create input node
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 3:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ # create output node
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to find parent read node
+ self.connect_read_node(GN, namespace, json_f["assignTo"])
+
+ # get all versions in list
+ versions = io.find({
+ "type": "version",
+ "parent": version["parent"]
+ }).distinct('name')
+
+ max_version = max(versions)
+
+ # change color of node
+ if version.get("name") not in [max_version]:
+ GN["tile_color"].setValue(int("0xd84f20ff", 16))
+ else:
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("udated to version: {}".format(version.get("name")))
+
+ def connect_read_node(self, group_node, asset, subset):
+ """
+ Finds read node and selects it
+
+ Arguments:
+ asset (str): asset name
+
+ Returns:
+ nuke node: node is selected
+ None: if nothing found
+ """
+ search_name = "{0}_{1}".format(asset, subset)
+ node = [n for n in nuke.allNodes() if search_name in n["name"].value()]
+ if len(node) > 0:
+ rn = node[0]
+ else:
+ rn = None
+
+ # Parent read node has been found
+ # solving connections
+ if rn:
+ dep_nodes = rn.dependent()
+
+ if len(dep_nodes) > 0:
+ for dn in dep_nodes:
+ dn.setInput(0, group_node)
+
+ group_node.setInput(0, rn)
+ group_node.autoplace()
+
+ def reorder_nodes(self, data):
+ new_order = OrderedDict()
+ trackNums = [v["trackIndex"] for k, v in data.items()]
+ subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
+
+ for trackIndex in range(
+ min(trackNums), max(trackNums) + 1):
+ for subTrackIndex in range(
+ min(subTrackNums), max(subTrackNums) + 1):
+ item = self.get_item(data, trackIndex, subTrackIndex)
+                if item:
+ new_order.update(item)
+ return new_order
+
+ def get_item(self, data, trackIndex, subTrackIndex):
+ return {key: val for key, val in data.items()
+ if subTrackIndex == val["subTrackIndex"]
+ if trackIndex == val["trackIndex"]}
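+
+    # Example (hedged): given effects
+    #   {"lut":   {"trackIndex": 0, "subTrackIndex": 0, ...},
+    #    "grade": {"trackIndex": 1, "subTrackIndex": 0, ...}}
+    # reorder_nodes() returns an OrderedDict with "lut" before "grade",
+    # so effects on lower (sub)tracks are applied first.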
+
+ def byteify(self, input):
+ """
+        Converts unicode strings to byte strings,
+        recursively walking through the whole dictionary
+
+ Arguments:
+ input (dict/str): input
+
+ Returns:
+ dict: with fixed values and keys
+
+ """
+
+ if isinstance(input, dict):
+ return {self.byteify(key): self.byteify(value)
+ for key, value in input.iteritems()}
+ elif isinstance(input, list):
+ return [self.byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from avalon.nuke import viewer_update_and_undo_stop
+ node = nuke.toNode(container['objectName'])
+ with viewer_update_and_undo_stop():
+ nuke.delete(node)
diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py
new file mode 100644
index 0000000000..b0a30d78e4
--- /dev/null
+++ b/pype/plugins/nuke/load/load_luts_ip.py
@@ -0,0 +1,330 @@
+from avalon import api, style, io
+import nuke
+import json
+from collections import OrderedDict
+from pype.nuke import lib
+
+class LoadLutsInputProcess(api.Loader):
+ """Loading colorspace soft effect exported from nukestudio"""
+
+ representations = ["lutJson"]
+ families = ["lut"]
+
+ label = "Load Luts - Input Process"
+ order = 0
+ icon = "eye"
+ color = style.colors.alert
+
+ def load(self, context, name, namespace, data):
+ """
+ Loading function to get the soft effects to particular read node
+
+ Arguments:
+ context (dict): context of version
+ name (str): name of the version
+ namespace (str): asset name
+ data (dict): compulsory attribute > not used
+
+ Returns:
+ nuke node: containerised nuke node object
+ """
+ # import dependencies
+ from avalon.nuke import containerise
+
+ # get main variables
+ version = context['version']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = namespace or context['asset']['name']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ # prepare data for imprinting
+ # add additional metadata from the version to imprint to Avalon knob
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # getting file path
+ file = self.fname.replace("\\", "/")
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ GN = nuke.createNode("Group")
+
+ GN["name"].setValue(object_name)
+
+ # adding content to the group node
+ with GN:
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 4:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to place it under Viewer1
+ if not self.connect_active_viewer(GN):
+ nuke.delete(GN)
+ return
+
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
+
+ return containerise(
+ node=GN,
+ name=name,
+ namespace=namespace,
+ context=context,
+ loader=self.__class__.__name__,
+ data=data_imprint)
+
+ def update(self, container, representation):
+ """Update the Loader's path
+
+ Nuke automatically tries to reset some variables when changing
+ the loader's path to a new file. These automatic changes are to its
+ inputs:
+
+ """
+
+ from avalon.nuke import (
+ update_container
+ )
+ # get main variables
+ # Get version from io
+ version = io.find_one({
+ "type": "version",
+ "_id": representation["parent"]
+ })
+ # get corresponding node
+ GN = nuke.toNode(container['objectName'])
+
+ file = api.get_representation_path(representation).replace("\\", "/")
+ name = container['name']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = container['namespace']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"representation": str(representation["_id"]),
+ "frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # Update the imprinted representation
+ update_container(
+ GN,
+ data_imprint
+ )
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ # adding content to the group node
+ with GN:
+ # first remove all nodes
+ [nuke.delete(n) for n in nuke.allNodes()]
+
+ # create input node
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 3:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ # create output node
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to place it under Viewer1
+ if not self.connect_active_viewer(GN):
+ nuke.delete(GN)
+ return
+
+ # get all versions in list
+ versions = io.find({
+ "type": "version",
+ "parent": version["parent"]
+ }).distinct('name')
+
+ max_version = max(versions)
+
+ # change color of node
+ if version.get("name") not in [max_version]:
+ GN["tile_color"].setValue(int("0xd84f20ff", 16))
+ else:
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("udated to version: {}".format(version.get("name")))
+
+ def connect_active_viewer(self, group_node):
+ """
+ Finds Active viewer and
+ place the node under it, also adds
+ name of group into Input Process of the viewer
+
+ Arguments:
+ group_node (nuke node): nuke group node object
+
+ """
+ group_node_name = group_node["name"].value()
+
+ viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
+ if len(viewer) > 0:
+ viewer = viewer[0]
+ else:
+ self.log.error("Please create Viewer node before you run this action again")
+ return None
+
+ # get coordinates of Viewer1
+ xpos = viewer["xpos"].value()
+ ypos = viewer["ypos"].value()
+
+ ypos += 150
+
+ viewer["ypos"].setValue(ypos)
+
+ # set coordinates to group node
+ group_node["xpos"].setValue(xpos)
+ group_node["ypos"].setValue(ypos + 50)
+
+ # add group node name to Viewer Input Process
+ viewer["input_process_node"].setValue(group_node_name)
+
+ # put backdrop under
+ lib.create_backdrop(label="Input Process", layer=2, nodes=[viewer, group_node], color="0x7c7faaff")
+
+ return True
+
+ def reorder_nodes(self, data):
+ new_order = OrderedDict()
+ trackNums = [v["trackIndex"] for k, v in data.items()]
+ subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
+
+ for trackIndex in range(
+ min(trackNums), max(trackNums) + 1):
+ for subTrackIndex in range(
+ min(subTrackNums), max(subTrackNums) + 1):
+ item = self.get_item(data, trackIndex, subTrackIndex)
+                if item:
+ new_order.update(item)
+ return new_order
+
+ def get_item(self, data, trackIndex, subTrackIndex):
+ return {key: val for key, val in data.items()
+ if subTrackIndex == val["subTrackIndex"]
+ if trackIndex == val["trackIndex"]}
+
+ def byteify(self, input):
+ """
+        Converts unicode strings to byte strings,
+        recursively walking through the whole dictionary
+
+ Arguments:
+ input (dict/str): input
+
+ Returns:
+ dict: with fixed values and keys
+
+ """
+
+ if isinstance(input, dict):
+ return {self.byteify(key): self.byteify(value)
+ for key, value in input.iteritems()}
+ elif isinstance(input, list):
+ return [self.byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from avalon.nuke import viewer_update_and_undo_stop
+ node = nuke.toNode(container['objectName'])
+ with viewer_update_and_undo_stop():
+ nuke.delete(node)
diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py
index 58aee315a6..e6daaaff8a 100644
--- a/pype/plugins/nuke/load/load_mov.py
+++ b/pype/plugins/nuke/load/load_mov.py
@@ -101,7 +101,8 @@ class LoadMov(api.Loader):
handles = version_data.get("handles", None)
handle_start = version_data.get("handleStart", None)
handle_end = version_data.get("handleEnd", None)
-
+ repr_cont = context["representation"]["context"]
+
# fix handle start and end if none are available
if not handle_start and not handle_end:
handle_start = handles
@@ -119,9 +120,11 @@ class LoadMov(api.Loader):
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
- read_name = "Read"
- read_name += '_' + context["representation"]["context"]["subset"]
- read_name += '_' + context["representation"]["name"]
+ read_name = "Read_{0}_{1}_{2}".format(
+ repr_cont["asset"],
+ repr_cont["subset"],
+ repr_cont["representation"])
+
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py
index 5fd43d3481..2946857e09 100644
--- a/pype/plugins/nuke/load/load_sequence.py
+++ b/pype/plugins/nuke/load/load_sequence.py
@@ -96,6 +96,7 @@ class LoadSequence(api.Loader):
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)
+ self.handle_end = version_data.get("handleEnd", 0)
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
@@ -104,10 +106,17 @@ class LoadSequence(api.Loader):
if namespace is None:
namespace = context['asset']['name']
+ first -= self.handle_start
+ last += self.handle_end
+
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
- read_name = "Read_" + context["representation"]["context"]["subset"]
+ repr_cont = context["representation"]["context"]
+ read_name = "Read_{0}_{1}_{2}".format(
+ repr_cont["asset"],
+ repr_cont["subset"],
+ repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@@ -227,7 +236,8 @@ class LoadSequence(api.Loader):
self.first_frame = int(nuke.root()["first_frame"].getValue())
self.handle_start = version_data.get("handleStart", 0)
-
+ self.handle_end = version_data.get("handleEnd", 0)
+
first = version_data.get("frameStart", None)
last = version_data.get("frameEnd", None)
@@ -237,6 +247,9 @@ class LoadSequence(api.Loader):
"{} ({})".format(node['name'].value(), representation))
first = 0
+ first -= self.handle_start
+ last += self.handle_end
+
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file["path"])
diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py
index 7104e3bd05..29ae6cb929 100644
--- a/pype/plugins/nuke/publish/collect_writes.py
+++ b/pype/plugins/nuke/publish/collect_writes.py
@@ -101,6 +101,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"fps": instance.context.data["fps"]
}
+ group_node = [x for x in instance if x.Class() == "Group"][0]
+ deadlineChunkSize = 1
+ if "deadlineChunkSize" in group_node.knobs():
+ deadlineChunkSize = group_node["deadlineChunkSize"].value()
+
instance.data.update({
"versionData": version_data,
"path": path,
@@ -112,6 +117,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
"frameEnd": last_frame,
"outputType": output_type,
"colorspace": node["colorspace"].value(),
+ "deadlineChunkSize": deadlineChunkSize
})
self.log.debug("instance.data: {}".format(instance.data))
diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index 0017de3ec4..ef971f3a37 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -84,6 +84,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
start=int(instance.data["frameStart"]),
end=int(instance.data["frameEnd"])
),
+ "ChunkSize": instance.data["deadlineChunkSize"],
"Comment": comment,
diff --git a/pype/plugins/nuke/publish/validate_write_deadline_tab.py b/pype/plugins/nuke/publish/validate_write_deadline_tab.py
new file mode 100644
index 0000000000..0c222a164a
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_write_deadline_tab.py
@@ -0,0 +1,42 @@
+import pyblish.api
+import pype.nuke.lib
+
+
+class RepairNukeWriteDeadlineTab(pyblish.api.Action):
+
+ label = "Repair"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+
+ # Get the errored instances
+ failed = []
+ for result in context.data["results"]:
+ if (result["error"] is not None and result["instance"] is not None
+ and result["instance"] not in failed):
+ failed.append(result["instance"])
+
+ # Apply pyblish.logic to get the instances for the plug-in
+ instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+ for instance in instances:
+ group_node = [x for x in instance if x.Class() == "Group"][0]
+ pype.nuke.lib.add_deadline_tab(group_node)
+
+
+class ValidateNukeWriteDeadlineTab(pyblish.api.InstancePlugin):
+ """Ensure Deadline tab is present and current."""
+
+ order = pyblish.api.ValidatorOrder
+ label = "Deadline Tab"
+ hosts = ["nuke"]
+ optional = True
+ families = ["write"]
+ actions = [RepairNukeWriteDeadlineTab]
+
+ def process(self, instance):
+ group_node = [x for x in instance if x.Class() == "Group"][0]
+
+ msg = "Deadline tab missing on \"{}\"".format(group_node.name())
+ assert "Deadline" in group_node.knobs(), msg
diff --git a/pype/plugins/nukestudio/publish/collect_metadata.py b/pype/plugins/nukestudio/_unused/collect_metadata.py
similarity index 100%
rename from pype/plugins/nukestudio/publish/collect_metadata.py
rename to pype/plugins/nukestudio/_unused/collect_metadata.py
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index da71e2ab50..7a400909fd 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -20,83 +20,114 @@ class CollectClips(api.ContextPlugin):
projectdata = context.data["projectData"]
version = context.data.get("version", "001")
- instances_data = []
- for item in context.data.get("selection", []):
- # Skip audio track items
- # Try/Except is to handle items types, like EffectTrackItem
- try:
- media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
- if str(item.mediaType()) != media_type:
+ sequence = context.data.get("activeSequence")
+ selection = context.data.get("selection")
+
+ track_effects = dict()
+
+ # collect all trackItems as instances
+ for track_index, video_track in enumerate(sequence.videoTracks()):
+ items = video_track.items()
+ sub_items = video_track.subTrackItems()
+
+ for item in items:
+ # skip items that are not selected or are disabled
+ if item not in selection or not item.isEnabled():
continue
- except:
+
+ # Skip audio track items
+ # Try/Except is to handle items types, like EffectTrackItem
+ try:
+ media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
+ if str(item.mediaType()) != media_type:
+ continue
+ except Exception:
+ continue
+
+ asset = item.name()
+ track = item.parent()
+ source = item.source().mediaSource()
+ source_path = source.firstpath()
+ effects = [f for f in item.linkedItems() if f.isEnabled()]
+
+ # If source is *.nk it's a comp effect and we need to fetch the
+ # write node output. This should be improved by parsing the script
+ # rather than opening it.
+ if source_path.endswith(".nk"):
+ nuke.scriptOpen(source_path)
+ # There should only be one.
+ write_node = nuke.allNodes(filter="Write")[0]
+ path = nuke.filename(write_node)
+
+ if "%" in path:
+ # Get start frame from Nuke script and use the item source
+ # in/out, because you can have multiple shots covered with
+ # one nuke script.
+ start_frame = int(nuke.root()["first_frame"].getValue())
+ if write_node["use_limit"].getValue():
+ start_frame = int(write_node["first"].getValue())
+
+ path = path % (start_frame + item.sourceIn())
+
+ source_path = path
+ self.log.debug(
+ "Fetched source path \"{}\" from \"{}\" in "
+ "\"{}\".".format(
+ source_path, write_node.name(), source.firstpath()
+ )
+ )
+
+ try:
+ head, padding, ext = os.path.basename(source_path).split(".")
+ source_first_frame = int(padding)
+ except Exception:
+ source_first_frame = 0
+
+ data = {"name": "{0}_{1}".format(track.name(), item.name()),
+ "item": item,
+ "source": source,
+ "sourcePath": source_path,
+ "track": track.name(),
+ "trackIndex": track_index,
+ "sourceFirst": source_first_frame,
+ "effects": effects,
+ "sourceIn": int(item.sourceIn()),
+ "sourceOut": int(item.sourceOut()),
+ "clipIn": int(item.timelineIn()),
+ "clipOut": int(item.timelineOut()),
+ "asset": asset,
+ "family": "clip",
+ "families": [],
+ "handles": 0,
+ "handleStart": projectdata.get("handles", 0),
+ "handleEnd": projectdata.get("handles", 0),
+ "version": int(version)}
+
+ instance = context.create_instance(**data)
+
+ self.log.info("Created instance: {}".format(instance))
+ self.log.debug(">> effects: {}".format(instance.data["effects"]))
+
+ context.data["assetsShared"][asset] = dict()
+
+ # from here on we collect subTrackItems only from
+ # tracks that carry no video items
+ if len(items) > 0:
continue
- track = item.parent()
- source = item.source().mediaSource()
- source_path = source.firstpath()
+ # prepare a list under the track index key so all
+ # of its subTrackItems can be added to the context
+ track_effects[track_index] = list()
- # If source is *.nk its a comp effect and we need to fetch the
- # write node output. This should be improved by parsing the script
- # rather than opening it.
- if source_path.endswith(".nk"):
- nuke.scriptOpen(source_path)
- # There should noly be one.
- write_node = nuke.allNodes(filter="Write")[0]
- path = nuke.filename(write_node)
+ # collect all subtrack items
+ for sitem in sub_items:
+ # unwrap from tuple >> it is always a tuple with a single item
+ sitem = sitem[0]
+ # skip disabled subTrackItems
+ if not sitem.isEnabled():
+ continue
- if "%" in path:
- # Get start frame from Nuke script and use the item source
- # in/out, because you can have multiple shots covered with
- # one nuke script.
- start_frame = int(nuke.root()["first_frame"].getValue())
- if write_node["use_limit"].getValue():
- start_frame = int(write_node["first"].getValue())
+ track_effects[track_index].append(sitem)
- path = path % (start_frame + item.sourceIn())
-
- source_path = path
- self.log.debug(
- "Fetched source path \"{}\" from \"{}\" in "
- "\"{}\".".format(
- source_path, write_node.name(), source.firstpath()
- )
- )
-
- try:
- head, padding, ext = os.path.basename(source_path).split(".")
- source_first_frame = int(padding)
- except:
- source_first_frame = 0
-
- instances_data.append(
- {
- "name": "{0}_{1}".format(track.name(), item.name()),
- "item": item,
- "source": source,
- "sourcePath": source_path,
- "track": track.name(),
- "sourceFirst": source_first_frame,
- "sourceIn": int(item.sourceIn()),
- "sourceOut": int(item.sourceOut()),
- "clipIn": int(item.timelineIn()),
- "clipOut": int(item.timelineOut())
- }
- )
-
- for data in instances_data:
- data.update(
- {
- "asset": data["item"].name(),
- "family": "clip",
- "families": [],
- "handles": 0,
- "handleStart": projectdata.get("handles", 0),
- "handleEnd": projectdata.get("handles", 0),
- "version": int(version)
- }
- )
- instance = context.create_instance(**data)
- self.log.debug(
- "Created instance with data: {}".format(instance.data)
- )
- context.data["assetsShared"][data["asset"]] = dict()
+ context.data["trackEffects"] = track_effects
+ self.log.debug(">> sub_track_items: `{}`".format(track_effects))
diff --git a/pype/plugins/nukestudio/publish/collect_effects.py b/pype/plugins/nukestudio/publish/collect_effects.py
new file mode 100644
index 0000000000..0aee0adf2e
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_effects.py
@@ -0,0 +1,100 @@
+import pyblish.api
+import re
+
+
+class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
+ """Collect video tracks effects into context."""
+
+ order = pyblish.api.CollectorOrder + 0.1015
+ label = "Collect Soft Lut Effects"
+ families = ["clip"]
+
+ def process(self, instance):
+
+ self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset")))
+
+ # taking active sequence
+ subset = instance.data.get("subset")
+
+ if not subset:
+ return
+
+ track_effects = instance.context.data.get("trackEffects", {})
+ track_index = instance.data["trackIndex"]
+ effects = instance.data["effects"]
+
+ # creating context attribute
+ self.effects = {"assignTo": subset, "effects": dict()}
+
+ for sitem in effects:
+ self.add_effect(instance, track_index, sitem)
+
+ for t_index, sitems in track_effects.items():
+ for sitem in sitems:
+ if t_index <= track_index:
+ continue
+ self.log.debug(">> sitem: `{}`".format(sitem))
+ self.add_effect(instance, t_index, sitem)
+
+ if self.effects["effects"]:
+ instance.data["effectTrackItems"] = self.effects
+
+ if instance.data.get("effectTrackItems"):
+ instance.data["families"] += ["lut"]
+ self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys()))
+ self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {})))
+
+ def add_effect(self, instance, track_index, item):
+ track = item.parentTrack().name()
+ # node serialization
+ node = item.node()
+ node_serialized = self.node_serialisation(instance, node)
+
+ # collect timelineIn/Out
+ effect_t_in = int(item.timelineIn())
+ effect_t_out = int(item.timelineOut())
+
+ node_name = item.name()
+ node_class = re.sub(r"\d+", "", node_name)
+
+ self.effects["effects"].update({node_name: {
+ "class": node_class,
+ "timelineIn": effect_t_in,
+ "timelineOut": effect_t_out,
+ "subTrackIndex": item.subTrackIndex(),
+ "trackIndex": track_index,
+ "track": track,
+ "node": node_serialized
+ }})
+
+ def node_serialisation(self, instance, node):
+ node_serialized = {}
+ timeline_in_h = instance.data["clipInH"]
+ timeline_out_h = instance.data["clipOutH"]
+
+ # knob keys to ignore
+ _ignoring_keys = ['invert_mask', 'help', 'mask',
+ 'xpos', 'ypos', 'layer', 'process_mask', 'channel',
+ 'channels', 'maskChannelMask', 'maskChannelInput',
+ 'note_font', 'note_font_size', 'unpremult',
+ 'postage_stamp_frame', 'maskChannel', 'export_cc',
+ 'select_cccid', 'mix', 'version', 'matrix']
+
+ # loop through all knobs and collect those not ignored,
+ # together with their current values
+ for knob in node.knobs().keys():
+ # skip knobs on the ignore list
+ if knob in _ignoring_keys:
+ continue
+
+ # get animation if node is animated
+ if node[knob].isAnimated():
+ # grab animation including handles
+ knob_anim = [node[knob].getValueAt(i)
+ for i in range(timeline_in_h, timeline_out_h + 1)]
+
+ node_serialized[knob] = knob_anim
+ else:
+ node_serialized[knob] = node[knob].value()
+
+ return node_serialized
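
`node_serialisation` bakes animated knobs into one value per frame across the clip range including handles; the loader at the top of this patch replays such lists with `setValueAt`. A pure-Python sketch of the baking step, using a stand-in for the two Nuke knob calls involved:

    class FakeKnob(object):
        """Stand-in mimicking the Nuke knob API used above."""
        def isAnimated(self):
            return True

        def getValueAt(self, frame):
            return 0.5 + 0.01 * frame  # a linear ramp, for illustration

    knob = FakeKnob()
    clip_in_h, clip_out_h = 1001, 1005  # clip range including handles
    baked = [knob.getValueAt(f) for f in range(clip_in_h, clip_out_h + 1)]
    assert len(baked) == clip_out_h - clip_in_h + 1  # one sample per frame
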
diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
index 536abf5ba4..5f29837d80 100644
--- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
+++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
@@ -38,6 +38,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data.get("asset")
+ sequence = context.data['activeSequence']
+ width = int(sequence.format().width())
+ height = int(sequence.format().height())
+ pixel_aspect = sequence.format().pixelAspect()
# build data for inner nukestudio project property
data = {
@@ -157,6 +161,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
+ "width": width,
+ "height": height,
+ "pixelAspect": pixel_aspect,
"tasks": instance.data["tasks"]
})
@@ -191,7 +198,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
def process(self, context):
instances = context[:]
- sequence = context.data['activeSequence']
+
# create hierarchyContext attr if context has none
temp_context = {}
@@ -216,6 +223,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
+ instance.data["width"] = s_asset_data["width"]
+ instance.data["height"] = s_asset_data["height"]
+ instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
# adding frame start if any on instance
start_frame = s_asset_data.get("startingFrame")
@@ -265,16 +275,10 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# adding SourceResolution if Tag was present
if instance.data.get("main"):
- width = int(sequence.format().width())
- height = int(sequence.format().height())
- pixel_aspect = sequence.format().pixelAspect()
- self.log.info("Sequence Width,Height,PixelAspect are: `{0},{1},{2}`".format(
- width, height, pixel_aspect))
-
in_info['custom_attributes'].update({
- "resolutionWidth": width,
- "resolutionHeight": height,
- "pixelAspect": pixel_aspect
+ "resolutionWidth": instance.data["width"],
+ "resolutionHeight": instance.data["height"],
+ "pixelAspect": instance.data["pixelAspect"]
})
in_info['tasks'] = instance.data['tasks']
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index 9843307f14..2ebbfde551 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -66,11 +66,14 @@ class CollectPlates(api.InstancePlugin):
item = instance.data["item"]
width = int(item.source().mediaSource().width())
height = int(item.source().mediaSource().height())
- self.log.info("Source Width and Height are: `{0} x {1}`".format(
- width, height))
+ pixel_aspect = item.source().mediaSource().pixelAspect()
+
+ self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
+ width, height, pixel_aspect))
data.update({
"width": width,
- "height": height
+ "height": height,
+ "pixelAspect": pixel_aspect
})
self.log.debug("Creating instance with name: {}".format(data["name"]))
@@ -123,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
"frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
- "clipInH", "clipOutH", "asset", "track", "version"
+ "clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
]
# pass data to version
@@ -133,6 +136,7 @@ class CollectPlatesData(api.InstancePlugin):
version_data.update({
"handles": version_data['handleStart'],
"colorspace": item.sourceMediaColourTransform(),
+ "colorspaceScript": instance.context.data["colorspace"],
"families": [f for f in families if 'ftrack' not in f],
"subset": name,
"fps": instance.context.data["fps"]
@@ -171,6 +175,8 @@ class CollectPlatesData(api.InstancePlugin):
if os.path.exists(mov_path):
# adding mov into the representations
self.log.debug("__ mov_path: {}".format(mov_path))
+ instance.data["label"] += " - review"
+
plates_mov_representation = {
'files': mov_file,
'stagingDir': staging_dir,
diff --git a/pype/plugins/nukestudio/publish/collect_selection.py b/pype/plugins/nukestudio/publish/collect_selection.py
index ec8d513de8..28a529d560 100644
--- a/pype/plugins/nukestudio/publish/collect_selection.py
+++ b/pype/plugins/nukestudio/publish/collect_selection.py
@@ -14,12 +14,4 @@ class CollectSelection(pyblish.api.ContextPlugin):
self.log.debug("selection: {}".format(selection))
- # if not selection:
- # self.log.debug(
- # "Nothing is selected. Collecting all items from sequence "
- # "\"{}\"".format(hiero.ui.activeSequence())
- # )
- # for track in hiero.ui.activeSequence().items():
- # selection.extend(track.items())
-
context.data["selection"] = selection
diff --git a/pype/plugins/nukestudio/publish/collect_shots.py b/pype/plugins/nukestudio/publish/collect_shots.py
index 928dfd645b..77346d2ec3 100644
--- a/pype/plugins/nukestudio/publish/collect_shots.py
+++ b/pype/plugins/nukestudio/publish/collect_shots.py
@@ -1,7 +1,7 @@
from pyblish import api
-class CollectShots(api.ContextPlugin):
+class CollectShots(api.InstancePlugin):
"""Collect Shot from Clip."""
# Run just before CollectClipSubsets
@@ -10,63 +10,60 @@ class CollectShots(api.ContextPlugin):
hosts = ["nukestudio"]
families = ["clip"]
- def process(self, context):
- for instance in context[:]:
- # Exclude non-tagged instances.
- tagged = False
- for tag in instance.data["tags"]:
- if tag["name"].lower() == "hierarchy":
- tagged = True
+ def process(self, instance):
+ # Exclude non-tagged instances.
+ tagged = False
+ for tag in instance.data["tags"]:
+ if tag["name"].lower() == "hierarchy":
+ tagged = True
- if not tagged:
- self.log.debug(
- "Skipping \"{}\" because its not tagged with "
- "\"Hierarchy\"".format(instance)
- )
- continue
-
- # Collect data.
- data = {}
- for key, value in instance.data.iteritems():
- data[key] = value
-
- # Collect comments.
- data["comments"] = []
-
- # Exclude non-tagged instances.
- for tag in instance.data["tags"]:
- if tag["name"].lower() == "comment":
- data["comments"].append(
- tag.metadata().dict()["tag.note"]
- )
-
- # Find tags on the source clip.
- tags = instance.data["item"].source().tags()
- for tag in tags:
- if tag.name().lower() == "comment":
- data["comments"].append(
- tag.metadata().dict()["tag.note"]
- )
-
- data["family"] = "shot"
- data["families"] = []
-
- data["subset"] = data["family"] + "Main"
-
- data["name"] = data["subset"] + "_" + data["asset"]
-
- data["label"] = (
- "{} - {} - tasks:{} - assetbuilds:{} - comments:{}".format(
- data["asset"],
- data["subset"],
- data["tasks"],
- [x["name"] for x in data.get("assetbuilds", [])],
- len(data["comments"])
- )
+ if not tagged:
+ self.log.debug(
+ "Skipping \"{}\" because its not tagged with "
+ "\"Hierarchy\"".format(instance)
)
+ return
- # Create instance.
- self.log.debug("Creating instance with: {}".format(data["name"]))
- instance.context.create_instance(**data)
+ # Collect data.
+ data = {}
+ for key, value in instance.data.iteritems():
+ data[key] = value
- self.log.debug("_ context: {}".format(context[:]))
+ # Collect comments.
+ data["comments"] = []
+
+ # Exclude non-tagged instances.
+ for tag in instance.data["tags"]:
+ if tag["name"].lower() == "comment":
+ data["comments"].append(
+ tag.metadata().dict()["tag.note"]
+ )
+
+ # Find tags on the source clip.
+ tags = instance.data["item"].source().tags()
+ for tag in tags:
+ if tag.name().lower() == "comment":
+ data["comments"].append(
+ tag.metadata().dict()["tag.note"]
+ )
+
+ data["family"] = "shot"
+ data["families"] = []
+
+ data["subset"] = data["family"] + "Main"
+
+ data["name"] = data["subset"] + "_" + data["asset"]
+
+ data["label"] = (
+ "{} - {} - tasks:{} - assetbuilds:{} - comments:{}".format(
+ data["asset"],
+ data["subset"],
+ data["tasks"],
+ [x["name"] for x in data.get("assetbuilds", [])],
+ len(data["comments"])
+ )
+ )
+
+ # Create instance.
+ self.log.debug("Creating instance with: {}".format(data["name"]))
+ instance.context.create_instance(**data)
diff --git a/pype/plugins/nukestudio/publish/collect_tag_framestart.py b/pype/plugins/nukestudio/publish/collect_tag_framestart.py
index c73a2dd1ee..1342d996ab 100644
--- a/pype/plugins/nukestudio/publish/collect_tag_framestart.py
+++ b/pype/plugins/nukestudio/publish/collect_tag_framestart.py
@@ -19,13 +19,14 @@ class CollectClipTagFrameStart(api.InstancePlugin):
# gets only task family tags and collect labels
if "frameStart" in t_family:
- t_value = t_metadata.get("tag.value", "")
+ t_value = t_metadata.get("tag.value", None)
# backward compatibility
- t_number = t_metadata.get("tag.number", "")
+ t_number = t_metadata.get("tag.number", None)
+ start_frame = t_number or t_value
try:
- start_frame = int(t_number) or int(t_value)
+ start_frame = int(start_frame)
except ValueError:
if "source" in t_value:
source_first = instance.data["sourceFirst"]
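
The hunk above resolves the legacy `tag.number` key first, then `tag.value`, before a single int coercion. The same fallback, sketched as a standalone helper (the name is hypothetical), with the missing-tag case handled as well:

    def resolve_start_frame(metadata):
        """Prefer legacy 'tag.number', fall back to 'tag.value'."""
        raw = metadata.get("tag.number") or metadata.get("tag.value")
        try:
            return int(raw)
        except (TypeError, ValueError):
            return None  # caller handles "source"-style values separately

    assert resolve_start_frame({"tag.number": "1001"}) == 1001
    assert resolve_start_frame({"tag.value": "996"}) == 996
    assert resolve_start_frame({}) is None
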
diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py
new file mode 100644
index 0000000000..5e2d95d943
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/extract_effects.py
@@ -0,0 +1,230 @@
+import os
+import json
+import re
+import pyblish.api
+import tempfile
+from avalon import io, api
+
+class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
+ """Collect video tracks effects into context."""
+
+ order = pyblish.api.ExtractorOrder
+ label = "Export Soft Lut Effects"
+ families = ["lut"]
+
+ def process(self, instance):
+ item = instance.data["item"]
+ effects = instance.data.get("effectTrackItems")
+
+ instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]]
+
+ self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"]))
+
+ # skip any without effects
+ if not effects:
+ return
+
+ subset = instance.data.get("subset")
+ subset_split = re.findall(r'[A-Z][^A-Z]*', subset)
+
+ if len(subset_split) > 0:
+ root_name = subset.replace(subset_split[0], "")
+ subset_split.insert(0, root_name.capitalize())
+
+ subset_split.insert(0, "lut")
+
+ self.log.debug("creating staging dir")
+ # staging_dir = self.staging_dir(instance)
+
+ # TODO: provisional only; to be replaced by a proper staging function
+ staging_dir = instance.data.get('stagingDir', None)
+
+ if not staging_dir:
+ staging_dir = os.path.normpath(
+ tempfile.mkdtemp(prefix="pyblish_tmp_")
+ )
+ instance.data['stagingDir'] = staging_dir
+
+ self.log.debug("creating staging dir: `{}`".format(staging_dir))
+
+ transfers = list()
+ if "transfers" not in instance.data:
+ instance.data["transfers"] = list()
+
+ name = "".join(subset_split)
+ ext = "json"
+ file = name + "." + ext
+
+ # create new instance and inherit data
+ data = {}
+ for key, value in instance.data.iteritems():
+ data[key] = value
+
+ # change names
+ data["subset"] = name
+ data["family"] = "lut"
+ data["families"] = []
+ data["name"] = data["subset"] + "_" + data["asset"]
+ data["label"] = "{} - {} - ({})".format(
+ data['asset'], data["subset"], os.path.splitext(file)[1]
+ )
+ data["source"] = data["sourcePath"]
+
+ # create new instance
+ instance = instance.context.create_instance(**data)
+
+ dst_dir = self.resource_destination_dir(instance)
+
+ # change paths in effects to files
+ for k, effect in effects["effects"].items():
+ trn = self.copy_linked_files(effect, dst_dir)
+ if trn:
+ transfers.append((trn[0], trn[1]))
+
+ instance.data["transfers"].extend(transfers)
+ self.log.debug("_ transfers: `{}`".format(
+ instance.data["transfers"]))
+
+ # create representations
+ instance.data["representations"] = list()
+
+ transfer_data = [
+ "handleStart", "handleEnd", "sourceIn", "sourceOut",
+ "frameStart", "frameEnd", "sourceInH", "sourceOutH",
+ "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
+ "version"
+ ]
+
+ # pass data to version
+ version_data = dict()
+ version_data.update({k: instance.data[k] for k in transfer_data})
+
+ # add to data of representation
+ version_data.update({
+ "handles": version_data['handleStart'],
+ "colorspace": item.sourceMediaColourTransform(),
+ "colorspaceScript": instance.context.data["colorspace"],
+ "families": ["plate", "lut"],
+ "subset": name,
+ "fps": instance.context.data["fps"]
+ })
+ instance.data["versionData"] = version_data
+
+ representation = {
+ 'files': file,
+ 'stagingDir': staging_dir,
+ 'name': "lut" + ext.title(),
+ 'ext': ext
+ }
+ instance.data["representations"].append(representation)
+
+ self.log.debug("_ representations: `{}`".format(
+ instance.data["representations"]))
+
+ self.log.debug("_ version_data: `{}`".format(
+ instance.data["versionData"]))
+
+ with open(os.path.join(staging_dir, file), "w") as outfile:
+ outfile.write(json.dumps(effects, indent=4, sort_keys=True))
+
+ return
+
+ def copy_linked_files(self, effect, dst_dir):
+ for k, v in effect["node"].items():
+ if k in "file" and v is not '':
+ base_name = os.path.basename(v)
+ dst = os.path.join(dst_dir, base_name).replace("\\", "/")
+
+ # add it to the json
+ effect["node"][k] = dst
+ return (v, dst)
+
+ def resource_destination_dir(self, instance):
+ anatomy = instance.context.data['anatomy']
+ self.create_destination_template(instance, anatomy)
+
+ return os.path.join(
+ instance.data["assumedDestination"],
+ "resources"
+ )
+
+ def create_destination_template(self, instance, anatomy):
+ """Create a filepath based on the current data available
+
+ Example template:
+ {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
+ {subset}.{representation}
+ Args:
+ instance: the instance to publish
+
+ Returns:
+ file path (str)
+ """
+
+ # get all the stuff from the database
+ subset_name = instance.data["subset"]
+ self.log.info(subset_name)
+ asset_name = instance.data["asset"]
+ project_name = api.Session["AVALON_PROJECT"]
+ a_template = anatomy.templates
+
+ project = io.find_one({"type": "project",
+ "name": project_name},
+ projection={"config": True, "data": True})
+
+ template = a_template['publish']['path']
+ # anatomy = instance.context.data['anatomy']
+
+ asset = io.find_one({"type": "asset",
+ "name": asset_name,
+ "parent": project["_id"]})
+
+ assert asset, ("No asset found by the name '{}' "
+ "in project '{}'".format(asset_name, project_name))
+ silo = asset['silo']
+
+ subset = io.find_one({"type": "subset",
+ "name": subset_name,
+ "parent": asset["_id"]})
+
+ # assume there is no version yet, we start at `1`
+ version = None
+ version_number = 1
+ if subset is not None:
+ version = io.find_one({"type": "version",
+ "parent": subset["_id"]},
+ sort=[("name", -1)])
+
+ # if there is a subset there ought to be a version
+ if version is not None:
+ version_number += version["name"]
+
+ if instance.data.get('version'):
+ version_number = int(instance.data.get('version'))
+
+ padding = int(a_template['render']['padding'])
+
+ hierarchy = asset['data']['parents']
+ if hierarchy:
+ # hierarchy = os.path.sep.join(hierarchy)
+ hierarchy = "/".join(hierarchy)
+
+ template_data = {"root": api.Session["AVALON_PROJECTS"],
+ "project": {"name": project_name,
+ "code": project['data']['code']},
+ "silo": silo,
+ "family": instance.data['family'],
+ "asset": asset_name,
+ "subset": subset_name,
+ "frame": ('#' * padding),
+ "version": version_number,
+ "hierarchy": hierarchy,
+ "representation": "TEMP"}
+
+ instance.data["assumedTemplateData"] = template_data
+ self.log.info(template_data)
+ instance.data["template"] = template
+ # We take the parent folder of representation 'filepath'
+ instance.data["assumedDestination"] = os.path.dirname(
+ anatomy.format(template_data)["publish"]["path"]
+ )
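
For reference, the JSON written by this extractor mirrors the structure assembled in collect_effects.py. An illustrative payload (all values made up):

    import json

    effects = {
        "assignTo": "plateMain",
        "effects": {
            "SoftEffectCrop1": {
                "class": "SoftEffectCrop",
                "timelineIn": 100,
                "timelineOut": 148,
                "subTrackIndex": 0,
                "trackIndex": 1,
                "track": "Video 2",
                "node": {"box": [0.0, 0.0, 1920.0, 1080.0]},
            },
        },
    }
    print(json.dumps(effects, indent=4, sort_keys=True))
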
diff --git a/pype/tests/test_avalon_plugin_presets.py b/pype/tests/test_avalon_plugin_presets.py
new file mode 100644
index 0000000000..7f023ea358
--- /dev/null
+++ b/pype/tests/test_avalon_plugin_presets.py
@@ -0,0 +1,40 @@
+import avalon.api as api
+import pype
+
+
+class MyTestCreator(api.Creator):
+
+ my_test_property = "A"
+
+ def __init__(self, name, asset, options=None, data=None):
+ super(MyTestCreator, self).__init__(name, asset,
+ options=options, data=data)
+
+
+# this is a hack like no other - we need to inject our own avalon host
+# and bypass all of its validation. Avalon hosts are modules that need
+# an `ls` callable as an attribute. Voila:
+class Test:
+ __name__ = "test"
+ ls = len
+
+ def __call__(self):
+ pass
+
+
+def test_avalon_plugin_presets(monkeypatch, printer):
+
+ pype.install()
+ api.register_host(Test())
+ api.register_plugin(api.Creator, MyTestCreator)
+ plugins = api.discover(api.Creator)
+ printer("Test if we got our test plugin")
+ assert MyTestCreator in plugins
+ for p in plugins:
+ if p.__name__ == "MyTestCreator":
+ printer("Test if we have overriden existing property")
+ assert p.my_test_property == "B"
+ printer("Test if we have overriden superclass property")
+ assert p.active is False
+ printer("Test if we have added new property")
+ assert p.new_property == "new"
diff --git a/pype/tests/test_pyblish_filter.py b/pype/tests/test_pyblish_filter.py
index 8d747e63df..cf3d5d6015 100644
--- a/pype/tests/test_pyblish_filter.py
+++ b/pype/tests/test_pyblish_filter.py
@@ -18,7 +18,7 @@ def test_pyblish_plugin_filter_modifier(printer, monkeypatch):
assert len(plugins) == 0
paths = pyblish.api.registered_paths()
printer("Test if we have no registered plugin paths")
- print(paths)
+ assert len(paths) == 0
class MyTestPlugin(pyblish.api.InstancePlugin):
my_test_property = 1
diff --git a/pype/widgets/popup.py b/pype/widgets/popup.py
index 8f28dc5269..7c0fa0f5c5 100644
--- a/pype/widgets/popup.py
+++ b/pype/widgets/popup.py
@@ -124,6 +124,26 @@ class Popup2(Popup):
fix = self.widgets["show"]
fix.setText("Fix")
+ def calculate_window_geometry(self):
+ """Respond to status changes
+
+ On creation, align window with screen bottom right.
+
+ """
+ parent_widget = self.parent()
+
+ app = QtWidgets.QApplication.instance()
+ if parent_widget:
+ screen = app.desktop().screenNumber(parent_widget)
+ else:
+ screen = app.desktop().screenNumber(app.desktop().cursor().pos())
+ center_point = app.desktop().screenGeometry(screen).center()
+
+ frame_geo = self.frameGeometry()
+ frame_geo.moveCenter(center_point)
+
+ return frame_geo
+
@contextlib.contextmanager
def application():
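
`calculate_window_geometry` uses the standard Qt centering idiom: take the widget's frame geometry and move its center onto the target screen's center. A minimal sketch, assuming the Qt.py shim (PySide2/PyQt5 behave the same):

    from Qt import QtWidgets

    def centered_geometry(widget, screen_index):
        """Return `widget`'s frame geometry centered on the given screen."""
        desktop = QtWidgets.QApplication.instance().desktop()
        geometry = widget.frameGeometry()
        geometry.moveCenter(desktop.screenGeometry(screen_index).center())
        return geometry
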
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png b/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png
index 31c41d1ac6..4561745d66 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png and b/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png b/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png
index ab911c5ebc..bb4c1802aa 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png and b/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/3D.png b/setup/nukestudio/hiero_plugin_path/Icons/3D.png
index 4ace8911df..2de7a72775 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/3D.png and b/setup/nukestudio/hiero_plugin_path/Icons/3D.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png b/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png
index 4cdc09b541..c98e4f74f1 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png and b/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png b/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png
index 418272517f..18555698fe 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png and b/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/edit.png b/setup/nukestudio/hiero_plugin_path/Icons/edit.png
index e0ba3c102f..97e42054e7 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/edit.png and b/setup/nukestudio/hiero_plugin_path/Icons/edit.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/fusion.png b/setup/nukestudio/hiero_plugin_path/Icons/fusion.png
index 208c1279cf..2e498edd69 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/fusion.png and b/setup/nukestudio/hiero_plugin_path/Icons/fusion.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png b/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png
index 68ea352885..6acf39ced5 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png and b/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/houdini.png b/setup/nukestudio/hiero_plugin_path/Icons/houdini.png
index 128eac262a..d8c842dd17 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/houdini.png and b/setup/nukestudio/hiero_plugin_path/Icons/houdini.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/lense.png b/setup/nukestudio/hiero_plugin_path/Icons/lense.png
index 2eb2da982f..255b1753ed 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/lense.png and b/setup/nukestudio/hiero_plugin_path/Icons/lense.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/lense1.png b/setup/nukestudio/hiero_plugin_path/Icons/lense1.png
index f76354f48c..1ad1264807 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/lense1.png and b/setup/nukestudio/hiero_plugin_path/Icons/lense1.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/maya.png b/setup/nukestudio/hiero_plugin_path/Icons/maya.png
index 7dd1453c60..fcfa47ae4f 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/maya.png and b/setup/nukestudio/hiero_plugin_path/Icons/maya.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/nuke.png b/setup/nukestudio/hiero_plugin_path/Icons/nuke.png
index 9d9dc4104c..107796914b 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/nuke.png and b/setup/nukestudio/hiero_plugin_path/Icons/nuke.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/resolution.png b/setup/nukestudio/hiero_plugin_path/Icons/resolution.png
index 9904a60532..83803fc36d 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/resolution.png and b/setup/nukestudio/hiero_plugin_path/Icons/resolution.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/retiming.png b/setup/nukestudio/hiero_plugin_path/Icons/retiming.png
index 4487ac0422..1c6f22e02c 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/retiming.png and b/setup/nukestudio/hiero_plugin_path/Icons/retiming.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/review.png b/setup/nukestudio/hiero_plugin_path/Icons/review.png
index 49f28c492c..0d894b6987 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/review.png and b/setup/nukestudio/hiero_plugin_path/Icons/review.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/volume.png b/setup/nukestudio/hiero_plugin_path/Icons/volume.png
index 47119dc98b..e5e1200653 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/volume.png and b/setup/nukestudio/hiero_plugin_path/Icons/volume.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png
index d01fe683e5..51742b5df2 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png and b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png
index a1d5751622..01e5f4f816 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png and b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png
index 0fe806d86e..0ffb939a7f 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png and b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png differ