Merge branch 'develop' into feature/nukestudio_comments

# Conflicts:
#	pype/plugins/nukestudio/publish/collect_shots.py

@@ -3,6 +3,8 @@ import os
from pyblish import api as pyblish
from avalon import api as avalon
from .lib import filter_pyblish_plugins
from pypeapp import config


import logging
log = logging.getLogger(__name__)

@@ -16,6 +18,51 @@ PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "global", "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "global", "load")

# We are monkey patching `avalon.api.discover()` to allow us to load
# plugin presets on plugins being discovered by avalon. A little bit of
# hacking, but it allows us to add our own features without the need
# to modify upstream code.

_original_discover = avalon.discover


def patched_discover(superclass):
    """
    Monkey patched version of :func:`avalon.api.discover()`. It allows
    us to load presets on plugins being discovered.
    """
    # run original discover and get plugins
    plugins = _original_discover(superclass)

    # determine host application to use for finding presets
    host = avalon.registered_host().__name__.split(".")[-1]

    # map plugin superclass to preset json. Currently supported are load
    # and create (avalon.api.Loader and avalon.api.Creator)
    plugin_type = "undefined"
    if superclass.__name__.split(".")[-1] == "Loader":
        plugin_type = "load"
    elif superclass.__name__.split(".")[-1] == "Creator":
        plugin_type = "create"

    print(">>> trying to find presets for {}:{} ...".format(host, plugin_type))
    try:
        config_data = config.get_presets()['plugins'][host][plugin_type]
    except KeyError:
        print("*** no presets found.")
    else:
        for plugin in plugins:
            if plugin.__name__ in config_data:
                print(">>> We have preset for {}".format(plugin.__name__))
                for option, value in config_data[plugin.__name__].items():
                    if option == "enabled" and value is False:
                        setattr(plugin, "active", False)
                        print("  - is disabled by preset")
                    else:
                        setattr(plugin, option, value)
                        print("  - setting `{}`: `{}`".format(option, value))
    return plugins


def install():
    log.info("Registering global plug-ins..")

@@ -23,6 +70,9 @@ def install():
    pyblish.register_discovery_filter(filter_pyblish_plugins)
    avalon.register_plugin_path(avalon.Loader, LOAD_PATH)

    # apply monkey patched discover to original one
    avalon.discover = patched_discover


def uninstall():
    log.info("Deregistering global plug-ins..")

@@ -30,3 +80,6 @@ def uninstall():
    pyblish.deregister_discovery_filter(filter_pyblish_plugins)
    avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
    log.info("Global plug-ins unregistered")

    # restore original discover
    avalon.discover = _original_discover
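
For context, a preset consumed by `patched_discover` above maps plugin class names to attribute overrides. A minimal runnable sketch of that application logic (the plugin and preset names here are hypothetical; only the `enabled` -> `active` mapping mirrors the code above):

class LoadSequence:
    # Stand-in for a discovered avalon Loader plugin.
    active = True

presets = {"LoadSequence": {"enabled": False, "order": 10}}

for plugin in [LoadSequence]:
    for option, value in presets.get(plugin.__name__, {}).items():
        if option == "enabled" and value is False:
            setattr(plugin, "active", False)  # "enabled" preset toggles `active`
        else:
            setattr(plugin, option, value)

assert LoadSequence.active is False and LoadSequence.order == 10
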
@@ -23,6 +23,7 @@ from .lib import (
    get_asset,
    get_project,
    get_hierarchy,
    get_subsets,
    get_version_from_path,
    modified_environ,
    add_tool_to_environment

@@ -53,6 +54,7 @@ __all__ = [
    "get_project",
    "get_hierarchy",
    "get_asset",
    "get_subsets",
    "get_version_from_path",
    "modified_environ",
    "add_tool_to_environment",

@@ -1,11 +1,13 @@
from .clockify_api import ClockifyAPI
from .widget_settings import ClockifySettings
from .widget_message import MessageWidget
from .clockify import ClockifyModule

__all__ = [
    'ClockifyAPI',
    'ClockifySettings',
    'ClockifyModule'
    "ClockifyAPI",
    "ClockifySettings",
    "ClockifyModule",
    "MessageWidget"
]

def tray_init(tray_widget, main_widget):

@@ -1,16 +1,19 @@
import os
import threading
from pypeapp import style
from pypeapp import style, Logger
from Qt import QtWidgets
from pype.clockify import ClockifySettings, ClockifyAPI
from . import ClockifySettings, ClockifyAPI, MessageWidget


class ClockifyModule:

    def __init__(self, main_parent=None, parent=None):
        self.log = Logger().get_logger(self.__class__.__name__, "PypeTray")

        self.main_parent = main_parent
        self.parent = parent
        self.clockapi = ClockifyAPI()
        self.message_widget = None
        self.widget_settings = ClockifySettings(main_parent, self)
        self.widget_settings_required = None

@@ -74,6 +77,7 @@ class ClockifyModule:
        self.timer_manager.start_timers(data)

    def timer_stopped(self):
        self.bool_timer_run = False
        if hasattr(self, 'timer_manager'):
            self.timer_manager.stop_timers()

@@ -102,26 +106,37 @@ class ClockifyModule:
            if self.bool_timer_run != bool_timer_run:
                if self.bool_timer_run is True:
                    self.timer_stopped()
                else:
                elif self.bool_timer_run is False:
                    actual_timer = self.clockapi.get_in_progress()
                    if not actual_timer:
                        continue

                    actual_project_id = actual_timer["projectId"]
                    project = self.clockapi.get_project_by_id(
                        actual_project_id
                    )
                    actual_proj_id = actual_timer["projectId"]
                    if not actual_proj_id:
                        continue

                    project = self.clockapi.get_project_by_id(actual_proj_id)
                    if project and project.get("code") == 501:
                        continue

                    project_name = project["name"]

                    actual_timer_hierarchy = actual_timer["description"]
                    hierarchy_items = actual_timer_hierarchy.split("/")
                    # Each pype timer must have at least 2 items!
                    if len(hierarchy_items) < 2:
                        continue
                    task_name = hierarchy_items[-1]
                    hierarchy = hierarchy_items[:-1]

                    task_type = None
                    if len(actual_timer.get("tags", [])) > 0:
                        task_type = actual_timer["tags"][0].get("name")
                    data = {
                        "task_name": task_name,
                        "hierarchy": hierarchy,
                        "project_name": project_name
                        "project_name": project_name,
                        "task_type": task_type
                    }

                    self.timer_started(data)

@@ -134,9 +149,23 @@ class ClockifyModule:
        self.clockapi.finish_time_entry()
        if self.bool_timer_run:
            self.timer_stopped()
        self.bool_timer_run = False

    def signed_in(self):
        if hasattr(self, 'timer_manager'):
        if not self.timer_manager:
            return

        if not self.timer_manager.last_task:
            return

        if self.timer_manager.is_running:
            self.start_timer_manager(self.timer_manager.last_task)

    def start_timer(self, input_data):
        # If the API key is not entered then skip
        if not self.clockapi.get_api_key():
            return

        actual_timer = self.clockapi.get_in_progress()
        actual_timer_hierarchy = None
        actual_project_id = None

@@ -144,11 +173,31 @@ class ClockifyModule:
            actual_timer_hierarchy = actual_timer.get("description")
            actual_project_id = actual_timer.get("projectId")

        # Concatenate hierarchy and task to get description
        desc_items = [val for val in input_data.get("hierarchy", [])]
        desc_items.append(input_data["task_name"])
        description = "/".join(desc_items)

        project_id = self.clockapi.get_project_id(input_data["project_name"])
        # Check project existence
        project_name = input_data["project_name"]
        project_id = self.clockapi.get_project_id(project_name)
        if not project_id:
            self.log.warning((
                "Project \"{}\" was not found in Clockify. Timer won't start."
            ).format(project_name))

            msg = (
                "Project <b>\"{}\"</b> is not in Clockify Workspace <b>\"{}\"</b>."
                "<br><br>Please inform your Project Manager."
            ).format(project_name, str(self.clockapi.workspace))

            self.message_widget = MessageWidget(
                self.main_parent, msg, "Clockify - Info Message"
            )
            self.message_widget.closed.connect(self.on_message_widget_close)
            self.message_widget.show()

            return

        if (
            actual_timer is not None and

@@ -158,7 +207,7 @@ class ClockifyModule:
            return

        tag_ids = []
        task_tag_id = self.clockapi.get_tag_id(input_data["task_name"])
        task_tag_id = self.clockapi.get_tag_id(input_data["task_type"])
        if task_tag_id is not None:
            tag_ids.append(task_tag_id)

@@ -166,6 +215,9 @@ class ClockifyModule:
            description, project_id, tag_ids=tag_ids
        )

    def on_message_widget_close(self):
        self.message_widget = None

    # Definition of Tray menu
    def tray_menu(self, parent_menu):
        # Menu for Tray App
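
The round trip between a Pype task and a Clockify time entry hinges on the entry's description holding the parent hierarchy and the task name joined by slashes, as the hunks above show. A minimal sketch of that encode/decode (names illustrative):

def encode_description(hierarchy, task_name):
    # ["ep01", "sq01", "sh010"] + "comp" -> "ep01/sq01/sh010/comp"
    return "/".join(list(hierarchy) + [task_name])

def decode_description(description):
    items = description.split("/")
    if len(items) < 2:  # each pype timer must have at least 2 items
        raise ValueError("not a pype timer description: {}".format(description))
    return items[:-1], items[-1]  # (hierarchy, task_name)

hierarchy, task = decode_description(encode_description(["ep01", "sh010"], "comp"))
assert hierarchy == ["ep01", "sh010"] and task == "comp"
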
@@ -25,6 +25,7 @@ class ClockifyAPI(metaclass=Singleton):
    fpath = os.path.join(app_dir, file_name)
    admin_permission_names = ['WORKSPACE_OWN', 'WORKSPACE_ADMIN']
    master_parent = None
    workspace = None
    workspace_id = None

    def set_master(self, master_parent):

@@ -43,6 +44,8 @@ class ClockifyAPI(metaclass=Singleton):
        if api_key is not None and self.validate_api_key(api_key) is True:
            self.headers["X-Api-Key"] = api_key
            self.set_workspace()
            if self.master_parent:
                self.master_parent.signed_in()
            return True
        return False

@@ -1,69 +0,0 @@
import os
import sys
import argparse
import logging

from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from pype.clockify import ClockifyAPI


class StartClockify(BaseAction):
    '''Starts timer on clockify.'''

    #: Action identifier.
    identifier = 'clockify.start.timer'
    #: Action label.
    label = 'Clockify - Start timer'
    #: Action description.
    description = 'Starts timer on clockify'
    #: roles that are allowed to register this action
    icon = '{}/app_icons/clockify.png'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )
    #: Clockify api
    clockapi = ClockifyAPI()

    def discover(self, session, entities, event):
        if len(entities) != 1:
            return False
        if entities[0].entity_type.lower() != 'task':
            return False
        if self.clockapi.workspace_id is None:
            return False
        return True

    def launch(self, session, entities, event):
        task = entities[0]
        task_name = task['type']['name']
        project_name = task['project']['full_name']

        def get_parents(entity):
            output = []
            if entity.entity_type.lower() == 'project':
                return output
            output.extend(get_parents(entity['parent']))
            output.append(entity['name'])

            return output

        desc_items = get_parents(task['parent'])
        desc_items.append(task['name'])
        description = '/'.join(desc_items)
        project_id = self.clockapi.get_project_id(project_name)
        tag_ids = []
        tag_ids.append(self.clockapi.get_tag_id(task_name))
        self.clockapi.start_time_entry(
            description, project_id, tag_ids=tag_ids
        )

        return True


def register(session, **kw):
    '''Register plugin. Called when used as a plugin.'''

    if not isinstance(session, ftrack_api.session.Session):
        return

    StartClockify(session).register()

@@ -17,10 +17,8 @@ class SyncClocify(BaseAction):
    label = 'Sync To Clockify'
    #: Action description.
    description = 'Synchronise data to Clockify workspace'
    #: priority
    priority = 100
    #: roles that are allowed to register this action
    role_list = ['Pypeclub', 'Administrator']
    role_list = ["Pypeclub", "Administrator", "project Manager"]
    #: icon
    icon = '{}/app_icons/clockify-white.png'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')

@@ -28,16 +26,22 @@ class SyncClocify(BaseAction):
    #: CLockifyApi
    clockapi = ClockifyAPI()

    def register(self):
    def preregister(self):
        if self.clockapi.workspace_id is None:
            raise ValueError('Clockify Workspace or API key are not set!')
            return "Clockify Workspace or API key are not set!"

        if self.clockapi.validate_workspace_perm() is False:
            raise MissingPermision('Clockify')
        super().register()

        return True

    def discover(self, session, entities, event):
        ''' Validation '''
        if len(entities) != 1:
            return False

        if entities[0].entity_type.lower() != "project":
            return False
        return True

    def launch(self, session, entities, event):

pype/clockify/widget_message.py (new file, 91 lines)

@@ -0,0 +1,91 @@
from Qt import QtCore, QtGui, QtWidgets
from pypeapp import style


class MessageWidget(QtWidgets.QWidget):

    SIZE_W = 300
    SIZE_H = 130

    closed = QtCore.Signal()

    def __init__(self, parent=None, messages=[], title="Message"):

        super(MessageWidget, self).__init__()

        self._parent = parent

        # Icon
        if parent and hasattr(parent, 'icon'):
            self.setWindowIcon(parent.icon)
        else:
            from pypeapp.resources import get_resource
            self.setWindowIcon(QtGui.QIcon(get_resource('icon.png')))

        self.setWindowFlags(
            QtCore.Qt.WindowCloseButtonHint |
            QtCore.Qt.WindowMinimizeButtonHint
        )

        # Font
        self.font = QtGui.QFont()
        self.font.setFamily("DejaVu Sans Condensed")
        self.font.setPointSize(9)
        self.font.setBold(True)
        self.font.setWeight(50)
        self.font.setKerning(True)

        # Size setting
        self.resize(self.SIZE_W, self.SIZE_H)
        self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
        self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100))

        # Style
        self.setStyleSheet(style.load_stylesheet())

        self.setLayout(self._ui_layout(messages))
        self.setWindowTitle(title)

    def _ui_layout(self, messages):
        if not messages:
            messages = ["*Missing messages (This is a bug)*", ]

        elif not isinstance(messages, (tuple, list)):
            messages = [messages, ]

        main_layout = QtWidgets.QVBoxLayout(self)

        labels = []
        for message in messages:
            label = QtWidgets.QLabel(message)
            label.setFont(self.font)
            label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
            label.setTextFormat(QtCore.Qt.RichText)
            label.setWordWrap(True)

            labels.append(label)
            main_layout.addWidget(label)

        btn_close = QtWidgets.QPushButton("Close")
        btn_close.setToolTip('Close this window')
        btn_close.clicked.connect(self.on_close_clicked)

        btn_group = QtWidgets.QHBoxLayout()
        btn_group.addStretch(1)
        btn_group.addWidget(btn_close)

        main_layout.addLayout(btn_group)

        self.labels = labels
        self.btn_group = btn_group
        self.btn_close = btn_close
        self.main_layout = main_layout

        return main_layout

    def on_close_clicked(self):
        self.close()

    def close(self, *args, **kwargs):
        self.closed.emit()
        super(MessageWidget, self).close(*args, **kwargs)
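
A minimal usage sketch of the widget above, assuming a running Qt application; the message text is illustrative:

import sys
from Qt import QtWidgets
from pype.clockify import MessageWidget

app = QtWidgets.QApplication(sys.argv)
widget = MessageWidget(
    None, ["Project <b>demo</b> was not found."], "Clockify - Info Message"
)
widget.closed.connect(app.quit)  # `closed` is emitted from close()
widget.show()
sys.exit(app.exec_())
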
@@ -2,7 +2,7 @@ import os

from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector


class AttributesRemapper(BaseAction):

@@ -275,7 +275,7 @@ class AttributesRemapper(BaseAction):
            message = {'type': 'label', 'value': '<p>{}</p>'.format(value)}
            items.append(message)

        self.show_interface(event, items, title)
        self.show_interface(items=items, title=title, event=event)


def register(session, plugins_presets={}):
    '''Register plugin. Called when used as a plugin.'''

@@ -7,7 +7,7 @@ import re
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from avalon import lib as avalonlib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector
from pypeapp import config, Anatomy


@@ -23,10 +23,10 @@ class CustomAttributeDoctor(BaseAction):
    icon = '{}/ftrack/action_icons/PypeDoctor.svg'.format(
        os.environ.get('PYPE_STATICS_SERVER', '')
    )
    hierarchical_ca = ['handle_start', 'handle_end', 'fstart', 'fend']
    hierarchical_ca = ['handleStart', 'handleEnd', 'frameStart', 'frameEnd']
    hierarchical_alternatives = {
        'handle_start': 'handles',
        'handle_end': 'handles'
        'handleStart': 'handles',
        'handleEnd': 'handles'
    }

    # Roles for new custom attributes

@@ -34,22 +34,22 @@ class CustomAttributeDoctor(BaseAction):
    write_roles = ['ALL',]

    data_ca = {
        'handle_start': {
        'handleStart': {
            'label': 'Frame handles start',
            'type': 'number',
            'config': json.dumps({'isdecimal': False})
        },
        'handle_end': {
        'handleEnd': {
            'label': 'Frame handles end',
            'type': 'number',
            'config': json.dumps({'isdecimal': False})
        },
        'fstart': {
        'frameStart': {
            'label': 'Frame start',
            'type': 'number',
            'config': json.dumps({'isdecimal': False})
        },
        'fend': {
        'frameEnd': {
            'label': 'Frame end',
            'type': 'number',
            'config': json.dumps({'isdecimal': False})

@@ -5,7 +5,7 @@ from bson.objectid import ObjectId
import argparse
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector


class DeleteAsset(BaseAction):

@@ -4,7 +4,7 @@ import logging
import argparse
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector


class AssetsRemover(BaseAction):

@@ -1,11 +1,14 @@
import os
import json

from ruamel import yaml
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction
from pypeapp import config
from pype.ftrack.lib import get_avalon_attr

from pype.vendor.ftrack_api import session as fa_session


class PrepareProject(BaseAction):
    '''Edit meta data action.'''

@@ -22,6 +25,9 @@ class PrepareProject(BaseAction):
        os.environ.get('PYPE_STATICS_SERVER', '')
    )

    # Key to store info about triggering create folder structure
    create_project_structure_key = "create_folder_structure"

    def discover(self, session, entities, event):
        ''' Validation '''
        if len(entities) != 1:

@@ -41,9 +47,9 @@ class PrepareProject(BaseAction):

        self.log.debug("Loading custom attributes")
        cust_attrs, hier_cust_attrs = get_avalon_attr(session, True)
        project_defaults = config.get_presets().get("ftrack", {}).get(
            "project_defaults", {}
        )
        project_defaults = config.get_presets(
            entities[0]["full_name"]
        ).get("ftrack", {}).get("project_defaults", {})

        self.log.debug("Preparing data which will be shown")
        attributes_to_set = {}

@@ -74,8 +80,29 @@ class PrepareProject(BaseAction):
            str([key for key in attributes_to_set])
        ))

        title = "Set Attribute values"
        item_splitter = {'type': 'label', 'value': '---'}
        title = "Prepare Project"
        items = []

        # Ask if want to trigger Action Create Folder Structure
        items.append({
            "type": "label",
            "value": "<h3>Want to create basic Folder Structure?</h3>"
        })

        items.append({
            "name": self.create_project_structure_key,
            "type": "boolean",
            "value": False,
            "label": "Check if Yes"
        })

        items.append(item_splitter)
        items.append({
            "type": "label",
            "value": "<h3>Set basic Attributes:</h3>"
        })

        multiselect_enumerators = []

        # This item will be last (before enumerators)

@@ -88,8 +115,6 @@ class PrepareProject(BaseAction):
            "label": "AutoSync to Avalon"
        }

        item_splitter = {'type': 'label', 'value': '---'}

        for key, in_data in attributes_to_set.items():
            attr = in_data["object"]

@@ -195,6 +220,10 @@ class PrepareProject(BaseAction):
            return

        in_data = event['data']['values']

        # pop out info about creating project structure
        create_proj_struct = in_data.pop(self.create_project_structure_key)

        # Find hidden items for multiselect enumerators
        keys_to_process = []
        for key in in_data:

@@ -228,8 +257,117 @@ class PrepareProject(BaseAction):

        session.commit()

        # Create project structure
        self.create_project_specific_config(entities[0]["full_name"], in_data)

        # Trigger Create Project Structure action
        if create_proj_struct is True:
            self.trigger_action("create.project.structure", event)

        return True

    def create_project_specific_config(self, project_name, json_data):
        self.log.debug("*** Creating project specific configs ***")

        path_proj_configs = os.environ.get('PYPE_PROJECT_CONFIGS', "")

        # Skip if PYPE_PROJECT_CONFIGS is not set
        # TODO show user OS message
        if not path_proj_configs:
            self.log.warning((
                "Environment variable \"PYPE_PROJECT_CONFIGS\" is not set."
                " Project specific config can't be set."
            ))
            return

        path_proj_configs = os.path.normpath(path_proj_configs)
        # Skip if path does not exist
        # TODO create if not exist?!!!
        if not os.path.exists(path_proj_configs):
            self.log.warning((
                "Path set in environment variable \"PYPE_PROJECT_CONFIGS\""
                " does not exist."
            ))
            return

        project_specific_path = os.path.normpath(
            os.path.join(path_proj_configs, project_name)
        )
        if not os.path.exists(project_specific_path):
            os.makedirs(project_specific_path)
            self.log.debug((
                "Project specific config folder for project \"{}\" created."
            ).format(project_name))

        # Anatomy ####################################
        self.log.debug("--- Processing Anatomy Begins: ---")

        anatomy_dir = os.path.normpath(os.path.join(
            project_specific_path, "anatomy"
        ))
        anatomy_path = os.path.normpath(os.path.join(
            anatomy_dir, "default.yaml"
        ))

        anatomy = None
        if os.path.exists(anatomy_path):
            self.log.debug(
                "Anatomy file already exists. Trying to read: \"{}\"".format(
                    anatomy_path
                )
            )
            # Try to load data
            with open(anatomy_path, 'r') as file_stream:
                try:
                    anatomy = yaml.load(file_stream, Loader=yaml.loader.Loader)
                    self.log.debug("Reading Anatomy file was successful")
                except yaml.YAMLError as exc:
                    self.log.warning(
                        "Reading Yaml file failed: \"{}\"".format(anatomy_path),
                        exc_info=True
                    )

        if not anatomy:
            self.log.debug("Anatomy is not set. Duplicating default.")
            # Create Anatomy folder
            if not os.path.exists(anatomy_dir):
                self.log.debug(
                    "Creating Anatomy folder: \"{}\"".format(anatomy_dir)
                )
                os.makedirs(anatomy_dir)

            source_items = [
                os.environ["PYPE_CONFIG"], "anatomy", "default.yaml"
            ]

            source_path = os.path.normpath(os.path.join(*source_items))
            with open(source_path, 'r') as file_stream:
                source_data = file_stream.read()

            with open(anatomy_path, 'w') as file_stream:
                file_stream.write(source_data)

        # Presets ####################################
        self.log.debug("--- Processing Presets Begins: ---")

        project_defaults_dir = os.path.normpath(os.path.join(*[
            project_specific_path, "presets", "ftrack"
        ]))
        project_defaults_path = os.path.normpath(os.path.join(*[
            project_defaults_dir, "project_defaults.json"
        ]))
        # Create folder if not exist
        if not os.path.exists(project_defaults_dir):
            self.log.debug("Creating Ftrack Presets folder: \"{}\"".format(
                project_defaults_dir
            ))
            os.makedirs(project_defaults_dir)

        with open(project_defaults_path, 'w') as file_stream:
            json.dump(json_data, file_stream, indent=4)

        self.log.debug("*** Creating project specific configs Finished ***")


def register(session, plugins_presets={}):
    '''Register plugin. Called when used as a plugin.'''
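
The on-disk result of create_project_specific_config is a per-project folder under PYPE_PROJECT_CONFIGS holding an anatomy copy and the ftrack defaults. A small sketch of the paths involved (the project name and root here are hypothetical):

import os

root = os.path.join("/studio/project_configs", "demo")  # $PYPE_PROJECT_CONFIGS/<project>
anatomy_path = os.path.join(root, "anatomy", "default.yaml")  # copied from $PYPE_CONFIG
defaults_path = os.path.join(root, "presets", "ftrack", "project_defaults.json")
print(anatomy_path)
print(defaults_path)
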
@@ -19,55 +19,19 @@ class StartTimer(BaseAction):
        entity = entities[0]
        if entity.entity_type.lower() != 'task':
            return
        self.start_ftrack_timer(entity)
        try:
            self.start_clockify_timer(entity)
        except Exception:
            self.log.warning(
                'Failed starting Clockify timer for task: ' + entity['name']
            )
        return

    def start_ftrack_timer(self, task):
        user_query = 'User where username is "{}"'.format(self.session.api_user)
        user = self.session.query(user_query).one()
        self.log.info('Starting Ftrack timer for task: ' + task['name'])
        user.start_timer(task, force=True)
        user = self.session.query(
            "User where username is \"{}\"".format(self.session.api_user)
        ).one()

        user.start_timer(entity, force=True)
        self.session.commit()

    def start_clockify_timer(self, task):
        # Validate Clockify settings if Clockify is required
        clockify_timer = os.environ.get('CLOCKIFY_WORKSPACE', None)
        if clockify_timer is None:
            return

        from pype.clockify import ClockifyAPI
        clockapi = ClockifyAPI()
        if clockapi.verify_api() is False:
            return
        task_type = task['type']['name']
        project_name = task['project']['full_name']

        def get_parents(entity):
            output = []
            if entity.entity_type.lower() == 'project':
                return output
            output.extend(get_parents(entity['parent']))
            output.append(entity['name'])

            return output

        desc_items = get_parents(task['parent'])
        desc_items.append(task['name'])
        description = '/'.join(desc_items)

        project_id = clockapi.get_project_id(project_name)
        tag_ids = []
        tag_ids.append(clockapi.get_tag_id(task_type))
        clockapi.start_time_entry(
            description, project_id, tag_ids=tag_ids

        self.log.info(
            "Starting Ftrack timer for task: {}".format(entity['name'])
        )
        self.log.info('Starting Clockify timer for task: ' + task['name'])

        return


def register(session, plugins_presets={}):

@@ -7,7 +7,7 @@ import collections

from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, lib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector
from bson.objectid import ObjectId


@@ -178,17 +178,7 @@ class SyncToAvalon(BaseAction):
            job['status'] = 'failed'
            session.commit()

            event = fa_session.ftrack_api.event.base.Event(
                topic='ftrack.action.launch',
                data=dict(
                    actionIdentifier='sync.hierarchical.attrs.local',
                    selection=event['data']['selection']
                ),
                source=dict(
                    user=event['source']['user']
                )
            )
            session.event_hub.publish(event, on_error='ignore')
            self.trigger_action("sync.hierarchical.attrs.local", event)

        if len(message) > 0:
            message = "Unable to sync: {}".format(message)

@@ -29,18 +29,10 @@ class ActionAskWhereIRun(BaseAction):
        return True

    def launch(self, session, entities, event):
        event = fa_session.ftrack_api.event.base.Event(
            topic='ftrack.action.launch',
            data=dict(
                actionIdentifier="show.where.i.run",
                selection=event["data"]["selection"],
                event_hub_id=session.event_hub.id
            ),
            source=dict(
                user=dict(username=session.api_user)
            )
        more_data = {"event_hub_id": session.event_hub.id}
        self.trigger_action(
            "show.where.i.run", event, additional_event_data=more_data
        )
        session.event_hub.publish(event, on_error='ignore')

        return True


@@ -8,7 +8,7 @@ import collections
from pypeapp import config
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, lib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector
from bson.objectid import ObjectId


@@ -222,7 +222,11 @@ class SyncHierarchicalAttrs(BaseAction):
        session.commit()

        if self.interface_messages:
            self.show_interface_from_dict(self.interface_messages, event)
            self.show_interface_from_dict(
                messages=self.interface_messages,
                title="something went wrong",
                event=event
            )

        return True


@@ -10,7 +10,7 @@ from pype.ftrack import BaseAction, lib
from pype.vendor.ftrack_api import session as fa_session


class Sync_To_Avalon(BaseAction):
class SyncToAvalon(BaseAction):
    '''
    Synchronizing data action - from Ftrack to Avalon DB

@@ -207,18 +207,8 @@ class Sync_To_Avalon(BaseAction):
            job['status'] = 'failed'

            session.commit()

            event = fa_session.ftrack_api.event.base.Event(
                topic='ftrack.action.launch',
                data=dict(
                    actionIdentifier='sync.hierarchical.attrs',
                    selection=event['data']['selection']
                ),
                source=dict(
                    user=event['source']['user']
                )
            )
            session.event_hub.publish(event, on_error='ignore')

            self.trigger_action("sync.hierarchical.attrs", event)

        if len(message) > 0:
            message = "Unable to sync: {}".format(message)

@@ -1,7 +1,7 @@
import os
import sys

from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector

from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent, lib

@@ -23,7 +23,10 @@ class SyncHierarchicalAttrs(BaseEvent):
            if not keys:
                continue

            entity = session.get(ent['entity_type'], ent['entityId'])
            if not ent['entityType'] in ['task', 'show']:
                continue

            entity = session.get(self._get_entity_type(ent), ent['entityId'])
            processable.append(ent)
            processable_ent[ent['entityId']] = entity

@@ -1,6 +1,6 @@
from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent, lib
from avalon.tools.libraryloader.io_nonsingleton import DbConnector
from pype.ftrack.lib.io_nonsingleton import DbConnector
from bson.objectid import ObjectId
from pypeapp import config
from pypeapp import Anatomy

@@ -97,7 +97,7 @@ class FtrackServer:
            msg = 'Loading of file "{}" failed ({})'.format(
                file, str(e)
            )
            log.warning(msg)
            log.warning(msg, exc_info=e)

        if len(register_functions_dict) < 1:
            raise Exception

@@ -94,13 +94,12 @@ class BaseHandler(object):
    def launch_log(self, func):
        @functools.wraps(func)
        def wrapper_launch(*args, **kwargs):
            label = self.__class__.__name__
            if hasattr(self, 'label'):
                label = self.label
                if hasattr(self, 'variant'):
                    label = '{} {}'.format(self.label, self.variant)
                else:
                    label = self.label
            else:
                label = self.__class__.__name__
            if self.variant is not None:
                label = '{} {}'.format(self.label, self.variant)

            self.log.info(('{} "{}": Launched').format(self.type, label))
            try:

@@ -141,6 +140,13 @@ class BaseHandler(object):

        # Custom validations
        result = self.preregister()
        if result is None:
            self.log.debug((
                "\"{}\" 'preregister' method returned 'None'. Assuming it"
                " did not fail; continuing as if it returned True."
            ).format(self.__class__.__name__))
            return

        if result is True:
            return
        msg = "Pre-register conditions were not met"

@@ -321,30 +327,13 @@ class BaseHandler(object):

        # Launch preactions
        for preaction in self.preactions:
            event = fa_session.ftrack_api.event.base.Event(
                topic='ftrack.action.launch',
                data=dict(
                    actionIdentifier=preaction,
                    selection=selection
                ),
                source=dict(
                    user=dict(username=session.api_user)
                )
            )
            session.event_hub.publish(event, on_error='ignore')
            self.trigger_action(preaction, event)

        # Relaunch this action
        event = fa_session.ftrack_api.event.base.Event(
            topic='ftrack.action.launch',
            data=dict(
                actionIdentifier=self.identifier,
                selection=selection,
                preactions_launched=True
            ),
            source=dict(
                user=dict(username=session.api_user)
            )
        additional_data = {"preactions_launched": True}
        self.trigger_action(
            self.identifier, event, additional_event_data=additional_data
        )
        session.event_hub.publish(event, on_error='ignore')

        return False

@@ -505,7 +494,8 @@ class BaseHandler(object):
        )

    def show_interface_from_dict(
        self, messages, title="", event=None, user=None, username=None, user_id=None
        self, messages, title="", event=None,
        user=None, username=None, user_id=None
    ):
        if not messages:
            self.log.debug("No messages to show! (messages dict is empty)")

@@ -532,3 +522,60 @@ class BaseHandler(object):
            items.append(message)

        self.show_interface(items, title, event, user, username, user_id)

    def trigger_action(
        self, action_name, event=None, session=None,
        selection=None, user_data=None,
        topic="ftrack.action.launch", additional_event_data={},
        on_error="ignore"
    ):
        self.log.debug("Triggering action \"{}\" Begins".format(action_name))

        if not session:
            session = self.session

        # Getting selection and user data
        _selection = None
        _user_data = None

        if event:
            _selection = event.get("data", {}).get("selection")
            _user_data = event.get("source", {}).get("user")

        if selection is not None:
            _selection = selection

        if user_data is not None:
            _user_data = user_data

        # Without selection and user data skip triggering
        msg = "Can't trigger \"{}\" action without {}."
        if _selection is None:
            self.log.error(msg.format(action_name, "selection"))
            return

        if _user_data is None:
            self.log.error(msg.format(action_name, "user data"))
            return

        _event_data = {
            "actionIdentifier": action_name,
            "selection": _selection
        }

        # Add additional data
        if additional_event_data:
            _event_data.update(additional_event_data)

        # Create and trigger event
        session.event_hub.publish(
            fa_session.ftrack_api.event.base.Event(
                topic=topic,
                data=_event_data,
                source=dict(user=_user_data)
            ),
            on_error=on_error
        )
        self.log.debug(
            "Action \"{}\" Triggered successfully".format(action_name)
        )
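
With this helper in place, a handler can chain another action in one call instead of hand-building an ftrack event, which is exactly what the refactored hunks above do. A hedged sketch of a subclass using it (the identifier and extra payload key are illustrative):

from pype.ftrack import BaseAction

class OpenThenBuild(BaseAction):
    identifier = "open.then.build"

    def launch(self, session, entities, event):
        # selection and user data are pulled from `event` by trigger_action;
        # extra keys travel through additional_event_data
        self.trigger_action(
            "create.project.structure", event,
            additional_event_data={"triggered_by": self.identifier},
        )
        return True
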
pype/ftrack/lib/io_nonsingleton.py (new file, 433 lines)

@@ -0,0 +1,433 @@
"""
Wrapper around interactions with the database

Copy of io module in avalon-core.
- In this case not working as singleton with api.Session!
"""

import os
import time
import errno
import shutil
import logging
import tempfile
import functools
import contextlib

from avalon import schema
from avalon.vendor import requests

# Third-party dependencies
import pymongo


def auto_reconnect(func):
    """Handling auto reconnect in 3 retry times"""
    @functools.wraps(func)
    def decorated(*args, **kwargs):
        object = args[0]
        for retry in range(3):
            try:
                return func(*args, **kwargs)
            except pymongo.errors.AutoReconnect:
                object.log.error("Reconnecting..")
                time.sleep(0.1)
        else:
            raise

    return decorated


class DbConnector(object):

    log = logging.getLogger(__name__)

    def __init__(self):
        self.Session = {}
        self._mongo_client = None
        self._sentry_client = None
        self._sentry_logging_handler = None
        self._database = None
        self._is_installed = False

    def install(self):
        """Establish a persistent connection to the database"""
        if self._is_installed:
            return

        logging.basicConfig()
        self.Session.update(self._from_environment())

        timeout = int(self.Session["AVALON_TIMEOUT"])
        self._mongo_client = pymongo.MongoClient(
            self.Session["AVALON_MONGO"], serverSelectionTimeoutMS=timeout)

        for retry in range(3):
            try:
                t1 = time.time()
                self._mongo_client.server_info()

            except Exception:
                self.log.error("Retrying..")
                time.sleep(1)
                timeout *= 1.5

            else:
                break

        else:
            raise IOError(
                "ERROR: Couldn't connect to %s in "
                "less than %.3f ms" % (self.Session["AVALON_MONGO"], timeout))

        self.log.info("Connected to %s, delay %.3f s" % (
            self.Session["AVALON_MONGO"], time.time() - t1))

        self._install_sentry()

        self._database = self._mongo_client[self.Session["AVALON_DB"]]
        self._is_installed = True

    def _install_sentry(self):
        if "AVALON_SENTRY" not in self.Session:
            return

        try:
            from raven import Client
            from raven.handlers.logging import SentryHandler
            from raven.conf import setup_logging
        except ImportError:
            # Note: There was a Sentry address in this Session
            return self.log.warning("Sentry disabled, raven not installed")

        client = Client(self.Session["AVALON_SENTRY"])

        # Transmit log messages to Sentry
        handler = SentryHandler(client)
        handler.setLevel(logging.WARNING)

        setup_logging(handler)

        self._sentry_client = client
        self._sentry_logging_handler = handler
        self.log.info(
            "Connected to Sentry @ %s" % self.Session["AVALON_SENTRY"]
        )

    def _from_environment(self):
        Session = {
            item[0]: os.getenv(item[0], item[1])
            for item in (
                # Root directory of projects on disk
                ("AVALON_PROJECTS", None),

                # Name of current Project
                ("AVALON_PROJECT", ""),

                # Name of current Asset
                ("AVALON_ASSET", ""),

                # Name of current silo
                ("AVALON_SILO", ""),

                # Name of current task
                ("AVALON_TASK", None),

                # Name of current app
                ("AVALON_APP", None),

                # Path to working directory
                ("AVALON_WORKDIR", None),

                # Name of current Config
                # TODO(marcus): Establish a suitable default config
                ("AVALON_CONFIG", "no_config"),

                # Name of Avalon in graphical user interfaces
                # Use this to customise the visual appearance of Avalon
                # to better integrate with your surrounding pipeline
                ("AVALON_LABEL", "Avalon"),

                # Used during any connections to the outside world
                ("AVALON_TIMEOUT", "1000"),

                # Address to Asset Database
                ("AVALON_MONGO", "mongodb://localhost:27017"),

                # Name of database used in MongoDB
                ("AVALON_DB", "avalon"),

                # Address to Sentry
                ("AVALON_SENTRY", None),

                # Address to Deadline Web Service
                # E.g. http://192.167.0.1:8082
                ("AVALON_DEADLINE", None),

                # Enable features not necessarily stable. The user's own risk
                ("AVALON_EARLY_ADOPTER", None),

                # Address of central asset repository, contains
                # the following interface:
                #   /upload
                #   /download
                #   /manager (optional)
                ("AVALON_LOCATION", "http://127.0.0.1"),

                # Boolean of whether to upload published material
                # to central asset repository
                ("AVALON_UPLOAD", None),

                # Generic username and password
                ("AVALON_USERNAME", "avalon"),
                ("AVALON_PASSWORD", "secret"),

                # Unique identifier for instances in working files
                ("AVALON_INSTANCE_ID", "avalon.instance"),
                ("AVALON_CONTAINER_ID", "avalon.container"),

                # Enable debugging
                ("AVALON_DEBUG", None),

            ) if os.getenv(item[0], item[1]) is not None
        }

        Session["schema"] = "avalon-core:session-1.0"
        try:
            schema.validate(Session)
        except schema.ValidationError as e:
            # TODO(marcus): Make this mandatory
            self.log.warning(e)

        return Session

    def uninstall(self):
        """Close any connection to the database"""
        try:
            self._mongo_client.close()
        except AttributeError:
            pass

        self._mongo_client = None
        self._database = None
        self._is_installed = False

    def active_project(self):
        """Return the name of the active project"""
        return self.Session["AVALON_PROJECT"]

    def activate_project(self, project_name):
        self.Session["AVALON_PROJECT"] = project_name

    def projects(self):
        """List available projects

        Returns:
            list of project documents

        """

        collection_names = self.collections()
        for project in collection_names:
            if project in ("system.indexes",):
                continue

            # Each collection will have exactly one project document
            document = self.find_project(project)

            if document is not None:
                yield document

    def locate(self, path):
        """Traverse a hierarchy from top-to-bottom

        Example:
            representation = locate(["hulk", "Bruce", "modelDefault", 1, "ma"])

        Returns:
            representation (ObjectId)

        """

        components = zip(
            ("project", "asset", "subset", "version", "representation"),
            path
        )

        parent = None
        for type_, name in components:
            latest = (type_ == "version") and name in (None, -1)

            try:
                if latest:
                    parent = self.find_one(
                        filter={
                            "type": type_,
                            "parent": parent
                        },
                        projection={"_id": 1},
                        sort=[("name", -1)]
                    )["_id"]
                else:
                    parent = self.find_one(
                        filter={
                            "type": type_,
                            "name": name,
                            "parent": parent
                        },
                        projection={"_id": 1},
                    )["_id"]

            except TypeError:
                return None

        return parent

    @auto_reconnect
    def collections(self):
        return self._database.collection_names()

    @auto_reconnect
    def find_project(self, project):
        return self._database[project].find_one({"type": "project"})

    @auto_reconnect
    def insert_one(self, item):
        assert isinstance(item, dict), "item must be of type <dict>"
        schema.validate(item)
        return self._database[self.Session["AVALON_PROJECT"]].insert_one(item)

    @auto_reconnect
    def insert_many(self, items, ordered=True):
        # check if all items are valid
        assert isinstance(items, list), "`items` must be of type <list>"
        for item in items:
            assert isinstance(item, dict), "`item` must be of type <dict>"
            schema.validate(item)

        return self._database[self.Session["AVALON_PROJECT"]].insert_many(
            items,
            ordered=ordered)

    @auto_reconnect
    def find(self, filter, projection=None, sort=None):
        return self._database[self.Session["AVALON_PROJECT"]].find(
            filter=filter,
            projection=projection,
            sort=sort
        )

    @auto_reconnect
    def find_one(self, filter, projection=None, sort=None):
        assert isinstance(filter, dict), "filter must be <dict>"

        return self._database[self.Session["AVALON_PROJECT"]].find_one(
            filter=filter,
            projection=projection,
            sort=sort
        )

    @auto_reconnect
    def save(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].save(
            *args, **kwargs)

    @auto_reconnect
    def replace_one(self, filter, replacement):
        return self._database[self.Session["AVALON_PROJECT"]].replace_one(
            filter, replacement)

    @auto_reconnect
    def update_many(self, filter, update):
        return self._database[self.Session["AVALON_PROJECT"]].update_many(
            filter, update)

    @auto_reconnect
    def distinct(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].distinct(
            *args, **kwargs)

    @auto_reconnect
    def drop(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].drop(
            *args, **kwargs)

    @auto_reconnect
    def delete_many(self, *args, **kwargs):
        return self._database[self.Session["AVALON_PROJECT"]].delete_many(
            *args, **kwargs)

    def parenthood(self, document):
        assert document is not None, "This is a bug"

        parents = list()

        while document.get("parent") is not None:
            document = self.find_one({"_id": document["parent"]})

            if document is None:
                break

            parents.append(document)

        return parents

    @contextlib.contextmanager
    def tempdir(self):
        tempdir = tempfile.mkdtemp()
        try:
            yield tempdir
        finally:
            shutil.rmtree(tempdir)

    def download(self, src, dst):
        """Download `src` to `dst`

        Arguments:
            src (str): URL to source file
            dst (str): Absolute path to destination file

        Yields tuple (progress, error):
            progress (int): Between 0-100
            error (Exception): Any exception raised when first making connection

        """

        try:
            response = requests.get(
                src,
                stream=True,
                auth=requests.auth.HTTPBasicAuth(
                    self.Session["AVALON_USERNAME"],
                    self.Session["AVALON_PASSWORD"]
                )
            )
        except requests.ConnectionError as e:
            yield None, e
            return

        with self.tempdir() as dirname:
            tmp = os.path.join(dirname, os.path.basename(src))

            with open(tmp, "wb") as f:
                total_length = response.headers.get("content-length")

                if total_length is None:  # no content length header
                    f.write(response.content)
                else:
                    downloaded = 0
                    total_length = int(total_length)
                    for data in response.iter_content(chunk_size=4096):
                        downloaded += len(data)
                        f.write(data)

                        yield int(100.0 * downloaded / total_length), None

            try:
                os.makedirs(os.path.dirname(dst))
            except OSError as e:
                # An already existing destination directory is fine.
                if e.errno != errno.EEXIST:
                    raise

            shutil.copy(tmp, dst)
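
Because this copy of avalon's io module is not a singleton, several independent database connections can coexist in one process. A minimal usage sketch, assuming AVALON_MONGO and related variables are set in the environment (the project and asset names are illustrative):

from pype.ftrack.lib.io_nonsingleton import DbConnector

db = DbConnector()
db.install()                 # reads AVALON_MONGO etc. from the environment
db.activate_project("demo")  # per-instance state, unlike avalon.io
asset = db.find_one({"type": "asset", "name": "sh010"})
db.uninstall()
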
pype/lib.py (95 changed lines)

@@ -5,6 +5,7 @@ import importlib
import itertools
import contextlib
import subprocess
import inspect

from .vendor import pather
from .vendor.pather.error import ParseError

@@ -31,7 +32,9 @@ def _subprocess(args):
    output = proc.communicate()[0]

    if proc.returncode != 0:
        log.error(output)
        raise ValueError("\"{}\" was not successful: {}".format(args, output))
    return output


def get_hierarchy(asset_name=None):

@@ -421,7 +424,7 @@ def get_version_from_path(file):
        v: version number in string ('001')

    """
    pattern = re.compile(r"[\._]v([0-9]*)")
    pattern = re.compile(r"[\._]v([0-9]+)")
    try:
        return pattern.findall(file)[0]
    except IndexError:

@@ -467,9 +470,7 @@ def filter_pyblish_plugins(plugins):

    host = api.current_host()

    presets = config.get_presets().get('plugins', {}).get(host, {}).get(
        "publish", {}
    )
    presets = config.get_presets().get('plugins', {})

    # iterate over plugins
    for plugin in plugins[:]:

@@ -477,10 +478,20 @@ def filter_pyblish_plugins(plugins):
        if not presets:
            continue

        file = os.path.normpath(inspect.getsourcefile(plugin))
        file = os.path.normpath(file)

        # host determined from path
        host_from_file = file.split(os.path.sep)[-3:-2][0]
        plugin_kind = file.split(os.path.sep)[-2:-1][0]

        try:
            config_data = presets[plugin.__name__]  # noqa: E501
            config_data = presets[host]["publish"][plugin.__name__]
        except KeyError:
            continue
        try:
            config_data = presets[host_from_file][plugin_kind][plugin.__name__]  # noqa: E501
        except KeyError:
            continue

        for option, value in config_data.items():
            if option == "enabled" and value is False:

@@ -492,6 +503,72 @@ def filter_pyblish_plugins(plugins):

            setattr(plugin, option, value)

    # Remove already processed plugins from dictionary
    # WARNING Requires plugins with unique names
    presets.pop(plugin.__name__)


def get_subsets(asset_name,
                regex_filter=None,
                version=None,
                representations=["exr", "dpx"]):
    """
    Query subsets with filter on name.

    The method will return all found subsets together with their defined
    versions and representations. A version can be specified by number;
    representations can be filtered.

    Arguments:
        asset_name (str): asset (shot) name
        regex_filter (raw): raw string with filter pattern
        version (str or int): `last` or number of version
        representations (list): list for all representations

    Returns:
        dict: subsets with version and representaions in keys
    """
    from avalon import io

    # query asset from db
    asset_io = io.find_one({"type": "asset",
                            "name": asset_name})

    # check if anything returned
    assert asset_io, "Asset not existing. \
        Check correct name: `{}`".format(asset_name)

    # create subsets query filter
    filter_query = {"type": "subset", "parent": asset_io["_id"]}

    # add regex filter string into query filter
    if regex_filter:
        filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}})
    else:
        filter_query.update({"name": {"$regex": r'.*'}})

    # query all assets
    subsets = [s for s in io.find(filter_query)]

    assert subsets, (
        "No subsets found. Check correct filter. "
        "Try this for start `r'.*'`: asset: `{}`".format(asset_name)
    )

    output_dict = {}
    # Process subsets
    for subset in subsets:
        if not version:
            version_sel = io.find_one({"type": "version",
                                       "parent": subset["_id"]},
                                      sort=[("name", -1)])
        else:
            assert isinstance(version, int), "version needs to be `int` type"
            version_sel = io.find_one({"type": "version",
                                       "parent": subset["_id"],
                                       "name": int(version)})

        find_dict = {"type": "representation",
                     "parent": version_sel["_id"]}

        filter_repr = {"$or": [{"name": repr} for repr in representations]}

        find_dict.update(filter_repr)
        repres_out = [i for i in io.find(find_dict)]

        if len(repres_out) > 0:
            output_dict[subset["name"]] = {"version": version_sel,
                                           "representaions": repres_out}

    return output_dict
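
A hedged usage sketch of get_subsets (the asset name and filter are illustrative; note the output uses the "representaions" key spelled exactly as in the code above):

from pype.lib import get_subsets

# Latest version of every subset starting with "plate", exr/dpx only.
subsets = get_subsets("sh010", regex_filter=r"^plate",
                      representations=["exr", "dpx"])
for name, data in subsets.items():
    print(name, data["version"]["name"], len(data["representaions"]))
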
@@ -104,7 +104,7 @@ def install():
    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)

    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

    workfile_settings = lib.WorkfileSettings()
    # Disable all families except for the ones we explicitly want to see
    family_states = [
        "write",

@@ -121,7 +121,7 @@ def install():
    nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")

    # Set context settings.
    nuke.addOnCreate(lib.set_context_settings, nodeClass="Root")
    nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")

    menu.install()

888 pype/nuke/lib.py

@@ -9,7 +9,7 @@ log = Logger().get_logger(__name__, "nuke")
def install():
    menubar = nuke.menu("Nuke")
    menu = menubar.findItem(Session["AVALON_LABEL"])

    workfile_settings = lib.WorkfileSettings()
    # replace reset resolution from avalon core with pype's
    name = "Reset Resolution"
    new_name = "Set Resolution"

@@ -20,7 +20,7 @@ def install():
    log.debug("Changing Item: {}".format(rm_item))
    # rm_item[1].setEnabled(False)
    menu.removeItem(rm_item[1].name())
    menu.addCommand(new_name, lib.reset_resolution, index=(rm_item[0]))
    menu.addCommand(new_name, workfile_settings.reset_resolution, index=(rm_item[0]))

    # replace reset frame range from avalon core with pype's
    name = "Reset Frame Range"

@@ -31,20 +31,28 @@ def install():
    log.debug("Changing Item: {}".format(rm_item))
    # rm_item[1].setEnabled(False)
    menu.removeItem(rm_item[1].name())
    menu.addCommand(new_name, lib.reset_frame_range_handles, index=(rm_item[0]))
    menu.addCommand(new_name, workfile_settings.reset_frame_range_handles, index=(rm_item[0]))

    # add colorspace menu item
    name = "Set colorspace"
    menu.addCommand(
        name, lib.set_colorspace,
        name, workfile_settings.set_colorspace,
        index=(rm_item[0]+2)
    )
    log.debug("Adding menu item: {}".format(name))

    # add workfile builder menu item
    name = "Build First Workfile.."
    menu.addCommand(
        name, lib.BuildWorkfile().process,
        index=(rm_item[0]+7)
    )
    log.debug("Adding menu item: {}".format(name))

    # add item that applies all settings above
    name = "Apply all settings"
    menu.addCommand(
        name, lib.set_context_settings, index=(rm_item[0]+3)
        name, workfile_settings.set_context_settings, index=(rm_item[0]+3)
    )
    log.debug("Adding menu item: {}".format(name))

@@ -1,7 +1,6 @@
import os
from pypeapp import Logger
import hiero
from avalon.tools import workfiles
from avalon import api as avalon
from pyblish import api as pyblish

@@ -189,7 +189,7 @@ def add_submission():

class PublishAction(QtWidgets.QAction):
    """
    Action which is shown as a menu item
    """

    def __init__(self):

@@ -287,3 +287,59 @@ def _show_no_gui():
    messagebox.setStandardButtons(messagebox.Ok)
    messagebox.exec_()


def CreateNukeWorkfile(nodes=None,
                       nodes_effects=None,
                       to_timeline=False,
                       **kwargs):
    '''Create a Nuke workfile of a particular version from the given nodes.
    Also creates timeline track items as precomps.

    Arguments:
        nodes (list of dict): knob name/value pairs per node; key order
            is important
        to_timeline (bool): will build trackItem with metadata

    Returns:
        bool: True if done

    Raises:
        Exception: with traceback

    '''
    import hiero.core
    from avalon.nuke import imprint
    from pype.nuke import (
        lib as nklib
    )

    # check if the file exists, and if so raise "File exists!"
    # (`filepath` and `representations` are expected to come from the
    # enclosing scope or a later revision of this signature)
    if os.path.exists(filepath):
        raise FileExistsError("File already exists: `{}`".format(filepath))

    # if no representations match, raise "no representations to be built"
    if len(representations) == 0:
        raise AttributeError("Missing list of `representations`")

    # check nodes input
    if len(nodes) == 0:
        log.warning("Missing list of `nodes`")

    # create temp nk file
    nuke_script = hiero.core.nuke.ScriptWriter()

    # create root node and save all metadata
    root_node = hiero.core.nuke.RootNode()

    root_path = os.environ["AVALON_PROJECTS"]

    nuke_script.addNode(root_node)

    # call pype.nuke.lib.BuildWorkfile here
    script_builder = nklib.BuildWorkfile(
        root_node=root_node,
        root_path=root_path,
        nodes=nuke_script.getNodes(),
        **kwargs
    )

@@ -1,19 +1,22 @@
"""Host API required Work Files tool"""
import os

import hiero

from avalon import api


def file_extensions():
    return [".hrox"]


def has_unsaved_changes():
    return hiero.core.projects()[-1]
    # There are no methods for querying unsaved changes to a project, so
    # we enforce saving every time.
    return True


def save(filepath):
    project = hiero.core.projects()[-1]

    if project:
        project.saveAs(filepath)
    else:

@@ -22,40 +25,20 @@ def save(filepath):


def open(filepath):
    try:
        hiero.core.openProject(filepath)
        return True
    except Exception as e:
        try:
            from PySide.QtGui import *
            from PySide.QtCore import *
        except:
            from PySide2.QtGui import *
            from PySide2.QtWidgets import *
            from PySide2.QtCore import *

        prompt = "Cannot open the selected file: `{}`".format(e)
        hiero.core.log.error(prompt)
        dialog = QMessageBox.critical(
            hiero.ui.mainWindow(), "Error", unicode(prompt))
    hiero.core.openProject(filepath)
    return True


def current_file():
    import os
    import hiero

    current_file = hiero.core.projects()[-1].path()
    normalised = os.path.normpath(current_file)

    # Unsaved current file
    if normalised is '':
        return "NOT SAVED"
    if normalised == "":
        return None

    return normalised


def work_root():
    from avalon import api

    return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/")
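
These functions make up the Work Files host API that avalon's workfiles
tool calls into. A minimal sketch of how a caller might drive them (the
driver code is illustrative; avalon reaches these through the registered
host module, not a direct import):

    # hypothetical driver for the Work Files host API above
    import os

    if ".hrox" in file_extensions():
        current = current_file()
        if current is None:
            # unsaved project: save into the work root first
            save(os.path.join(work_root(), "untitled.hrox"))
        else:
            open(current)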

@@ -2,13 +2,54 @@ import tempfile
import os
import pyblish.api

from pypeapp import config
import inspect

ValidatePipelineOrder = pyblish.api.ValidatorOrder + 0.05
ValidateContentsOrder = pyblish.api.ValidatorOrder + 0.1
ValidateSceneOrder = pyblish.api.ValidatorOrder + 0.2
ValidateMeshOrder = pyblish.api.ValidatorOrder + 0.3


class Extractor(pyblish.api.InstancePlugin):
def imprint_attributes(plugin):
    """
    Load presets by class and set them as attributes (if found)

    :param plugin: plugin instance
    :type plugin: instance
    """
    file = inspect.getfile(plugin.__class__)
    file = os.path.normpath(file)
    plugin_kind = file.split(os.path.sep)[-2:-1][0]
    plugin_host = file.split(os.path.sep)[-3:-2][0]
    plugin_name = type(plugin).__name__
    try:
        config_data = config.get_presets()['plugins'][plugin_host][plugin_kind][plugin_name]  # noqa: E501
    except KeyError:
        print("preset not found")
        return

    for option, value in config_data.items():
        if option == "enabled" and value is False:
            setattr(plugin, "active", False)
        else:
            setattr(plugin, option, value)
            print("setting {}: {} on {}".format(option, value, plugin_name))


class ContextPlugin(pyblish.api.ContextPlugin):
    def process(cls, *args, **kwargs):
        imprint_attributes(cls)
        super(ContextPlugin, cls).process(cls, *args, **kwargs)


class InstancePlugin(pyblish.api.InstancePlugin):
    def process(cls, *args, **kwargs):
        imprint_attributes(cls)
        super(InstancePlugin, cls).process(cls, *args, **kwargs)


class Extractor(InstancePlugin):
    """Extractor base class.

    The extractor base class implements a "staging_dir" function used to

@@ -106,11 +106,11 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
    tasks_to_create = []
    for child in entity['children']:
        if child.entity_type.lower() == 'task':
            existing_tasks.append(child['name'])
            existing_tasks.append(child['name'].lower())
            # existing_tasks.append(child['type']['name'])

    for task in tasks:
        if task in existing_tasks:
        if task.lower() in existing_tasks:
            print("Task {} already exists".format(task))
            continue
        tasks_to_create.append(task)

@@ -94,7 +94,8 @@ class ExtractBurnin(pype.api.Extractor):

    args = [executable, scriptpath, json_data]
    self.log.debug("Executing: {}".format(args))
    pype.api.subprocess(args)
    output = pype.api.subprocess(args)
    self.log.debug("Output: {}".format(output))

    repre_update = {
        "files": movieFileBurnin,

@@ -2,11 +2,11 @@ import pyblish.api
from avalon import io


class IntegrateHierarchyToAvalon(pyblish.api.ContextPlugin):
class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
    """Create entities in Avalon based on collected data."""

    order = pyblish.api.IntegratorOrder - 0.1
    label = "Integrate Hierarchy To Avalon"
    order = pyblish.api.ExtractorOrder - 0.01
    label = "Extract Hierarchy To Avalon"
    families = ["clip", "shot"]

    def process(self, context):

@@ -170,8 +170,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
    subprcs_cmd = " ".join(mov_args)

    # run subprocess
    self.log.debug("{}".format(subprcs_cmd))
    pype.api.subprocess(subprcs_cmd)
    self.log.debug("Executing: {}".format(subprcs_cmd))
    output = pype.api.subprocess(subprcs_cmd)
    self.log.debug("Output: {}".format(output))

    # create representation data
    repre_new.update({

@@ -63,6 +63,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
        "rig",
        "plate",
        "look",
        "lut",
        "audio"
    ]
    exclude_families = ["clip"]

@@ -231,28 +231,24 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
    """
    # Get a submission job
    data = instance.data.copy()
    job = instance.data.get("deadlineSubmissionJob")
    render_job = data.pop("deadlineSubmissionJob")
    submission_type = "deadline"

    if not job:
    if not render_job:
        # No deadline job. Try Muster: musterSubmissionJob
        job = data.pop("musterSubmissionJob")
        render_job = data.pop("musterSubmissionJob")
        submission_type = "muster"
        if not job:
        if not render_job:
            raise RuntimeError("Can't continue without valid Deadline "
                               "or Muster submission prior to this "
                               "plug-in.")

    if submission_type == "deadline":
        render_job = data.pop("deadlineSubmissionJob")
        self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
                                                "http://localhost:8082")
        assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"

        self._submit_deadline_post_job(instance, job)

    if submission_type == "muster":
        render_job = data.pop("musterSubmissionJob")
        self._submit_deadline_post_job(instance, render_job)

    asset = data.get("asset") or api.Session["AVALON_ASSET"]
    subset = data["subset"]

@@ -7,15 +7,30 @@ import pyblish.api
import pype.maya.lib as lib
import appdirs
import platform
from pypeapp import config


# mapping between Maya renderer names and Muster template names
muster_maya_mapping = {
    "arnold": "Maya Arnold",
    "mentalray": "Maya Mr",
    "renderman": "Maya Renderman",
    "redshift": "Maya Redshift"
}
# mapping between Maya renderer names and Muster template ids
def _get_template_id(renderer):
    """
    Return muster template ID based on renderer name.

    :param renderer: renderer name
    :type renderer: str
    :returns: muster template id
    :rtype: int
    """

    templates = config.get_presets()["muster"]["templates_mapping"]
    if not templates:
        raise RuntimeError(("Muster template mapping missing in pype-config "
                            "`presets/muster/templates_mapping.json`"))
    try:
        template_id = templates[renderer]
    except KeyError:
        raise RuntimeError("Unmapped renderer - missing template id")

    return template_id
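
# A hedged sketch of the preset this reads, at
# `presets/muster/templates_mapping.json` in pype-config (the renderer keys
# follow the old `muster_maya_mapping`; the numeric template ids are
# illustrative and depend on the local Muster install):
#
#     {
#         "arnold": 46,
#         "mentalray": 47,
#         "renderman": 48,
#         "redshift": 49
#     }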


def _get_script():

@@ -213,12 +228,10 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
        :rtype: int
        :raises: Exception if template ID isn't found
        """
        try:
            self.log.info("Trying to find template for [{}]".format(renderer))
            mapped = muster_maya_mapping.get(renderer)
            return self._templates.get(mapped)
        except ValueError:
            raise Exception('Unimplemented renderer {}'.format(renderer))
        self.log.info("Trying to find template for [{}]".format(renderer))
        mapped = _get_template_id(renderer)
        self.log.info("got id [{}]".format(mapped))
        return self._templates.get(mapped)

    def _submit(self, payload):
        """

@@ -253,15 +266,15 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
        self.MUSTER_REST_URL = os.environ.get("MUSTER_REST_URL")

        if self.MUSTER_REST_URL is None:
            self.log.debug(
            self.log.error(
                "\"MUSTER_REST_URL\" is not found. Skipping "
                "\"{}\".".format(instance)
                "[{}]".format(instance)
            )
            return
            raise RuntimeError("MUSTER_REST_URL not set")

        self._load_credentials()
        self._authenticate()
        self._get_templates()
        # self._get_templates()

        context = instance.context
        workspace = context.data["workspaceDir"]

@@ -349,7 +362,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
        "platform": 0,
        "job": {
            "jobName": jobname,
            "templateId": self._resolve_template(
            "templateId": self._get_template_id(
                instance.data["renderer"]),
            "chunksInterleave": 2,
            "chunksPriority": "0",

@@ -20,6 +20,7 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
    label = "Attributes"
    hosts = ["maya"]
    actions = [pype.api.RepairContextAction]
    optional = True

    def process(self, context):
        # Check for preset existence.

@@ -74,8 +75,12 @@ class ValidateAttributes(pyblish.api.ContextPlugin):

    presets_to_validate = attributes[name]
    for attribute in node.listAttr():
        if attribute.attrName() in presets_to_validate:
            expected = presets_to_validate[attribute.attrName()]
        # match presets against both the short and the long attribute name
        names = [attribute.shortName(), attribute.longName()]
        attribute_name = list(
            set(names) & set(presets_to_validate.keys())
        )
        if attribute_name:
            expected = presets_to_validate[attribute_name[0]]
            if attribute.get() != expected:
                invalid_attributes.append(
                    {

@@ -75,11 +75,11 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
        if missing_sets:
            for set in missing_sets:
                if '_SET' not in set:
                    # A set of this node is not coming along, this is wrong!
                    cls.log.error("Missing sets '{}' for node "
                                  "'{}'".format(missing_sets, node))
                    invalid.append(node)
                    continue
                # A set of this node is not coming along, this is wrong!
                cls.log.error("Missing sets '{}' for node "
                              "'{}'".format(missing_sets, node))
                invalid.append(node)
                continue

            # Ensure the node is in the sets that are collected
            for shaderset, data in relationships.items():

@@ -12,7 +12,7 @@ def is_subdir(path, root_dir):
    root_dir = os.path.realpath(root_dir)

    # If not on the same drive
    if os.path.splitdrive(path)[0] != os.path.splitdrive(root_dir)[0]:
    if os.path.splitdrive(path)[0].lower() != os.path.splitdrive(root_dir)[0].lower():  # noqa: E501
        return False
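
    # The lowercasing matters on Windows, where drive letters compare
    # case-sensitively as strings even though the filesystem ignores case:
    #
    #     os.path.splitdrive(r"C:\projects\shot")[0]  # -> 'C:'
    #     os.path.splitdrive(r"c:\projects\shot")[0]  # -> 'c:'
    #
    # Without .lower() the same drive would be treated as a different one
    # and is_subdir() would wrongly return False.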

    # Get 'relative path' (can contain ../ which means going up)

@@ -69,9 +69,7 @@ class CreateWriteRender(avalon.nuke.Creator):
    write_data.update({
        "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})

    create_write_node(self.data["subset"], write_data)

    return
    return create_write_node(self.data["subset"], write_data)


class CreateWritePrerender(avalon.nuke.Creator):
317 pype/plugins/nuke/load/load_luts.py (new file)

@@ -0,0 +1,317 @@
from avalon import api, style, io
import nuke
import json
from collections import OrderedDict


class LoadLuts(api.Loader):
    """Loading colorspace soft effects exported from nukestudio"""

    representations = ["lutJson"]
    families = ["lut"]

    label = "Load Luts - nodes"
    order = 0
    icon = "cc"
    color = style.colors.light

    def load(self, context, name, namespace, data):
        """
        Loading function to get the soft effects to a particular read node

        Arguments:
            context (dict): context of version
            name (str): name of the version
            namespace (str): asset name
            data (dict): compulsory attribute > not used

        Returns:
            nuke node: containerised nuke node object
        """
        # import dependencies
        from avalon.nuke import containerise

        # get main variables
        version = context['version']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        workfile_first_frame = int(nuke.root()["first_frame"].getValue())
        namespace = namespace or context['asset']['name']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        # prepare data for imprinting
        # add additional metadata from the version to imprint to Avalon knob
        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # getting file path
        file = self.fname.replace("\\", "/")

        # getting data from json file with unicode conversion
        with open(file, "r") as f:
            json_f = {self.byteify(key): self.byteify(value)
                      for key, value in json.load(f).iteritems()}

        # get correct order of nodes by positions on track and subtrack
        nodes_order = self.reorder_nodes(json_f["effects"])

        # adding nodes to the node graph
        # just in case we are inside a group, jump out of it
        nuke.endGroup()

        GN = nuke.createNode("Group")

        GN["name"].setValue(object_name)

        # adding content to the group node
        with GN:
            pre_node = nuke.createNode("Input")
            pre_node["name"].setValue("rgb")

            for ef_name, ef_val in nodes_order.items():
                node = nuke.createNode(ef_val["class"])
                for k, v in ef_val["node"].items():
                    if isinstance(v, list) and len(v) > 4:
                        node[k].setAnimated()
                        for i, value in enumerate(v):
                            if isinstance(value, list):
                                for ci, cv in enumerate(value):
                                    node[k].setValueAt(
                                        cv,
                                        (workfile_first_frame + i),
                                        ci)
                            else:
                                node[k].setValueAt(
                                    value,
                                    (workfile_first_frame + i))
                    else:
                        node[k].setValue(v)
                node.setInput(0, pre_node)
                pre_node = node

            output = nuke.createNode("Output")
            output.setInput(0, pre_node)

        # try to find the parent read node
        self.connect_read_node(GN, namespace, json_f["assignTo"])

        GN["tile_color"].setValue(int("0x3469ffff", 16))

        self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))

        return containerise(
            node=GN,
            name=name,
            namespace=namespace,
            context=context,
            loader=self.__class__.__name__,
            data=data_imprint)

    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        from avalon.nuke import (
            update_container
        )
        # get main variables
        # Get version from io
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        # get corresponding node
        GN = nuke.toNode(container['objectName'])

        file = api.get_representation_path(representation).replace("\\", "/")
        name = container['name']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        workfile_first_frame = int(nuke.root()["first_frame"].getValue())
        namespace = container['namespace']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"representation": str(representation["_id"]),
                        "frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # Update the imprinted representation
        update_container(
            GN,
            data_imprint
        )

        # getting data from json file with unicode conversion
        with open(file, "r") as f:
            json_f = {self.byteify(key): self.byteify(value)
                      for key, value in json.load(f).iteritems()}

        # get correct order of nodes by positions on track and subtrack
        nodes_order = self.reorder_nodes(json_f["effects"])

        # adding nodes to the node graph
        # just in case we are inside a group, jump out of it
        nuke.endGroup()

        # adding content to the group node
        with GN:
            # first remove all nodes
            [nuke.delete(n) for n in nuke.allNodes()]

            # create input node
            pre_node = nuke.createNode("Input")
            pre_node["name"].setValue("rgb")

            for ef_name, ef_val in nodes_order.items():
                node = nuke.createNode(ef_val["class"])
                for k, v in ef_val["node"].items():
                    if isinstance(v, list) and len(v) > 3:
                        node[k].setAnimated()
                        for i, value in enumerate(v):
                            if isinstance(value, list):
                                for ci, cv in enumerate(value):
                                    node[k].setValueAt(
                                        cv,
                                        (workfile_first_frame + i),
                                        ci)
                            else:
                                node[k].setValueAt(
                                    value,
                                    (workfile_first_frame + i))
                    else:
                        node[k].setValue(v)
                node.setInput(0, pre_node)
                pre_node = node

            # create output node
            output = nuke.createNode("Output")
            output.setInput(0, pre_node)

        # try to find the parent read node
        self.connect_read_node(GN, namespace, json_f["assignTo"])

        # get all versions in a list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        # change color of node
        if version.get("name") not in [max_version]:
            GN["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            GN["tile_color"].setValue(int("0x3469ffff", 16))

        self.log.info("updated to version: {}".format(version.get("name")))

    def connect_read_node(self, group_node, asset, subset):
        """
        Finds the read node and selects it

        Arguments:
            asset (str): asset name

        Returns:
            nuke node: node is selected
            None: if nothing found
        """
        search_name = "{0}_{1}".format(asset, subset)
        node = [n for n in nuke.allNodes() if search_name in n["name"].value()]
        if len(node) > 0:
            rn = node[0]
        else:
            rn = None

        # Parent read node has been found
        # solving connections
        if rn:
            dep_nodes = rn.dependent()

            if len(dep_nodes) > 0:
                for dn in dep_nodes:
                    dn.setInput(0, group_node)

            group_node.setInput(0, rn)
            group_node.autoplace()

    def reorder_nodes(self, data):
        new_order = OrderedDict()
        trackNums = [v["trackIndex"] for k, v in data.items()]
        subTrackNums = [v["subTrackIndex"] for k, v in data.items()]

        for trackIndex in range(
                min(trackNums), max(trackNums) + 1):
            for subTrackIndex in range(
                    min(subTrackNums), max(subTrackNums) + 1):
                item = self.get_item(data, trackIndex, subTrackIndex)
                if item:
                    new_order.update(item)
        return new_order

    def get_item(self, data, trackIndex, subTrackIndex):
        return {key: val for key, val in data.items()
                if subTrackIndex == val["subTrackIndex"]
                if trackIndex == val["trackIndex"]}
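
    # The reordering walks track indices first and subtrack indices second,
    # so effects end up applied bottom-up the way they stack in the
    # timeline. Illustrative run (keys and indices made up):
    #
    #     effects = {
    #         "OCIOColorSpace1": {"trackIndex": 2, "subTrackIndex": 0},
    #         "Grade1": {"trackIndex": 1, "subTrackIndex": 0},
    #         "Grade2": {"trackIndex": 1, "subTrackIndex": 1},
    #     }
    #     # reorder_nodes(effects) yields, in order:
    #     # Grade1, Grade2, OCIOColorSpace1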

    def byteify(self, input):
        """
        Converts unicode strings to byte strings.
        It walks through the whole dictionary.

        Arguments:
            input (dict/str): input

        Returns:
            dict: with fixed values and keys

        """

        if isinstance(input, dict):
            return {self.byteify(key): self.byteify(value)
                    for key, value in input.iteritems()}
        elif isinstance(input, list):
            return [self.byteify(element) for element in input]
        elif isinstance(input, unicode):
            return input.encode('utf-8')
        else:
            return input
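
    # Illustrative round trip (Python 2, where json.load() returns unicode
    # everywhere; the knob values are made up):
    #
    #     >>> loader.byteify({u"class": u"OCIOColorSpace",
    #     ...                 u"node": {u"mix": [u"1.0", u"0.5"]}})
    #     {'class': 'OCIOColorSpace', 'node': {'mix': ['1.0', '0.5']}}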

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from avalon.nuke import viewer_update_and_undo_stop
        node = nuke.toNode(container['objectName'])
        with viewer_update_and_undo_stop():
            nuke.delete(node)
330 pype/plugins/nuke/load/load_luts_ip.py (new file)

@@ -0,0 +1,330 @@
from avalon import api, style, io
import nuke
import json
from collections import OrderedDict
from pype.nuke import lib


class LoadLutsInputProcess(api.Loader):
    """Loading colorspace soft effects exported from nukestudio"""

    representations = ["lutJson"]
    families = ["lut"]

    label = "Load Luts - Input Process"
    order = 0
    icon = "eye"
    color = style.colors.alert

    def load(self, context, name, namespace, data):
        """
        Loading function to get the soft effects to a particular read node

        Arguments:
            context (dict): context of version
            name (str): name of the version
            namespace (str): asset name
            data (dict): compulsory attribute > not used

        Returns:
            nuke node: containerised nuke node object
        """
        # import dependencies
        from avalon.nuke import containerise

        # get main variables
        version = context['version']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        workfile_first_frame = int(nuke.root()["first_frame"].getValue())
        namespace = namespace or context['asset']['name']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        # prepare data for imprinting
        # add additional metadata from the version to imprint to Avalon knob
        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # getting file path
        file = self.fname.replace("\\", "/")

        # getting data from json file with unicode conversion
        with open(file, "r") as f:
            json_f = {self.byteify(key): self.byteify(value)
                      for key, value in json.load(f).iteritems()}

        # get correct order of nodes by positions on track and subtrack
        nodes_order = self.reorder_nodes(json_f["effects"])

        # adding nodes to the node graph
        # just in case we are inside a group, jump out of it
        nuke.endGroup()

        GN = nuke.createNode("Group")

        GN["name"].setValue(object_name)

        # adding content to the group node
        with GN:
            pre_node = nuke.createNode("Input")
            pre_node["name"].setValue("rgb")

            for ef_name, ef_val in nodes_order.items():
                node = nuke.createNode(ef_val["class"])
                for k, v in ef_val["node"].items():
                    if isinstance(v, list) and len(v) > 4:
                        node[k].setAnimated()
                        for i, value in enumerate(v):
                            if isinstance(value, list):
                                for ci, cv in enumerate(value):
                                    node[k].setValueAt(
                                        cv,
                                        (workfile_first_frame + i),
                                        ci)
                            else:
                                node[k].setValueAt(
                                    value,
                                    (workfile_first_frame + i))
                    else:
                        node[k].setValue(v)
                node.setInput(0, pre_node)
                pre_node = node

            output = nuke.createNode("Output")
            output.setInput(0, pre_node)

        # try to place it under Viewer1
        if not self.connect_active_viewer(GN):
            nuke.delete(GN)
            return

        GN["tile_color"].setValue(int("0x3469ffff", 16))

        self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))

        return containerise(
            node=GN,
            name=name,
            namespace=namespace,
            context=context,
            loader=self.__class__.__name__,
            data=data_imprint)

    def update(self, container, representation):
        """Update the Loader's path

        Nuke automatically tries to reset some variables when changing
        the loader's path to a new file. These automatic changes are to its
        inputs:

        """

        from avalon.nuke import (
            update_container
        )
        # get main variables
        # Get version from io
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        # get corresponding node
        GN = nuke.toNode(container['objectName'])

        file = api.get_representation_path(representation).replace("\\", "/")
        name = container['name']
        version_data = version.get("data", {})
        vname = version.get("name", None)
        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)
        workfile_first_frame = int(nuke.root()["first_frame"].getValue())
        namespace = container['namespace']
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
                    "source", "author", "fps"]

        data_imprint = {"representation": str(representation["_id"]),
                        "frameStart": first,
                        "frameEnd": last,
                        "version": vname,
                        "colorspaceInput": colorspace,
                        "objectName": object_name}

        for k in add_keys:
            data_imprint.update({k: version_data[k]})

        # Update the imprinted representation
        update_container(
            GN,
            data_imprint
        )

        # getting data from json file with unicode conversion
        with open(file, "r") as f:
            json_f = {self.byteify(key): self.byteify(value)
                      for key, value in json.load(f).iteritems()}

        # get correct order of nodes by positions on track and subtrack
        nodes_order = self.reorder_nodes(json_f["effects"])

        # adding nodes to the node graph
        # just in case we are inside a group, jump out of it
        nuke.endGroup()

        # adding content to the group node
        with GN:
            # first remove all nodes
            [nuke.delete(n) for n in nuke.allNodes()]

            # create input node
            pre_node = nuke.createNode("Input")
            pre_node["name"].setValue("rgb")

            for ef_name, ef_val in nodes_order.items():
                node = nuke.createNode(ef_val["class"])
                for k, v in ef_val["node"].items():
                    if isinstance(v, list) and len(v) > 3:
                        node[k].setAnimated()
                        for i, value in enumerate(v):
                            if isinstance(value, list):
                                for ci, cv in enumerate(value):
                                    node[k].setValueAt(
                                        cv,
                                        (workfile_first_frame + i),
                                        ci)
                            else:
                                node[k].setValueAt(
                                    value,
                                    (workfile_first_frame + i))
                    else:
                        node[k].setValue(v)
                node.setInput(0, pre_node)
                pre_node = node

            # create output node
            output = nuke.createNode("Output")
            output.setInput(0, pre_node)

        # try to place it under Viewer1
        if not self.connect_active_viewer(GN):
            nuke.delete(GN)
            return

        # get all versions in a list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        # change color of node
        if version.get("name") not in [max_version]:
            GN["tile_color"].setValue(int("0xd84f20ff", 16))
        else:
            GN["tile_color"].setValue(int("0x3469ffff", 16))

        self.log.info("updated to version: {}".format(version.get("name")))

    def connect_active_viewer(self, group_node):
        """
        Finds the active viewer and places the node under it, also adding
        the group's name to the Input Process of the viewer

        Arguments:
            group_node (nuke node): nuke group node object

        """
        group_node_name = group_node["name"].value()

        viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
        if len(viewer) > 0:
            viewer = viewer[0]
        else:
            self.log.error(
                "Please create a Viewer node before you run this action again")
            return None

        # get coordinates of Viewer1
        xpos = viewer["xpos"].value()
        ypos = viewer["ypos"].value()

        ypos += 150

        viewer["ypos"].setValue(ypos)

        # set coordinates on the group node
        group_node["xpos"].setValue(xpos)
        group_node["ypos"].setValue(ypos + 50)

        # add the group node name to the Viewer Input Process
        viewer["input_process_node"].setValue(group_node_name)

        # put a backdrop under them
        lib.create_backdrop(label="Input Process", layer=2,
                            nodes=[viewer, group_node], color="0x7c7faaff")

        return True

    def reorder_nodes(self, data):
        new_order = OrderedDict()
        trackNums = [v["trackIndex"] for k, v in data.items()]
        subTrackNums = [v["subTrackIndex"] for k, v in data.items()]

        for trackIndex in range(
                min(trackNums), max(trackNums) + 1):
            for subTrackIndex in range(
                    min(subTrackNums), max(subTrackNums) + 1):
                item = self.get_item(data, trackIndex, subTrackIndex)
                if item:
                    new_order.update(item)
        return new_order

    def get_item(self, data, trackIndex, subTrackIndex):
        return {key: val for key, val in data.items()
                if subTrackIndex == val["subTrackIndex"]
                if trackIndex == val["trackIndex"]}

    def byteify(self, input):
        """
        Converts unicode strings to byte strings.
        It walks through the whole dictionary.

        Arguments:
            input (dict/str): input

        Returns:
            dict: with fixed values and keys

        """

        if isinstance(input, dict):
            return {self.byteify(key): self.byteify(value)
                    for key, value in input.iteritems()}
        elif isinstance(input, list):
            return [self.byteify(element) for element in input]
        elif isinstance(input, unicode):
            return input.encode('utf-8')
        else:
            return input

    def switch(self, container, representation):
        self.update(container, representation)

    def remove(self, container):
        from avalon.nuke import viewer_update_and_undo_stop
        node = nuke.toNode(container['objectName'])
        with viewer_update_and_undo_stop():
            nuke.delete(node)
@@ -101,7 +101,8 @@ class LoadMov(api.Loader):
        handles = version_data.get("handles", None)
        handle_start = version_data.get("handleStart", None)
        handle_end = version_data.get("handleEnd", None)

        repr_cont = context["representation"]["context"]

        # fix handle start and end if none are available
        if not handle_start and not handle_end:
            handle_start = handles

@@ -119,9 +120,11 @@ class LoadMov(api.Loader):
        file = self.fname.replace("\\", "/")
        log.info("file: {}\n".format(self.fname))

        read_name = "Read"
        read_name += '_' + context["representation"]["context"]["subset"]
        read_name += '_' + context["representation"]["name"]
        read_name = "Read_{0}_{1}_{2}".format(
            repr_cont["asset"],
            repr_cont["subset"],
            repr_cont["representation"])

        # Create the Loader with the filename path set
        with viewer_update_and_undo_stop():

@@ -96,6 +96,8 @@ class LoadSequence(api.Loader):

        self.first_frame = int(nuke.root()["first_frame"].getValue())
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_start = version_data.get("handleStart", 0)
        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)

@@ -104,10 +106,17 @@ class LoadSequence(api.Loader):
        if namespace is None:
            namespace = context['asset']['name']

        first -= self.handle_start
        last += self.handle_end

        file = self.fname.replace("\\", "/")
        log.info("file: {}\n".format(self.fname))

        read_name = "Read_" + context["representation"]["context"]["subset"]
        repr_cont = context["representation"]["context"]
        read_name = "Read_{0}_{1}_{2}".format(
            repr_cont["asset"],
            repr_cont["subset"],
            repr_cont["representation"])

        # Create the Loader with the filename path set
        with viewer_update_and_undo_stop():

@@ -227,7 +236,8 @@ class LoadSequence(api.Loader):

        self.first_frame = int(nuke.root()["first_frame"].getValue())
        self.handle_start = version_data.get("handleStart", 0)

        self.handle_end = version_data.get("handleEnd", 0)

        first = version_data.get("frameStart", None)
        last = version_data.get("frameEnd", None)

@@ -237,6 +247,9 @@ class LoadSequence(api.Loader):
                "{} ({})".format(node['name'].value(), representation))
            first = 0

        first -= self.handle_start
        last += self.handle_end

        # Update the loader's path whilst preserving some values
        with preserve_trim(node):
            node["file"].setValue(file["path"])
@@ -101,6 +101,11 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
        "fps": instance.context.data["fps"]
    }

    group_node = [x for x in instance if x.Class() == "Group"][0]
    deadlineChunkSize = 1
    if "deadlineChunkSize" in group_node.knobs():
        deadlineChunkSize = group_node["deadlineChunkSize"].value()

    instance.data.update({
        "versionData": version_data,
        "path": path,

@@ -112,6 +117,7 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
        "frameEnd": last_frame,
        "outputType": output_type,
        "colorspace": node["colorspace"].value(),
        "deadlineChunkSize": deadlineChunkSize
    })

    self.log.debug("instance.data: {}".format(instance.data))

@@ -84,6 +84,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
        start=int(instance.data["frameStart"]),
        end=int(instance.data["frameEnd"])
    ),
    "ChunkSize": instance.data["deadlineChunkSize"],
    "Comment": comment,
42 pype/plugins/nuke/publish/validate_write_deadline_tab.py (new file)

@@ -0,0 +1,42 @@
import pyblish.api
import pype.nuke.lib


class RepairNukeWriteDeadlineTab(pyblish.api.Action):

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):

        # Get the errored instances
        failed = []
        for result in context.data["results"]:
            if (result["error"] is not None and result["instance"] is not None
                    and result["instance"] not in failed):
                failed.append(result["instance"])

        # Apply pyblish.logic to get the instances for the plug-in
        instances = pyblish.api.instances_by_plugin(failed, plugin)

        for instance in instances:
            group_node = [x for x in instance if x.Class() == "Group"][0]
            pype.nuke.lib.add_deadline_tab(group_node)


class ValidateNukeWriteDeadlineTab(pyblish.api.InstancePlugin):
    """Ensure the Deadline tab is present and current."""

    order = pyblish.api.ValidatorOrder
    label = "Deadline Tab"
    hosts = ["nuke"]
    optional = True
    families = ["write"]
    actions = [RepairNukeWriteDeadlineTab]

    def process(self, instance):
        group_node = [x for x in instance if x.Class() == "Group"][0]

        msg = "Deadline tab missing on \"{}\"".format(group_node.name())
        assert "Deadline" in group_node.knobs(), msg

@@ -20,83 +20,114 @@ class CollectClips(api.ContextPlugin):

        projectdata = context.data["projectData"]
        version = context.data.get("version", "001")
        instances_data = []
        for item in context.data.get("selection", []):
            # Skip audio track items
            # Try/Except is to handle item types, like EffectTrackItem
            try:
                media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
                if str(item.mediaType()) != media_type:
        sequence = context.data.get("activeSequence")
        selection = context.data.get("selection")

        track_effects = dict()

        # collect all trackItems as instances
        for track_index, video_track in enumerate(sequence.videoTracks()):
            items = video_track.items()
            sub_items = video_track.subTrackItems()

            for item in items:
                # compare with selection, or skip if disabled
                if item not in selection or not item.isEnabled():
                    continue
            except:

                # Skip audio track items
                # Try/Except is to handle item types, like EffectTrackItem
                try:
                    media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
                    if str(item.mediaType()) != media_type:
                        continue
                except Exception:
                    continue

                asset = item.name()
                track = item.parent()
                source = item.source().mediaSource()
                source_path = source.firstpath()
                effects = [f for f in item.linkedItems() if f.isEnabled()]

                # If the source is a *.nk it's a comp effect and we need to
                # fetch the write node output. This should be improved by
                # parsing the script rather than opening it.
                if source_path.endswith(".nk"):
                    nuke.scriptOpen(source_path)
                    # There should only be one.
                    write_node = nuke.allNodes(filter="Write")[0]
                    path = nuke.filename(write_node)

                    if "%" in path:
                        # Get the start frame from the Nuke script and use the
                        # item source in/out, because you can have multiple
                        # shots covered with one nuke script.
                        start_frame = int(nuke.root()["first_frame"].getValue())
                        if write_node["use_limit"].getValue():
                            start_frame = int(write_node["first"].getValue())

                        path = path % (start_frame + item.sourceIn())

                    source_path = path
                    self.log.debug(
                        "Fetched source path \"{}\" from \"{}\" in "
                        "\"{}\".".format(
                            source_path, write_node.name(), source.firstpath()
                        )
                    )

                try:
                    head, padding, ext = os.path.basename(source_path).split(".")
                    source_first_frame = int(padding)
                except Exception:
                    source_first_frame = 0

                data = {"name": "{0}_{1}".format(track.name(), item.name()),
                        "item": item,
                        "source": source,
                        "sourcePath": source_path,
                        "track": track.name(),
                        "trackIndex": track_index,
                        "sourceFirst": source_first_frame,
                        "effects": effects,
                        "sourceIn": int(item.sourceIn()),
                        "sourceOut": int(item.sourceOut()),
                        "clipIn": int(item.timelineIn()),
                        "clipOut": int(item.timelineOut()),
                        "asset": asset,
                        "family": "clip",
                        "families": [],
                        "handles": 0,
                        "handleStart": projectdata.get("handles", 0),
                        "handleEnd": projectdata.get("handles", 0),
                        "version": int(version)}

                instance = context.create_instance(**data)

                self.log.info("Created instance: {}".format(instance))
                self.log.debug(">> effects: {}".format(instance.data["effects"]))

                context.data["assetsShared"][asset] = dict()

            # from now on we are collecting only subTrackItems on
            # tracks with no video items
            if len(items) > 0:
                continue

            track = item.parent()
            source = item.source().mediaSource()
            source_path = source.firstpath()
            # create a list under the track key
            # get all subTrackItems and add them to the context
            track_effects[track_index] = list()

            # If the source is a *.nk it's a comp effect and we need to fetch
            # the write node output. This should be improved by parsing the
            # script rather than opening it.
            if source_path.endswith(".nk"):
                nuke.scriptOpen(source_path)
                # There should only be one.
                write_node = nuke.allNodes(filter="Write")[0]
                path = nuke.filename(write_node)
            # collect all subtrack items
            for sitem in sub_items:
                # unwrap from tuple >> it is always a tuple with one item
                sitem = sitem[0]
                # skip if not enabled
                if not sitem.isEnabled():
                    continue

                if "%" in path:
                    # Get the start frame from the Nuke script and use the
                    # item source in/out, because you can have multiple shots
                    # covered with one nuke script.
                    start_frame = int(nuke.root()["first_frame"].getValue())
                    if write_node["use_limit"].getValue():
                        start_frame = int(write_node["first"].getValue())
                track_effects[track_index].append(sitem)

                path = path % (start_frame + item.sourceIn())

                source_path = path
                self.log.debug(
                    "Fetched source path \"{}\" from \"{}\" in "
                    "\"{}\".".format(
                        source_path, write_node.name(), source.firstpath()
                    )
                )

            try:
                head, padding, ext = os.path.basename(source_path).split(".")
                source_first_frame = int(padding)
            except:
                source_first_frame = 0

            instances_data.append(
                {
                    "name": "{0}_{1}".format(track.name(), item.name()),
                    "item": item,
                    "source": source,
                    "sourcePath": source_path,
                    "track": track.name(),
                    "sourceFirst": source_first_frame,
                    "sourceIn": int(item.sourceIn()),
                    "sourceOut": int(item.sourceOut()),
                    "clipIn": int(item.timelineIn()),
                    "clipOut": int(item.timelineOut())
                }
            )

        for data in instances_data:
            data.update(
                {
                    "asset": data["item"].name(),
                    "family": "clip",
                    "families": [],
                    "handles": 0,
                    "handleStart": projectdata.get("handles", 0),
                    "handleEnd": projectdata.get("handles", 0),
                    "version": int(version)
                }
            )
            instance = context.create_instance(**data)
            self.log.debug(
                "Created instance with data: {}".format(instance.data)
            )
            context.data["assetsShared"][data["asset"]] = dict()
        context.data["trackEffects"] = track_effects
        self.log.debug(">> sub_track_items: `{}`".format(track_effects))
100 pype/plugins/nukestudio/publish/collect_effects.py (new file)

@@ -0,0 +1,100 @@
import pyblish.api
import re


class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
    """Collect video track effects into the instance."""

    order = pyblish.api.CollectorOrder + 0.1015
    label = "Collect Soft Lut Effects"
    families = ["clip"]

    def process(self, instance):

        self.log.debug("Finding soft effect for subset: `{}`".format(
            instance.data.get("subset")))

        # taking the active sequence
        subset = instance.data.get("subset")

        if not subset:
            return

        track_effects = instance.context.data.get("trackEffects", {})
        track_index = instance.data["trackIndex"]
        effects = instance.data["effects"]

        # creating the context attribute
        self.effects = {"assignTo": subset, "effects": dict()}

        for sitem in effects:
            self.add_effect(instance, track_index, sitem)

        for t_index, sitems in track_effects.items():
            for sitem in sitems:
                if not t_index > track_index:
                    continue
                self.log.debug(">> sitem: `{}`".format(sitem))
                self.add_effect(instance, t_index, sitem)

        if self.effects["effects"]:
            instance.data["effectTrackItems"] = self.effects

        if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
            instance.data["families"] += ["lut"]
            self.log.debug("effects.keys: {}".format(
                instance.data.get("effectTrackItems", {}).keys()))
            self.log.debug("effects: {}".format(
                instance.data.get("effectTrackItems", {})))

    def add_effect(self, instance, track_index, item):
        track = item.parentTrack().name()
        # node serialization
        node = item.node()
        node_serialized = self.node_serialisation(instance, node)

        # collect timelineIn/Out
        effect_t_in = int(item.timelineIn())
        effect_t_out = int(item.timelineOut())

        node_name = item.name()
        node_class = re.sub(r"\d+", "", node_name)

        self.effects["effects"].update({node_name: {
            "class": node_class,
            "timelineIn": effect_t_in,
            "timelineOut": effect_t_out,
            "subTrackIndex": item.subTrackIndex(),
            "trackIndex": track_index,
            "track": track,
            "node": node_serialized
        }})

    def node_serialisation(self, instance, node):
        node_serialized = {}
        timeline_in_h = instance.data["clipInH"]
        timeline_out_h = instance.data["clipOutH"]

        # knob keys to ignore
        _ignoring_keys = ['invert_mask', 'help', 'mask',
                          'xpos', 'ypos', 'layer', 'process_mask', 'channel',
                          'channels', 'maskChannelMask', 'maskChannelInput',
                          'note_font', 'note_font_size', 'unpremult',
                          'postage_stamp_frame', 'maskChannel', 'export_cc',
                          'select_cccid', 'mix', 'version', 'matrix']

        # loop through all knobs and collect the ones not ignored,
        # whatever their value
        for knob in node.knobs().keys():
            # skip knobs in the ignore list
            if knob in _ignoring_keys:
                continue

            # get the animation if the knob is animated
            if node[knob].isAnimated():
                # grab the animation including handles
                knob_anim = [node[knob].getValueAt(i)
                             for i in range(timeline_in_h, timeline_out_h + 1)]

                node_serialized[knob] = knob_anim
            else:
                node_serialized[knob] = node[knob].value()

        return node_serialized
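
Putting the collector and the loaders together: `effectTrackItems` is what
later becomes the `lutJson` representation that `LoadLuts` and
`LoadLutsInputProcess` read back. A hedged sample of the resulting JSON
(node names, knob values and frame numbers are illustrative):

    {
        "assignTo": "plateMain",
        "effects": {
            "Grade1": {
                "class": "Grade",
                "timelineIn": 1001,
                "timelineOut": 1049,
                "subTrackIndex": 0,
                "trackIndex": 1,
                "track": "Video 2",
                "node": {
                    "white": [1.0, 1.01, 1.02, 1.03, 1.04],
                    "black_clamp": true
                }
            }
        }
    }

Animated knobs are baked to one value per frame (the `white` list above),
which is why the loaders call `setValueAt()` per frame starting from the
workfile's first frame.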

@@ -38,6 +38,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
        tags = instance.data.get("tags", None)
        clip = instance.data["item"]
        asset = instance.data.get("asset")
        sequence = context.data['activeSequence']
        width = int(sequence.format().width())
        height = int(sequence.format().height())
        pixel_aspect = sequence.format().pixelAspect()

        # build data for inner nukestudio project property
        data = {

@@ -157,6 +161,9 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
            "asset": asset,
            "hierarchy": hierarchy,
            "parents": parents,
            "width": width,
            "height": height,
            "pixelAspect": pixel_aspect,
            "tasks": instance.data["tasks"]
        })

@@ -191,7 +198,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):

    def process(self, context):
        instances = context[:]
        sequence = context.data['activeSequence']

        # create hierarchyContext attr if context has none

        temp_context = {}

@@ -216,6 +223,9 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
            instance.data["parents"] = s_asset_data["parents"]
            instance.data["hierarchy"] = s_asset_data["hierarchy"]
            instance.data["tasks"] = s_asset_data["tasks"]
            instance.data["width"] = s_asset_data["width"]
            instance.data["height"] = s_asset_data["height"]
            instance.data["pixelAspect"] = s_asset_data["pixelAspect"]

            # adding frame start if any on instance
            start_frame = s_asset_data.get("startingFrame")

@@ -265,16 +275,10 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):

            # adding SourceResolution if the Tag was present
            if instance.data.get("main"):
                width = int(sequence.format().width())
                height = int(sequence.format().height())
                pixel_aspect = sequence.format().pixelAspect()
                self.log.info("Sequence Width,Height,PixelAspect are: `{0},{1},{2}`".format(
                    width, height, pixel_aspect))

                in_info['custom_attributes'].update({
                    "resolutionWidth": width,
                    "resolutionHeight": height,
                    "pixelAspect": pixel_aspect
                    "resolutionWidth": instance.data["width"],
                    "resolutionHeight": instance.data["height"],
                    "pixelAspect": instance.data["pixelAspect"]
                })

                in_info['tasks'] = instance.data['tasks']
@@ -66,11 +66,14 @@ class CollectPlates(api.InstancePlugin):
        item = instance.data["item"]
        width = int(item.source().mediaSource().width())
        height = int(item.source().mediaSource().height())
        self.log.info("Source Width and Height are: `{0} x {1}`".format(
            width, height))
        pixel_aspect = int(item.source().mediaSource().pixelAspect())

        self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
            width, height, pixel_aspect))
        data.update({
            "width": width,
            "height": height
            "height": height,
            "pixelAspect": pixel_aspect
        })

        self.log.debug("Creating instance with name: {}".format(data["name"]))

@@ -123,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
        transfer_data = [
            "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
            "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
            "clipInH", "clipOutH", "asset", "track", "version"
            "clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
        ]

        # pass data to version

@@ -133,6 +136,7 @@ class CollectPlatesData(api.InstancePlugin):
        version_data.update({
            "handles": version_data['handleStart'],
            "colorspace": item.sourceMediaColourTransform(),
            "colorspaceScript": instance.context.data["colorspace"],
            "families": [f for f in families if 'ftrack' not in f],
            "subset": name,
            "fps": instance.context.data["fps"]

@@ -171,6 +175,8 @@ class CollectPlatesData(api.InstancePlugin):
        if os.path.exists(mov_path):
            # adding mov into the representations
            self.log.debug("__ mov_path: {}".format(mov_path))
            instance.data["label"] += " - review"

            plates_mov_representation = {
                'files': mov_file,
                'stagingDir': staging_dir,
@@ -14,12 +14,4 @@ class CollectSelection(pyblish.api.ContextPlugin):

        self.log.debug("selection: {}".format(selection))

        # if not selection:
        #     self.log.debug(
        #         "Nothing is selected. Collecting all items from sequence "
        #         "\"{}\"".format(hiero.ui.activeSequence())
        #     )
        #     for track in hiero.ui.activeSequence().items():
        #         selection.extend(track.items())

        context.data["selection"] = selection
@@ -1,7 +1,7 @@
from pyblish import api


class CollectShots(api.ContextPlugin):
class CollectShots(api.InstancePlugin):
    """Collect Shot from Clip."""

    # Run just before CollectClipSubsets

@@ -10,63 +10,63 @@ class CollectShots(api.ContextPlugin):
    hosts = ["nukestudio"]
    families = ["clip"]

    def process(self, context):
        for instance in context[:]:
            # Exclude non-tagged instances.
            tagged = False
            for tag in instance.data["tags"]:
                if tag["name"].lower() == "hierarchy":
                    tagged = True
    def process(self, instance):
        self.log.debug(
            "Skipping \"{}\" because its not tagged with "
            "\"Hierarchy\"".format(instance))
        # Exclude non-tagged instances.
        tagged = False
        for tag in instance.data["tags"]:
            if tag["name"].lower() == "hierarchy":
                tagged = True

            if not tagged:
                self.log.debug(
                    "Skipping \"{}\" because its not tagged with "
                    "\"Hierarchy\"".format(instance)
                )
                continue

            # Collect data.
            data = {}
            for key, value in instance.data.iteritems():
                data[key] = value

            # Collect comments.
            data["comments"] = []

            # Collect comment tags.
            for tag in instance.data["tags"]:
                if tag["name"].lower() == "comment":
                    data["comments"].append(
                        tag.metadata().dict()["tag.note"]
                    )

            # Find tags on the source clip.
            tags = instance.data["item"].source().tags()
            for tag in tags:
                if tag.name().lower() == "comment":
                    data["comments"].append(
                        tag.metadata().dict()["tag.note"]
                    )

            data["family"] = "shot"
            data["families"] = []

            data["subset"] = data["family"] + "Main"

            data["name"] = data["subset"] + "_" + data["asset"]

            data["label"] = (
                "{} - {} - tasks:{} - assetbuilds:{} - comments:{}".format(
                    data["asset"],
                    data["subset"],
                    data["tasks"],
                    [x["name"] for x in data.get("assetbuilds", [])],
                    len(data["comments"])
                )
        if not tagged:
            self.log.debug(
                "Skipping \"{}\" because its not tagged with "
                "\"Hierarchy\"".format(instance)
            )
            return

            # Create instance.
            self.log.debug("Creating instance with: {}".format(data["name"]))
            instance.context.create_instance(**data)
        # Collect data.
        data = {}
        for key, value in instance.data.iteritems():
            data[key] = value

        self.log.debug("_ context: {}".format(context[:]))
        # Collect comments.
        data["comments"] = []

        # Collect comment tags.
        for tag in instance.data["tags"]:
            if tag["name"].lower() == "comment":
                data["comments"].append(
                    tag.metadata().dict()["tag.note"]
                )

        # Find tags on the source clip.
        tags = instance.data["item"].source().tags()
        for tag in tags:
            if tag.name().lower() == "comment":
                data["comments"].append(
                    tag.metadata().dict()["tag.note"]
                )

        data["family"] = "shot"
        data["families"] = []

        data["subset"] = data["family"] + "Main"

        data["name"] = data["subset"] + "_" + data["asset"]

        data["label"] = (
            "{} - {} - tasks:{} - assetbuilds:{} - comments:{}".format(
                data["asset"],
                data["subset"],
                data["tasks"],
                [x["name"] for x in data.get("assetbuilds", [])],
                len(data["comments"])
            )
        )

        # Create instance.
        self.log.debug("Creating instance with: {}".format(data["name"]))
        instance.context.create_instance(**data)

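The rewrite above converts `CollectShots` from a `ContextPlugin`, which loops over `context[:]` itself, to an `InstancePlugin`, which pyblish invokes once per instance matching `families`; `continue` therefore becomes `return`. A minimal skeleton of that conversion (class and key names illustrative):

    import pyblish.api


    class CollectBefore(pyblish.api.ContextPlugin):
        """Context-style: the plugin owns the loop and skips with `continue`."""

        def process(self, context):
            for instance in context[:]:
                if not instance.data.get("tags"):
                    continue  # skip this instance, keep iterating
                # ... per-instance work ...


    class CollectAfter(pyblish.api.InstancePlugin):
        """Instance-style: pyblish owns the loop and the family filtering."""

        families = ["clip"]

        def process(self, instance):
            if not instance.data.get("tags"):
                return  # early-out replaces `continue`
            # ... per-instance work ...
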
@@ -19,13 +19,14 @@ class CollectClipTagFrameStart(api.InstancePlugin):

        # gets only task family tags and collect labels
        if "frameStart" in t_family:
            t_value = t_metadata.get("tag.value", "")
            t_value = t_metadata.get("tag.value", None)

            # backward compatibility
            t_number = t_metadata.get("tag.number", "")
            t_number = t_metadata.get("tag.number", None)
            start_frame = t_number or t_value

            try:
                start_frame = int(t_number) or int(t_value)
                start_frame = int(start_frame)
            except ValueError:
                if "source" in t_value:
                    source_first = instance.data["sourceFirst"]

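With both defaults now `None`, `t_number or t_value` picks whichever metadata key is present and the `int()` conversion happens once on the combined result; non-numeric values such as "source" fall through to the except branch. A self-contained sketch of that lookup order (the sample metadata is hypothetical, and TypeError is caught here as well, since `int(None)` would raise it):

    def resolve_start_frame(t_metadata):
        t_value = t_metadata.get("tag.value", None)
        t_number = t_metadata.get("tag.number", None)  # backward compatibility
        start_frame = t_number or t_value
        try:
            return int(start_frame)
        except (TypeError, ValueError):
            return start_frame  # e.g. "source", resolved by the caller

    assert resolve_start_frame({"tag.number": "1001"}) == 1001
    assert resolve_start_frame({"tag.value": "996"}) == 996
    assert resolve_start_frame({"tag.value": "source"}) == "source"
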
pype/plugins/nukestudio/publish/extract_effects.py (new file, +231 lines)

@@ -0,0 +1,231 @@
# from pype import plugins
import os
import json
import re
import pyblish.api
import tempfile
from avalon import io, api


class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
    """Export video-track soft effects (LUTs) into a JSON representation."""

    order = pyblish.api.ExtractorOrder
    label = "Export Soft Lut Effects"
    families = ["lut"]

    def process(self, instance):
        item = instance.data["item"]
        effects = instance.data.get("effectTrackItems")

        instance.data["families"] = [
            f for f in instance.data.get("families", []) if f not in ["lut"]
        ]

        self.log.debug("___ instance.data[families]: `{}`".format(
            instance.data["families"]))

        # skip any without effects
        if not effects:
            return

        subset = instance.data.get("subset")
        subset_split = re.findall(r'[A-Z][^A-Z]*', subset)

        if len(subset_split) > 0:
            root_name = subset.replace(subset_split[0], "")
            subset_split.insert(0, root_name.capitalize())

        subset_split.insert(0, "lut")

        self.log.debug("creating staging dir")
        # staging_dir = self.staging_dir(instance)

        # TODO: provisional only; will be replaced by a function
        staging_dir = instance.data.get('stagingDir', None)

        if not staging_dir:
            staging_dir = os.path.normpath(
                tempfile.mkdtemp(prefix="pyblish_tmp_")
            )
            instance.data['stagingDir'] = staging_dir

        self.log.debug("creating staging dir: `{}`".format(staging_dir))

        transfers = list()
        if "transfers" not in instance.data:
            instance.data["transfers"] = list()

        name = "".join(subset_split)
        ext = "json"
        file = name + "." + ext

        # create new instance and inherit data
        data = {}
        for key, value in instance.data.iteritems():
            data[key] = value

        # change names
        data["subset"] = name
        data["family"] = "lut"
        data["families"] = []
        data["name"] = data["subset"] + "_" + data["asset"]
        data["label"] = "{} - {} - ({})".format(
            data['asset'], data["subset"], os.path.splitext(file)[1]
        )
        data["source"] = data["sourcePath"]

        # create new instance
        instance = instance.context.create_instance(**data)

        dst_dir = self.resource_destination_dir(instance)

        # change paths in effects to files
        for k, effect in effects["effects"].items():
            trn = self.copy_linked_files(effect, dst_dir)
            if trn:
                transfers.append((trn[0], trn[1]))

        instance.data["transfers"].extend(transfers)
        self.log.debug("_ transfers: `{}`".format(
            instance.data["transfers"]))

        # create representations
        instance.data["representations"] = list()

        transfer_data = [
            "handleStart", "handleEnd", "sourceIn", "sourceOut",
            "frameStart", "frameEnd", "sourceInH", "sourceOutH",
            "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
            "version"
        ]

        # pass data to version
        version_data = dict()
        version_data.update({k: instance.data[k] for k in transfer_data})

        # add to data of representation
        version_data.update({
            "handles": version_data['handleStart'],
            "colorspace": item.sourceMediaColourTransform(),
            "colorspaceScript": instance.context.data["colorspace"],
            "families": ["plate", "lut"],
            "subset": name,
            "fps": instance.context.data["fps"]
        })
        instance.data["versionData"] = version_data

        representation = {
            'files': file,
            'stagingDir': staging_dir,
            'name': "lut" + ext.title(),
            'ext': ext
        }
        instance.data["representations"].append(representation)

        self.log.debug("_ representations: `{}`".format(
            instance.data["representations"]))

        self.log.debug("_ version_data: `{}`".format(
            instance.data["versionData"]))

        with open(os.path.join(staging_dir, file), "w") as outfile:
            outfile.write(json.dumps(effects, indent=4, sort_keys=True))

        return

    def copy_linked_files(self, effect, dst_dir):
        for k, v in effect["node"].items():
            if k == "file" and v != '':
                base_name = os.path.basename(v)
                dst = os.path.join(dst_dir, base_name).replace("\\", "/")

                # add it to the json
                effect["node"][k] = dst
                return (v, dst)

    def resource_destination_dir(self, instance):
        anatomy = instance.context.data['anatomy']
        self.create_destination_template(instance, anatomy)

        return os.path.join(
            instance.data["assumedDestination"],
            "resources"
        )

    def create_destination_template(self, instance, anatomy):
        """Create a filepath based on the current data available.

        Example template:
            {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
            {subset}.{representation}

        Args:
            instance: the instance to publish
            anatomy: anatomy object holding the publish templates

        Note:
            Stores the result in `instance.data["assumedDestination"]`
            rather than returning it.
        """

        # get all the stuff from the database
        subset_name = instance.data["subset"]
        self.log.info(subset_name)
        asset_name = instance.data["asset"]
        project_name = api.Session["AVALON_PROJECT"]
        a_template = anatomy.templates

        project = io.find_one({"type": "project",
                               "name": project_name},
                              projection={"config": True, "data": True})

        template = a_template['publish']['path']
        # anatomy = instance.context.data['anatomy']

        asset = io.find_one({"type": "asset",
                             "name": asset_name,
                             "parent": project["_id"]})

        assert asset, ("No asset found by the name '{}' "
                       "in project '{}'".format(asset_name, project_name))
        silo = asset['silo']

        subset = io.find_one({"type": "subset",
                              "name": subset_name,
                              "parent": asset["_id"]})

        # assume there is no version yet, we start at `1`
        version = None
        version_number = 1
        if subset is not None:
            version = io.find_one({"type": "version",
                                   "parent": subset["_id"]},
                                  sort=[("name", -1)])

            # if there is a subset there ought to be version
            if version is not None:
                version_number += version["name"]

        if instance.data.get('version'):
            version_number = int(instance.data.get('version'))

        padding = int(a_template['render']['padding'])

        hierarchy = asset['data']['parents']
        if hierarchy:
            # hierarchy = os.path.sep.join(hierarchy)
            hierarchy = "/".join(hierarchy)

        template_data = {"root": api.Session["AVALON_PROJECTS"],
                         "project": {"name": project_name,
                                     "code": project['data']['code']},
                         "silo": silo,
                         "family": instance.data['family'],
                         "asset": asset_name,
                         "subset": subset_name,
                         "frame": ('#' * padding),
                         "version": version_number,
                         "hierarchy": hierarchy,
                         "representation": "TEMP"}

        instance.data["assumedTemplateData"] = template_data
        self.log.info(template_data)
        instance.data["template"] = template
        # We take the parent folder of representation 'filepath'
        instance.data["assumedDestination"] = os.path.dirname(
            anatomy.format(template_data)["publish"]["path"]
        )

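The least obvious step in the new extractor is the subset renaming near the top of `process()`: the regex splits the subset on capital letters, the lowercase root is re-capitalized, and "lut" is prefixed. A worked example with a hypothetical subset name:

    import re

    subset = "plateMain"  # hypothetical
    subset_split = re.findall(r'[A-Z][^A-Z]*', subset)   # ['Main']

    if len(subset_split) > 0:
        root_name = subset.replace(subset_split[0], "")  # 'plate'
        subset_split.insert(0, root_name.capitalize())   # ['Plate', 'Main']

    subset_split.insert(0, "lut")                        # ['lut', 'Plate', 'Main']
    assert "".join(subset_split) == "lutPlateMain"
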
pype/tests/test_avalon_plugin_presets.py (new file, +40 lines)

@@ -0,0 +1,40 @@
import avalon.api as api
import pype


class MyTestCreator(api.Creator):

    my_test_property = "A"

    def __init__(self, name, asset, options=None, data=None):
        super(MyTestCreator, self).__init__(name, asset,
                                            options=options, data=data)


# This is a hack like no other - we need to inject our own avalon host
# and bypass all its validation. Avalon hosts are modules that need an
# `ls` callable as an attribute. Voila:
class Test:
    __name__ = "test"
    ls = len

    def __call__(self):
        pass


def test_avalon_plugin_presets(monkeypatch, printer):

    pype.install()
    api.register_host(Test())
    api.register_plugin(api.Creator, MyTestCreator)
    plugins = api.discover(api.Creator)
    printer("Test if we got our test plugin")
    assert MyTestCreator in plugins
    for p in plugins:
        if p.__name__ == "MyTestCreator":
            printer("Test if we have overridden an existing property")
            assert p.my_test_property == "B"
            printer("Test if we have overridden a superclass property")
            assert p.active is False
            printer("Test if we have added a new property")
            assert p.new_property == "new"

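For these assertions to pass, presets must exist for host "test" (the injected module's `__name__`) and plugin type "create". A sketch of the preset data the configuration would have to provide; the on-disk location and exact schema are studio-specific assumptions:

    # Assumed shape only - keyed by host, plugin type, then plugin class name.
    presets = {
        "plugins": {
            "test": {                          # host, from Test.__name__
                "create": {                    # plugin type for api.Creator
                    "MyTestCreator": {
                        "enabled": False,          # flips `active` to False
                        "my_test_property": "B",   # overrides class attribute
                        "new_property": "new",     # attribute added by preset
                    }
                }
            }
        }
    }
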
@@ -18,7 +18,7 @@ def test_pyblish_plugin_filter_modifier(printer, monkeypatch):
    assert len(plugins) == 0
    paths = pyblish.api.registered_paths()
    printer("Test if we have no registered plugin paths")
    print(paths)
    assert len(paths) == 0

    class MyTestPlugin(pyblish.api.InstancePlugin):
        my_test_property = 1

@@ -124,6 +124,26 @@ class Popup2(Popup):
        fix = self.widgets["show"]
        fix.setText("Fix")

    def calculate_window_geometry(self):
        """Calculate the window geometry.

        On creation, center the window on the screen that hosts the
        parent widget, or on the screen under the mouse cursor.

        """
        parent_widget = self.parent()

        app = QtWidgets.QApplication.instance()
        if parent_widget:
            screen = app.desktop().screenNumber(parent_widget)
        else:
            screen = app.desktop().screenNumber(app.desktop().cursor().pos())
        center_point = app.desktop().screenGeometry(screen).center()

        frame_geo = self.frameGeometry()
        frame_geo.moveCenter(center_point)

        return frame_geo


@contextlib.contextmanager
def application():

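The override returns a frame rectangle centered on the active screen instead of the base class's bottom-right alignment. A self-contained sketch of the same centering idiom, assuming the Qt.py shim pype uses and a Qt version where `QApplication.desktop()` is still available:

    import sys
    from Qt import QtWidgets  # Qt.py shim; PySide/PySide2 behave the same

    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QWidget()
    window.resize(400, 120)

    # Pick the screen under the mouse cursor, then move the frame's
    # center onto that screen's center - the same steps as the diff.
    desktop = app.desktop()
    screen = desktop.screenNumber(desktop.cursor().pos())
    center_point = desktop.screenGeometry(screen).center()

    frame_geo = window.frameGeometry()
    frame_geo.moveCenter(center_point)
    window.move(frame_geo.topLeft())
    window.show()
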
Binary image changes: 20 documentation screenshots replaced with substantially smaller files (before sizes 7.6 KiB to 215 KiB, after sizes 3.8 KiB to 13 KiB); no other metadata is recoverable from the diff.