Mirror of https://github.com/ynput/ayon-core.git (synced 2025-12-24 21:04:40 +01:00)

Merge branch 'develop' into feature/PYPE-95-nks-load-subset-to-timeline

Commit aea8858d70: 42 changed files with 2533 additions and 1210 deletions
@@ -11,6 +11,7 @@ log = logging.getLogger(__name__)

 __version__ = "2.5.0"

+PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS")
 PACKAGE_DIR = os.path.dirname(__file__)
 PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -72,6 +73,18 @@ def install():
     pyblish.register_discovery_filter(filter_pyblish_plugins)
     avalon.register_plugin_path(avalon.Loader, LOAD_PATH)

+    # Register project specific plugins
+    project_name = os.environ.get("AVALON_PROJECT")
+    if PROJECT_PLUGINS_PATH and project_name:
+        for path in PROJECT_PLUGINS_PATH.split(os.pathsep):
+            if not path:
+                continue
+            plugin_path = os.path.join(path, project_name, "plugins")
+            if os.path.exists(plugin_path):
+                pyblish.register_plugin_path(plugin_path)
+                avalon.register_plugin_path(avalon.Loader, plugin_path)
+                avalon.register_plugin_path(avalon.Creator, plugin_path)
+
     # apply monkey patched discover to original one
     avalon.discover = patched_discover
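For illustration only (not part of the commit): the new registration is driven by two environment variables. A minimal sketch, with hypothetical paths and project name:

    import os

    os.environ["PYPE_PROJECT_PLUGINS"] = os.pathsep.join([
        "/studio/plugin_overrides",       # <root>/<project>/plugins is probed
        "/mnt/shared/plugin_overrides",
    ])
    os.environ["AVALON_PROJECT"] = "demo_project"
    # install() would then register these directories, when they exist:
    #   /studio/plugin_overrides/demo_project/plugins
    #   /mnt/shared/plugin_overrides/demo_project/plugins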
@@ -19,7 +19,7 @@ class CreateProjectFolders(BaseAction):
     #: Action description.
     description = 'Creates folder structure'
     #: roles that are allowed to register this action
-    role_list = ['Pypeclub', 'Administrator']
+    role_list = ['Pypeclub', 'Administrator', 'Project Manager']
     icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format(
         os.environ.get('PYPE_STATICS_SERVER', '')
     )
@@ -2067,9 +2067,10 @@ class SyncEntitiesFactory:
             # different hierarchy - can't recreate entity
             continue

-        _vis_parent = str(deleted_entity["data"]["visualParent"])
+        _vis_parent = deleted_entity["data"]["visualParent"]
+        if _vis_parent is None:
+            _vis_parent = self.avalon_project_id
+        _vis_parent = str(_vis_parent)
         ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent]
         self.create_ftrack_ent_from_avalon_ent(
             deleted_entity, ftrack_parent_id
@@ -193,6 +193,8 @@ class AppAction(BaseHandler):
         if parents:
             hierarchy = os.path.join(*parents)

+        os.environ["AVALON_HIERARCHY"] = hierarchy
+
         application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])

         data = {
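For illustration only (not part of the commit): AVALON_HIERARCHY is simply the joined parent chain. A toy example with a hypothetical parent list:

    import os

    parents = ["ep01", "sq010", "sh010"]   # hypothetical asset parents
    hierarchy = os.path.join(*parents)     # "ep01/sq010/sh010" (separator is OS-dependent)
    os.environ["AVALON_HIERARCHY"] = hierarchy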
pype/lib.py: 56 changed lines

@@ -13,6 +13,62 @@ import avalon

 log = logging.getLogger(__name__)


+def get_paths_from_environ(env_key, return_first=False):
+    """Return existing paths from specific environment variable.
+
+    :param env_key: Environment key where should look for paths.
+    :type env_key: str
+    :param return_first: Return first path on `True`, list of all on `False`.
+    :type return_first: bool
+
+    Difference when none of the paths exists:
+    - when `return_first` is set to `False` the function returns an empty list.
+    - when `return_first` is set to `True` the function returns `None`.
+    """
+    existing_paths = []
+    paths = os.environ.get(env_key) or ""
+    path_items = paths.split(os.pathsep)
+    for path in path_items:
+        # Skip empty string
+        if not path:
+            continue
+        # Normalize path
+        path = os.path.normpath(path)
+        # Check if path exists
+        if os.path.exists(path):
+            # Return path if `return_first` is set to True
+            if return_first:
+                return path
+            # Store path
+            existing_paths.append(path)
+
+    # Return None if none of the paths exists
+    if return_first:
+        return None
+    # Return all existing paths from environment variable
+    return existing_paths
+
+
+def get_ffmpeg_tool_path(tool="ffmpeg"):
+    """Find path to ffmpeg tool in FFMPEG_PATH paths.
+
+    Function looks for the tool in paths set in the FFMPEG_PATH environment
+    variable. If the tool exists, its full path is returned.
+
+    Returns the tool name itself when the tool path was not found (the FFmpeg
+    path may be set in the PATH environment variable).
+    """
+    dir_paths = get_paths_from_environ("FFMPEG_PATH")
+    for dir_path in dir_paths:
+        for file_name in os.listdir(dir_path):
+            base, ext = os.path.splitext(file_name)
+            if base.lower() == tool.lower():
+                return os.path.join(dir_path, tool)
+    return tool
+
+
 # Special naming case for subprocess since its a built-in method.
 def _subprocess(*args, **kwargs):
     """Convenience method for getting output errors for subprocess."""
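A hedged usage sketch of the two helpers added above (directories are hypothetical; assumes pype.lib is importable):

    import os
    import pype.lib

    os.environ["FFMPEG_PATH"] = os.pathsep.join(
        ["/opt/ffmpeg/bin", "/usr/local/ffmpeg/bin"]
    )

    print(pype.lib.get_paths_from_environ("FFMPEG_PATH"))                     # all existing dirs
    print(pype.lib.get_paths_from_environ("FFMPEG_PATH", return_first=True))  # first dir or None
    print(pype.lib.get_ffmpeg_tool_path("ffprobe"))                           # full path, or "ffprobe"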
@@ -8,7 +8,6 @@ from avalon import api as avalon, pipeline, maya
 from avalon.maya.pipeline import IS_HEADLESS
-from avalon.tools import workfiles
 from pyblish import api as pyblish
 from pypeapp import config

 from ..lib import (
     any_outdated
@@ -156,6 +155,12 @@ def on_open(_):
     from avalon.vendor.Qt import QtWidgets
     from ..widgets import popup

+    cmds.evalDeferred(
+        "from pype.maya import lib;lib.remove_render_layer_observer()")
+    cmds.evalDeferred(
+        "from pype.maya import lib;lib.add_render_layer_observer()")
+    cmds.evalDeferred(
+        "from pype.maya import lib;lib.add_render_layer_change_observer()")
     # # Update current task for the current scene
     # update_task_from_path(cmds.file(query=True, sceneName=True))
@@ -194,6 +199,12 @@ def on_new(_):
     """Set project resolution and fps when create a new file"""
     avalon.logger.info("Running callback on new..")
     with maya.suspended_refresh():
+        cmds.evalDeferred(
+            "from pype.maya import lib;lib.remove_render_layer_observer()")
+        cmds.evalDeferred(
+            "from pype.maya import lib;lib.add_render_layer_observer()")
+        cmds.evalDeferred(
+            "from pype.maya import lib;lib.add_render_layer_change_observer()")
         lib.set_context_settings()
@@ -218,3 +229,10 @@ def on_task_changed(*args):

     # Run
     maya.pipeline._on_task_changed()
+    with maya.suspended_refresh():
+        lib.set_context_settings()
+        lib.update_content_on_context_change()
+
+    lib.show_message("Context was changed",
+                     ("Context was changed to {}".format(
+                         avalon.Session["AVALON_ASSET"])))
pype/maya/lib.py: 193 changed lines

@@ -2399,15 +2399,19 @@ class shelf():
             if not item.get('command'):
                 item['command'] = self._null
             if item['type'] == 'button':
-                self.addButon(item['name'], command=item['command'])
+                self.addButon(item['name'],
+                              command=item['command'],
+                              icon=item['icon'])
             if item['type'] == 'menuItem':
                 self.addMenuItem(item['parent'],
                                  item['name'],
-                                 command=item['command'])
+                                 command=item['command'],
+                                 icon=item['icon'])
             if item['type'] == 'subMenu':
                 self.addMenuItem(item['parent'],
                                  item['name'],
-                                 command=item['command'])
+                                 command=item['command'],
+                                 icon=item['icon'])

     def addButon(self, label, icon="commandButton.png",
                  command=_null, doubleCommand=_null):
@@ -2417,7 +2421,8 @@ class shelf():
         '''
         cmds.setParent(self.name)
         if icon:
-            icon = self.iconPath + icon
+            icon = os.path.join(self.iconPath, icon)
+            print(icon)
         cmds.shelfButton(width=37, height=37, image=icon, label=label,
                          command=command, dcc=doubleCommand,
                          imageOverlayLabel=label, olb=self.labelBackground,
@@ -2429,7 +2434,8 @@ class shelf():
         double click command and image.
         '''
         if icon:
-            icon = self.iconPath + icon
+            icon = os.path.join(self.iconPath, icon)
+            print(icon)
         return cmds.menuItem(p=parent, label=label, c=command, i="")

     def addSubMenu(self, parent, label, icon=None):
@@ -2438,7 +2444,8 @@ class shelf():
         the specified parent popup menu.
         '''
         if icon:
-            icon = self.iconPath + icon
+            icon = os.path.join(self.iconPath, icon)
+            print(icon)
         return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1)

     def _cleanOldShelf(self):
@@ -2452,3 +2459,177 @@ class shelf():
                 cmds.deleteUI(each)
         else:
             cmds.shelfLayout(self.name, p="ShelfLayout")

(all following lines added)

def _get_render_instance():
    objectset = cmds.ls("*.id", long=True, type="objectSet",
                        recursive=True, objectsOnly=True)

    for objset in objectset:

        if not cmds.attributeQuery("id", node=objset, exists=True):
            continue

        id_attr = "{}.id".format(objset)
        if cmds.getAttr(id_attr) != "pyblish.avalon.instance":
            continue

        has_family = cmds.attributeQuery("family",
                                         node=objset,
                                         exists=True)
        if not has_family:
            continue

        if cmds.getAttr("{}.family".format(objset)) == 'rendering':
            return objset

    return None


renderItemObserverList = []


class RenderSetupListObserver:

    def listItemAdded(self, item):
        print("--- adding ...")
        self._add_render_layer(item)

    def listItemRemoved(self, item):
        print("--- removing ...")
        self._remove_render_layer(item.name())

    def _add_render_layer(self, item):
        render_set = _get_render_instance()
        layer_name = item.name()

        if not render_set:
            return

        members = cmds.sets(render_set, query=True) or []
        if "LAYER_{}".format(layer_name) not in members:
            print(" - creating set for {}".format(layer_name))
            layer_set = cmds.sets(n="LAYER_{}".format(layer_name), empty=True)
            cmds.sets(layer_set, forceElement=render_set)
            rio = RenderSetupItemObserver(item)
            print("- adding observer for {}".format(item.name()))
            item.addItemObserver(rio.itemChanged)
            renderItemObserverList.append(rio)

    def _remove_render_layer(self, layer_name):
        render_set = _get_render_instance()

        if not render_set:
            return

        members = cmds.sets(render_set, query=True)
        if "LAYER_{}".format(layer_name) in members:
            print(" - removing set for {}".format(layer_name))
            cmds.delete("LAYER_{}".format(layer_name))


class RenderSetupItemObserver():

    def __init__(self, item):
        self.item = item
        self.original_name = item.name()

    def itemChanged(self, *args, **kwargs):
        if self.item.name() == self.original_name:
            return

        render_set = _get_render_instance()

        if not render_set:
            return

        members = cmds.sets(render_set, query=True)
        if "LAYER_{}".format(self.original_name) in members:
            print(" <> renaming {} to {}".format(self.original_name,
                                                 self.item.name()))
            cmds.rename("LAYER_{}".format(self.original_name),
                        "LAYER_{}".format(self.item.name()))
        self.original_name = self.item.name()


renderListObserver = RenderSetupListObserver()


def add_render_layer_change_observer():
    import maya.app.renderSetup.model.renderSetup as renderSetup

    rs = renderSetup.instance()
    render_set = _get_render_instance()
    if not render_set:
        return

    members = cmds.sets(render_set, query=True)
    layers = rs.getRenderLayers()
    for layer in layers:
        if "LAYER_{}".format(layer.name()) in members:
            rio = RenderSetupItemObserver(layer)
            print("- adding observer for {}".format(layer.name()))
            layer.addItemObserver(rio.itemChanged)
            renderItemObserverList.append(rio)


def add_render_layer_observer():
    import maya.app.renderSetup.model.renderSetup as renderSetup

    print("> adding renderSetup observer ...")
    rs = renderSetup.instance()
    rs.addListObserver(renderListObserver)


def remove_render_layer_observer():
    import maya.app.renderSetup.model.renderSetup as renderSetup

    print("< removing renderSetup observer ...")
    rs = renderSetup.instance()
    try:
        rs.removeListObserver(renderListObserver)
    except ValueError:
        # no observer set yet
        pass


def update_content_on_context_change():
    """Update scene content to match the new asset on context change."""
    scene_sets = cmds.listSets(allSets=True)
    new_asset = api.Session["AVALON_ASSET"]
    new_data = lib.get_asset()["data"]
    for s in scene_sets:
        try:
            if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance":
                attr = cmds.listAttr(s)
                print(s)
                if "asset" in attr:
                    print(" - setting asset to: [ {} ]".format(new_asset))
                    cmds.setAttr("{}.asset".format(s),
                                 new_asset, type="string")
                if "frameStart" in attr:
                    cmds.setAttr("{}.frameStart".format(s),
                                 new_data["frameStart"])
                if "frameEnd" in attr:
                    cmds.setAttr("{}.frameEnd".format(s),
                                 new_data["frameEnd"])
        except ValueError:
            pass


def show_message(title, msg):
    from avalon.vendor.Qt import QtWidgets
    from ..widgets import message_window

    # Find maya main window
    top_level_widgets = {w.objectName(): w for w in
                         QtWidgets.QApplication.topLevelWidgets()}

    parent = top_level_widgets.get("MayaWindow", None)
    if parent is not None:
        message_window.message(title=title, message=msg, parent=parent)
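The bookkeeping above, in isolation: every Render Setup layer gets a companion objectSet named LAYER_<layer> inside the publish instance set, and the observers keep those sets in sync on add, remove, and rename. A minimal sketch of the create step (Maya only; assumes the render instance set already exists):

    from maya import cmds

    def sync_layer_set(render_set, layer_name):
        # Create the companion set for a layer unless it is already a member.
        members = cmds.sets(render_set, query=True) or []
        tracked = "LAYER_{}".format(layer_name)
        if tracked not in members:
            layer_set = cmds.sets(name=tracked, empty=True)
            cmds.sets(layer_set, forceElement=render_set)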
@@ -5,13 +5,6 @@ from pypeapp import Logger
 from avalon.api import Session
 from hiero.ui import findMenuAction

-# this way we secure compatibility between nuke 10 and 11
-try:
-    from PySide.QtGui import *
-except Exception:
-    from PySide2.QtGui import *
-    from PySide2.QtWidgets import *
-
 from .tags import add_tags_from_presets

 from .lib import (
@@ -50,14 +43,8 @@ def install():
     """

     # here is the best place to add menu
-    from avalon.tools import (
-        creator,
-        publish,
-        cbloader,
-        cbsceneinventory,
-        contextmanager,
-        libraryloader
-    )
+    from avalon.tools import publish, cbloader
+    from avalon.vendor.Qt import QtGui

     menu_name = os.environ['AVALON_LABEL']
@@ -67,94 +54,57 @@ def install():

     self._change_context_menu = context_label

-    # Grab Hiero's MenuBar
-    M = hiero.ui.menuBar()
-
     try:
         check_made_menu = findMenuAction(menu_name)
     except Exception:
-        pass
+        check_made_menu = None

     if not check_made_menu:
-        menu = M.addMenu(menu_name)
+        # Grab Hiero's MenuBar
+        menu = hiero.ui.menuBar().addMenu(menu_name)
     else:
         menu = check_made_menu.menu()

-    actions = [
-        {
-            'parent': context_label,
-            'action': QAction('Set Context', None),
-            'function': contextmanager.show,
-            'icon': QIcon('icons:Position.png')
-        },
-        "separator",
-        {
-            'action': QAction("Work Files...", None),
-            'function': set_workfiles,
-            'icon': QIcon('icons:Position.png')
-        },
-        {
-            'action': QAction('Create Default Tags..', None),
-            'function': add_tags_from_presets,
-            'icon': QIcon('icons:Position.png')
-        },
-        "separator",
-        # {
-        #     'action': QAction('Create...', None),
-        #     'function': creator.show,
-        #     'icon': QIcon('icons:ColorAdd.png')
-        # },
-        {
-            'action': QAction('Load...', None),
-            'function': cbloader.show,
-            'icon': QIcon('icons:CopyRectangle.png')
-        },
-        {
-            'action': QAction('Publish...', None),
-            'function': publish.show,
-            'icon': QIcon('icons:Output.png')
-        },
-        # {
-        #     'action': QAction('Manage...', None),
-        #     'function': cbsceneinventory.show,
-        #     'icon': QIcon('icons:ModifyMetaData.png')
-        # },
-        {
-            'action': QAction('Library...', None),
-            'function': libraryloader.show,
-            'icon': QIcon('icons:ColorAdd.png')
-        },
-        "separator",
-        {
-            'action': QAction('Reload pipeline...', None),
-            'function': reload_config,
-            'icon': QIcon('icons:ColorAdd.png')
-        }]
-
-    # Create menu items
-    for a in actions:
-        add_to_menu = menu
-        if isinstance(a, dict):
-            # create action
-            for k in a.keys():
-                if 'parent' in k:
-                    submenus = [sm for sm in a[k].split('/')]
-                    submenu = None
-                    for sm in submenus:
-                        if submenu:
-                            submenu.addMenu(sm)
-                        else:
-                            submenu = menu.addMenu(sm)
-                    add_to_menu = submenu
-                if 'action' in k:
-                    action = a[k]
-                elif 'function' in k:
-                    action.triggered.connect(a[k])
-                elif 'icon' in k:
-                    action.setIcon(a[k])
-
-            # add action to menu
-            add_to_menu.addAction(action)
-            hiero.ui.registerAction(action)
-        elif isinstance(a, str):
-            add_to_menu.addSeparator()
+    context_label_action = menu.addAction(context_label)
+    context_label_action.setEnabled(False)
+
+    menu.addSeparator()
+
+    workfiles_action = menu.addAction("Work Files...")
+    workfiles_action.setIcon(QtGui.QIcon("icons:Position.png"))
+    workfiles_action.triggered.connect(set_workfiles)
+
+    default_tags_action = menu.addAction("Create Default Tags...")
+    default_tags_action.setIcon(QtGui.QIcon("icons:Position.png"))
+    default_tags_action.triggered.connect(add_tags_from_presets)
+
+    menu.addSeparator()
+
+    publish_action = menu.addAction("Publish...")
+    publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
+    publish_action.triggered.connect(
+        lambda *args: publish.show(hiero.ui.mainWindow())
+    )
+
+    loader_action = menu.addAction("Load...")
+    loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
+    loader_action.triggered.connect(cbloader.show)
+    menu.addSeparator()
+
+    reload_action = menu.addAction("Reload pipeline...")
+    reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
+    reload_action.triggered.connect(reload_config)
+
+    # Is this required?
+    # hiero.ui.registerAction(context_label_action)
+    # hiero.ui.registerAction(workfiles_action)
+    # hiero.ui.registerAction(default_tags_action)
+    # hiero.ui.registerAction(publish_action)
+    # hiero.ui.registerAction(loader_action)
+    # hiero.ui.registerAction(reload_action)
+
+    self.context_label_action = context_label_action
+    self.workfile_actions = workfiles_action
+    self.default_tags_action = default_tags_action
+    self.publish_action = publish_action
+    self.reload_action = reload_action
@@ -73,5 +73,5 @@ def current_file():
     return normalised


-def work_root():
-    return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/")
+def work_root(session):
+    return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/")
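The signature change makes the session an explicit argument instead of an implicit read of api.Session. A hedged call-site sketch:

    from avalon import api

    # Hypothetical caller; previously work_root() read api.Session itself.
    root = work_root(api.Session)  # workdir normalized to forward slashes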
@@ -35,7 +35,18 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
         # Find project entity
         project_query = 'Project where full_name is "{0}"'.format(project_name)
         self.log.debug("Project query: < {0} >".format(project_query))
-        project_entity = session.query(project_query).one()
+        project_entity = list(session.query(project_query).all())
+        if len(project_entity) == 0:
+            raise AssertionError(
+                "Project \"{0}\" not found in Ftrack.".format(project_name)
+            )
+        # QUESTION Is possible to happen?
+        elif len(project_entity) > 1:
+            raise AssertionError((
+                "Found more than one project with name \"{0}\" in Ftrack."
+            ).format(project_name))
+
+        project_entity = project_entity[0]
         self.log.debug("Project found: {0}".format(project_entity))

         # Find asset entity
@@ -44,7 +55,25 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
             ' and name is "{1}"'
         ).format(project_entity["id"], asset_name)
         self.log.debug("Asset entity query: < {0} >".format(entity_query))
-        asset_entity = session.query(entity_query).one()
+        asset_entities = []
+        for entity in session.query(entity_query).all():
+            # Skip tasks
+            if entity.entity_type.lower() != "task":
+                asset_entities.append(entity)
+
+        if len(asset_entities) == 0:
+            raise AssertionError((
+                "Entity with name \"{0}\" not found"
+                " in Ftrack project \"{1}\"."
+            ).format(asset_name, project_name))
+
+        elif len(asset_entities) > 1:
+            raise AssertionError((
+                "Found more than one entity with name \"{0}\""
+                " in Ftrack project \"{1}\"."
+            ).format(asset_name, project_name))
+
+        asset_entity = asset_entities[0]
+        self.log.debug("Asset found: {0}".format(asset_entity))

         # Find task entity if task is set
@@ -53,8 +82,15 @@ class CollectFtrackApi(pyblish.api.ContextPlugin):
                 'Task where name is "{0}" and parent_id is "{1}"'
             ).format(task_name, asset_entity["id"])
             self.log.debug("Task entity query: < {0} >".format(task_query))
-            task_entity = session.query(task_query).one()
-            self.log.debug("Task entity found: {0}".format(task_entity))
+            task_entity = session.query(task_query).first()
+            if not task_entity:
+                self.log.warning(
+                    "Task entity with name \"{0}\" was not found.".format(
+                        task_name
+                    )
+                )
+            else:
+                self.log.debug("Task entity found: {0}".format(task_entity))

         else:
             task_entity = None
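For reference, the ftrack_api query accessors this rewrite leans on (a hedged summary): .one() raises when zero or multiple entities match, .first() returns None on no match, .all() returns a list. Schematically:

    # Hypothetical session usage illustrating the difference.
    results = session.query('Task where name is "compositing"')
    items = results.all()     # [] when nothing matches
    first = results.first()   # None when nothing matches
    only = results.one()      # raises on zero or multiple matches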
@@ -19,6 +19,7 @@ class CollectAvalonEntities(pyblish.api.ContextPlugin):
     label = "Collect Avalon Entities"

     def process(self, context):
+        io.install()
         project_name = api.Session["AVALON_PROJECT"]
         asset_name = api.Session["AVALON_ASSET"]
@@ -1,450 +0,0 @@
(entire file removed; its former content follows for reference)

"""
Requires:
    environment -> PYPE_PUBLISH_PATHS
    context -> workspaceDir

Provides:
    context -> user (str)
    instance -> new instance
"""

import os
import re
import copy
import json

import pyblish.api
from avalon import api


def collect(root,
            regex=None,
            exclude_regex=None,
            frame_start=None,
            frame_end=None):
    """Collect sequence collections in root"""

    from avalon.vendor import clique

    files = list()
    for filename in os.listdir(root):

        # Must have extension
        ext = os.path.splitext(filename)[1]
        if not ext:
            continue

        # Only files
        if not os.path.isfile(os.path.join(root, filename)):
            continue

        # Include and exclude regex
        if regex and not re.search(regex, filename):
            continue
        if exclude_regex and re.search(exclude_regex, filename):
            continue

        files.append(filename)

    # Match collections
    # Support filenames like: projectX_shot01_0010.tiff with this regex
    pattern = r"(?P<index>(?P<padding>0*)\d+)\.\D+\d?$"
    collections, remainder = clique.assemble(files,
                                             patterns=[pattern],
                                             minimum_items=1)

    # Exclude any frames outside start and end frame.
    for collection in collections:
        for index in list(collection.indexes):
            if frame_start is not None and index < frame_start:
                collection.indexes.discard(index)
                continue
            if frame_end is not None and index > frame_end:
                collection.indexes.discard(index)
                continue

    # Keep only collections that have at least a single frame
    collections = [c for c in collections if c.indexes]

    return collections, remainder


class CollectRenderedFrames(pyblish.api.ContextPlugin):
    """Gather file sequences from working directory

    When "FILESEQUENCE" environment variable is set these paths (folders or
    .json files) are parsed for image sequences. Otherwise the current
    working directory is searched for file sequences.

    The json configuration may have the optional keys:
        asset (str): The asset to publish to. If not provided fall back to
            api.Session["AVALON_ASSET"]
        subset (str): The subset to publish to. If not provided the sequence's
            head (up to frame number) will be used.
        frame_start (int): The start frame for the sequence
        frame_end (int): The end frame for the sequence
        root (str): The path to collect from (can be relative to the .json)
        regex (str): A regex for the sequence filename
        exclude_regex (str): A regex for filename to exclude from collection
        metadata (dict): Custom metadata for instance.data["metadata"]

    """

    order = pyblish.api.CollectorOrder - 0.0001
    targets = ["filesequence"]
    label = "RenderedFrames"

    def process(self, context):
        pixel_aspect = 1
        resolution_width = 1920
        resolution_height = 1080
        lut_path = None
        slate_frame = None
        families_data = None
        baked_mov_path = None
        subset = None
        version = None
        frame_start = 0
        frame_end = 0
        if os.environ.get("PYPE_PUBLISH_PATHS"):
            paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep)
            self.log.info("Collecting paths: {}".format(paths))
        else:
            cwd = context.get("workspaceDir", os.getcwd())
            paths = [cwd]

        for path in paths:

            self.log.info("Loading: {}".format(path))

            if path.endswith(".json"):
                # Search using .json configuration
                with open(path, "r") as f:
                    try:
                        data = json.load(f)
                    except Exception as exc:
                        self.log.error(
                            "Error loading json: "
                            "{} - Exception: {}".format(path, exc)
                        )
                        raise

                cwd = os.path.dirname(path)
                root_override = data.get("root")
                frame_start = int(data.get("frameStart"))
                frame_end = int(data.get("frameEnd"))
                subset = data.get("subset")

                if root_override:
                    if os.path.isabs(root_override):
                        root = root_override
                    else:
                        root = os.path.join(cwd, root_override)
                else:
                    root = cwd

                if data.get("ftrack"):
                    f = data.get("ftrack")
                    os.environ["FTRACK_API_USER"] = f["FTRACK_API_USER"]
                    os.environ["FTRACK_API_KEY"] = f["FTRACK_API_KEY"]
                    os.environ["FTRACK_SERVER"] = f["FTRACK_SERVER"]

                metadata = data.get("metadata")
                if metadata:
                    session = metadata.get("session")
                    if session:
                        self.log.info("setting session using metadata")
                        api.Session.update(session)
                        os.environ.update(session)
                    instance = metadata.get("instance")
                    if instance:
                        pixel_aspect = instance.get("pixelAspect", 1)
                        resolution_width = instance.get("resolutionWidth", 1920)
                        resolution_height = instance.get("resolutionHeight", 1080)
                        lut_path = instance.get("lutPath", None)
                        baked_mov_path = instance.get("bakeRenderPath")
                        families_data = instance.get("families")
                        slate_frame = instance.get("slateFrame")
                        version = instance.get("version")

            else:
                # Search in directory
                data = dict()
                root = path

            self.log.info("Collecting: {}".format(root))

            regex = data.get("regex")
            if baked_mov_path:
                regex = "^{}.*$".format(subset)

            if regex:
                self.log.info("Using regex: {}".format(regex))

            if "slate" in families_data:
                frame_start -= 1

            collections, remainder = collect(
                root=root,
                regex=regex,
                exclude_regex=data.get("exclude_regex"),
                frame_start=frame_start,
                frame_end=frame_end,
            )

            self.log.info("Found collections: {}".format(collections))
            self.log.info("Found remainder: {}".format(remainder))

            fps = data.get("fps", 25)

            # adding publish comment and intent to context
            context.data["comment"] = data.get("comment", "")
            context.data["intent"] = data.get("intent", "")

            if data.get("user"):
                context.data["user"] = data["user"]

            if data.get("version"):
                version = data.get("version")

            # Get family from the data
            families = data.get("families", ["render"])
            if "ftrack" not in families:
                families.append("ftrack")
            if families_data and "render2d" in families_data:
                families.append("render2d")
            if families_data and "slate" in families_data:
                families.append("slate")
                families.append("slate.farm")

            if data.get("attachTo"):
                # we need to attach found collections to existing
                # subset version as review represenation.

                for attach in data.get("attachTo"):
                    self.log.info(
                        "Attaching render {}:v{}".format(
                            attach["subset"], attach["version"]))
                    instance = context.create_instance(
                        attach["subset"])
                    instance.data.update(
                        {
                            "name": attach["subset"],
                            "version": attach["version"],
                            "family": 'review',
                            "families": ['review', 'ftrack'],
                            "asset": data.get(
                                "asset", api.Session["AVALON_ASSET"]),
                            "stagingDir": root,
                            "frameStart": frame_start,
                            "frameEnd": frame_end,
                            "fps": fps,
                            "source": data.get("source", ""),
                            "pixelAspect": pixel_aspect,
                            "resolutionWidth": resolution_width,
                            "resolutionHeight": resolution_height
                        })

                    if "representations" not in instance.data:
                        instance.data["representations"] = []

                    for collection in collections:
                        self.log.info(
                            " - adding representation: {}".format(
                                str(collection))
                        )
                        ext = collection.tail.lstrip(".")

                        representation = {
                            "name": ext,
                            "ext": "{}".format(ext),
                            "files": list(collection),
                            "stagingDir": root,
                            "anatomy_template": "render",
                            "fps": fps,
                            "tags": ["review"],
                        }
                        instance.data["representations"].append(
                            representation)

            elif subset:
                # if we have subset - add all collections and known
                # reminder as representations

                # take out review family if mov path
                # this will make imagesequence none review

                if baked_mov_path:
                    self.log.info(
                        "Baked mov is available {}".format(
                            baked_mov_path))
                    families.append("review")

                if session['AVALON_APP'] == "maya":
                    families.append("review")

                self.log.info(
                    "Adding representations to subset {}".format(
                        subset))

                instance = context.create_instance(subset)
                data = copy.deepcopy(data)

                instance.data.update(
                    {
                        "name": subset,
                        "family": families[0],
                        "families": list(families),
                        "subset": subset,
                        "asset": data.get(
                            "asset", api.Session["AVALON_ASSET"]),
                        "stagingDir": root,
                        "frameStart": frame_start,
                        "frameEnd": frame_end,
                        "fps": fps,
                        "source": data.get("source", ""),
                        "pixelAspect": pixel_aspect,
                        "resolutionWidth": resolution_width,
                        "resolutionHeight": resolution_height,
                        "slateFrame": slate_frame,
                        "version": version
                    }
                )

                if "representations" not in instance.data:
                    instance.data["representations"] = []

                for collection in collections:
                    self.log.info(" - {}".format(str(collection)))

                    ext = collection.tail.lstrip(".")

                    if "slate" in instance.data["families"]:
                        frame_start += 1

                    representation = {
                        "name": ext,
                        "ext": "{}".format(ext),
                        "files": list(collection),
                        "frameStart": frame_start,
                        "frameEnd": frame_end,
                        "stagingDir": root,
                        "anatomy_template": "render",
                        "fps": fps,
                        "tags": ["review"] if not baked_mov_path else ["thumb-nuke"],
                    }
                    instance.data["representations"].append(
                        representation)

                # filter out only relevant mov in case baked available
                self.log.debug("__ remainder {}".format(remainder))
                if baked_mov_path:
                    remainder = [r for r in remainder
                                 if r in baked_mov_path]
                    self.log.debug("__ remainder {}".format(remainder))

                # process reminders
                for rem in remainder:
                    # add only known types to representation
                    if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']:
                        self.log.info(" . {}".format(rem))

                        if "slate" in instance.data["families"]:
                            frame_start += 1

                        tags = ["review"]

                        if baked_mov_path:
                            tags.append("delete")

                        representation = {
                            "name": rem.split(".")[-1],
                            "ext": "{}".format(rem.split(".")[-1]),
                            "files": rem,
                            "stagingDir": root,
                            "frameStart": frame_start,
                            "anatomy_template": "render",
                            "fps": fps,
                            "tags": tags
                        }
                        instance.data["representations"].append(
                            representation)

            else:
                # we have no subset so we take every collection and create one
                # from it
                for collection in collections:
                    instance = context.create_instance(str(collection))
                    self.log.info("Creating subset from: %s" % str(collection))

                    # Ensure each instance gets a unique reference to the data
                    data = copy.deepcopy(data)

                    # If no subset provided, get it from collection's head
                    subset = data.get("subset", collection.head.rstrip("_. "))

                    # If no start or end frame provided, get it from collection
                    indices = list(collection.indexes)
                    start = int(data.get("frameStart", indices[0]))
                    end = int(data.get("frameEnd", indices[-1]))

                    ext = list(collection)[0].split(".")[-1]

                    if "review" not in families:
                        families.append("review")

                    instance.data.update(
                        {
                            "name": str(collection),
                            "family": families[0],  # backwards compatibility
                            "families": list(families),
                            "subset": subset,
                            "asset": data.get(
                                "asset", api.Session["AVALON_ASSET"]),
                            "stagingDir": root,
                            "frameStart": start,
                            "frameEnd": end,
                            "fps": fps,
                            "source": data.get("source", ""),
                            "pixelAspect": pixel_aspect,
                            "resolutionWidth": resolution_width,
                            "resolutionHeight": resolution_height,
                            "version": version
                        }
                    )
                    if lut_path:
                        instance.data.update({"lutPath": lut_path})

                    instance.append(collection)
                    instance.context.data["fps"] = fps

                    if "representations" not in instance.data:
                        instance.data["representations"] = []

                    representation = {
                        "name": ext,
                        "ext": "{}".format(ext),
                        "files": list(collection),
                        "frameStart": start,
                        "frameEnd": end,
                        "stagingDir": root,
                        "anatomy_template": "render",
                        "fps": fps,
                        "tags": ["review"],
                    }
                    instance.data["representations"].append(representation)

                    # temporary ... allow only beauty on ftrack
                    if session['AVALON_APP'] == "maya":
                        AOV_filter = ['beauty']
                        for aov in AOV_filter:
                            if aov not in instance.data['subset']:
                                instance.data['families'].remove('review')
                                instance.data['families'].remove('ftrack')
                                representation["tags"].remove('review')

                    self.log.debug(
                        "__ representations {}".format(
                            instance.data["representations"]))
                    self.log.debug(
                        "__ instance.data {}".format(instance.data))
pype/plugins/global/publish/collect_rendered_files.py: 94 changed lines (new file)

@@ -0,0 +1,94 @@
import os
import json

import pyblish.api
from avalon import api

from pypeapp import PypeLauncher


class CollectRenderedFiles(pyblish.api.ContextPlugin):
    """
    This collector will try to find json files in provided
    `PYPE_PUBLISH_DATA`. Those files _MUST_ share same context.

    """
    order = pyblish.api.CollectorOrder - 0.0001
    targets = ["filesequence"]
    label = "Collect rendered frames"

    _context = None

    def _load_json(self, path):
        assert os.path.isfile(path), "path to json file doesn't exist"
        data = None
        with open(path, "r") as json_file:
            try:
                data = json.load(json_file)
            except Exception as exc:
                self.log.error(
                    "Error loading json: "
                    "{} - Exception: {}".format(path, exc)
                )
        return data

    def _process_path(self, data):
        # validate basic necessary data
        data_err = "invalid json file - missing data"
        required = ["asset", "user", "intent", "comment",
                    "job", "instances", "session", "version"]
        assert all(elem in data.keys() for elem in required), data_err

        # set context by first json file
        ctx = self._context.data

        ctx["asset"] = ctx.get("asset") or data.get("asset")
        ctx["intent"] = ctx.get("intent") or data.get("intent")
        ctx["comment"] = ctx.get("comment") or data.get("comment")
        ctx["user"] = ctx.get("user") or data.get("user")
        ctx["version"] = ctx.get("version") or data.get("version")

        # basic sanity check to see if we are working in same context
        # if some other json file has different context, bail out.
        ctx_err = "inconsistent contexts in json files - %s"
        assert ctx.get("asset") == data.get("asset"), ctx_err % "asset"
        assert ctx.get("intent") == data.get("intent"), ctx_err % "intent"
        assert ctx.get("comment") == data.get("comment"), ctx_err % "comment"
        assert ctx.get("user") == data.get("user"), ctx_err % "user"
        assert ctx.get("version") == data.get("version"), ctx_err % "version"

        # ftrack credentials are passed as environment variables by Deadline
        # to publish job, but Muster doesn't pass them.
        if data.get("ftrack") and not os.environ.get("FTRACK_API_USER"):
            ftrack = data.get("ftrack")
            os.environ["FTRACK_API_USER"] = ftrack["FTRACK_API_USER"]
            os.environ["FTRACK_API_KEY"] = ftrack["FTRACK_API_KEY"]
            os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"]

        # now we can just add instances from json file and we are done
        for instance in data.get("instances"):
            self.log.info(" - processing instance for {}".format(
                instance.get("subset")))
            i = self._context.create_instance(instance.get("subset"))
            self.log.info("remapping paths ...")
            i.data["representations"] = [PypeLauncher().path_remapper(
                data=r) for r in instance.get("representations")]
            i.data.update(instance)

    def process(self, context):
        self._context = context

        assert os.environ.get("PYPE_PUBLISH_DATA"), (
            "Missing `PYPE_PUBLISH_DATA`")
        paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep)

        session_set = False
        for path in paths:
            data = self._load_json(path)
            assert data, "failed to load json file"
            if not session_set:
                self.log.info("Setting session using data from file")
                api.Session.update(data.get("session"))
                os.environ.update(data.get("session"))
                session_set = True
            self._process_path(data)
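A sketch of the metadata file this collector consumes, inferred only from the required keys and the fields read above (all values illustrative, written by the farm submission job):

    example_publish_data = {
        "asset": "sh010",
        "user": "artist",
        "intent": "WIP",
        "comment": "first pass",
        "job": {},                     # original farm job info
        "session": {"AVALON_PROJECT": "demo", "AVALON_ASSET": "sh010"},
        "version": 3,
        "instances": [
            {
                "subset": "renderCompMain",
                "representations": [
                    {"name": "exr", "files": ["sh010.0001.exr"]}
                ],
            }
        ],
    }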
@@ -26,8 +26,8 @@ class ExtractBurnin(pype.api.Extractor):
         if "representations" not in instance.data:
             raise RuntimeError("Burnin needs already created mov to work on.")

-        version = instance.context.data.get(
-            'version', instance.data.get('version'))
+        version = instance.data.get(
+            'version', instance.context.data.get('version'))
         frame_start = int(instance.data.get("frameStart") or 0)
         frame_end = int(instance.data.get("frameEnd") or 1)
         duration = frame_end - frame_start + 1
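The swap reverses the lookup precedence: the instance's own version now wins over the context's. A toy demonstration with plain dicts:

    instance_data = {"version": 5}
    context_data = {"version": 3}

    old = context_data.get("version", instance_data.get("version"))   # context wins -> 3
    new = instance_data.get("version", context_data.get("version"))   # instance wins -> 5
    assert (old, new) == (3, 5)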
@@ -28,29 +28,33 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
         entity_type = entity_data["entity_type"]

         data = {}
-
-        data["inputs"] = entity_data.get("inputs", [])
         data["entityType"] = entity_type

         # Custom attributes.
         for k, val in entity_data.get("custom_attributes", {}).items():
             data[k] = val

-        # Tasks.
-        tasks = entity_data.get("tasks", [])
-        if tasks is not None or len(tasks) > 0:
-            data["tasks"] = tasks
-        parents = []
-        visualParent = None
-        # do not store project"s id as visualParent (silo asset)
-        if self.project is not None:
-            if self.project["_id"] != parent["_id"]:
-                visualParent = parent["_id"]
-            parents.extend(parent.get("data", {}).get("parents", []))
-            parents.append(parent["name"])
-        data["visualParent"] = visualParent
-        data["parents"] = parents
+        if entity_type.lower() != "project":
+            data["inputs"] = entity_data.get("inputs", [])
+
+            # Tasks.
+            tasks = entity_data.get("tasks", [])
+            if tasks is not None or len(tasks) > 0:
+                data["tasks"] = tasks
+            parents = []
+            visualParent = None
+            # do not store project"s id as visualParent (silo asset)
+            if self.project is not None:
+                if self.project["_id"] != parent["_id"]:
+                    visualParent = parent["_id"]
+                parents.extend(
+                    parent.get("data", {}).get("parents", [])
+                )
+                parents.append(parent["name"])
+            data["visualParent"] = visualParent
+            data["parents"] = parents

+        update_data = True
         # Process project
         if entity_type.lower() == "project":
             entity = io.find_one({"type": "project"})
@@ -58,8 +62,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
             assert (entity is not None), "Did not find project in DB"

             # get data from already existing project
-            for key, value in entity.get("data", {}).items():
-                data[key] = value
+            cur_entity_data = entity.get("data") or {}
+            cur_entity_data.update(data)
+            data = cur_entity_data

             self.project = entity
         # Raise error if project or parent are not set
@@ -70,16 +75,63 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
         # Else process assset
         else:
             entity = io.find_one({"type": "asset", "name": name})
-            # Create entity if doesn"t exist
-            if entity is None:
-                entity = self.create_avalon_asset(name, data)
+            if entity:
+                # Do not override data, only update
+                cur_entity_data = entity.get("data") or {}
+                cur_entity_data.update(data)
+                data = cur_entity_data
+            else:
+                # Skip updating data
+                update_data = False
+
+                archived_entities = io.find({
+                    "type": "archived_asset",
+                    "name": name
+                })
+                unarchive_entity = None
+                for archived_entity in archived_entities:
+                    archived_parents = (
+                        archived_entity
+                        .get("data", {})
+                        .get("parents")
+                    )
+                    if data["parents"] == archived_parents:
+                        unarchive_entity = archived_entity
+                        break
+
+                if unarchive_entity is None:
+                    # Create entity if doesn"t exist
+                    entity = self.create_avalon_asset(name, data)
+                else:
+                    # Unarchive if entity was archived
+                    entity = self.unarchive_entity(unarchive_entity, data)

-        # Update entity data with input data
-        io.update_many({"_id": entity["_id"]}, {"$set": {"data": data}})
+        if update_data:
+            # Update entity data with input data
+            io.update_many(
+                {"_id": entity["_id"]},
+                {"$set": {"data": data}}
+            )

         if "childs" in entity_data:
             self.import_to_avalon(entity_data["childs"], entity)

+    def unarchive_entity(self, entity, data):
+        # Unarchived asset should not use same data
+        new_entity = {
+            "_id": entity["_id"],
+            "schema": "avalon-core:asset-3.0",
+            "name": entity["name"],
+            "parent": self.project["_id"],
+            "type": "asset",
+            "data": data
+        }
+        io.replace_one(
+            {"_id": entity["_id"]},
+            new_entity
+        )
+        return new_entity
+
     def create_avalon_asset(self, name, data):
         item = {
             "schema": "avalon-core:asset-3.0",
@@ -1,20 +1,12 @@
 import os

 import pyblish.api
 import clique
 import pype.api
+import pype.lib


 class ExtractJpegEXR(pyblish.api.InstancePlugin):
-    """Resolve any dependency issues
-
-    This plug-in resolves any paths which, if not updated might break
-    the published file.
-
-    The order of families is important, when working with lookdev you want to
-    first publish the texture, update the texture paths in the nodes and then
-    publish the shading network. Same goes for file dependent assets.
-    """
+    """Create jpg thumbnail from sequence using ffmpeg"""

     label = "Extract Jpeg EXR"
     hosts = ["shell"]
@@ -23,11 +15,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
     enabled = False

     def process(self, instance):
-        start = instance.data.get("frameStart")
-        stagingdir = os.path.normpath(instance.data.get("stagingDir"))
-
-        collected_frames = os.listdir(stagingdir)
-        collections, remainder = clique.assemble(collected_frames)
-
         self.log.info("subset {}".format(instance.data['subset']))
         if 'crypto' in instance.data['subset']:
@@ -48,6 +35,7 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
             if not isinstance(repre['files'], list):
                 continue

+            stagingdir = os.path.normpath(repre.get("stagingDir"))
             input_file = repre['files'][0]

             # input_file = (
@@ -69,9 +57,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
             proj_name = os.environ.get('AVALON_PROJECT', '__default__')
             profile = config_data.get(proj_name, config_data['__default__'])

+            ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
+
             jpeg_items = []
-            jpeg_items.append(
-                os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg"))
+            jpeg_items.append(ffmpeg_path)
             # override file if already exists
             jpeg_items.append("-y")
             # use same input args like with mov
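With the helper in place, the thumbnail command line is assembled from jpeg_items and run as one string; schematically (the input and output arguments here are illustrative, not taken from the diff):

    import subprocess

    ffmpeg_path = "/opt/ffmpeg/bin/ffmpeg"   # whatever get_ffmpeg_tool_path() returned
    jpeg_items = [ffmpeg_path, "-y"]         # "-y" overwrites an existing output file
    jpeg_items += ["-i", "render.0001.exr", "thumbnail.jpg"]
    subprocess.call(" ".join(jpeg_items), shell=True)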
@@ -2,6 +2,7 @@ import os
 import pyblish.api
 import clique
 import pype.api
+import pype.lib


 class ExtractReview(pyblish.api.InstancePlugin):
@@ -40,6 +41,8 @@ class ExtractReview(pyblish.api.InstancePlugin):
         # get representation and loop them
         representations = inst_data["representations"]

+        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
+
         # filter out mov and img sequences
         representations_new = representations[:]
         for repre in representations:
|
|||
# necessary input data
|
||||
# adds start arg only if image sequence
|
||||
if isinstance(repre["files"], list):
|
||||
|
||||
if start_frame != repre.get("detectedStart", start_frame):
|
||||
start_frame = repre.get("detectedStart")
|
||||
input_args.append(
|
||||
"-start_number {0} -framerate {1}".format(
|
||||
start_frame, fps))
|
||||
|
|
@ -324,10 +330,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
|
|||
os.mkdir(stg_dir)
|
||||
|
||||
mov_args = [
|
||||
os.path.join(
|
||||
os.environ.get(
|
||||
"FFMPEG_PATH",
|
||||
""), "ffmpeg"),
|
||||
ffmpeg_path,
|
||||
" ".join(input_args),
|
||||
" ".join(output_args)
|
||||
]
|
||||
|
|
|
|||
|
|
@@ -1,5 +1,6 @@
 import os
 import pype.api
+import pype.lib
 import pyblish
@@ -21,7 +22,7 @@ class ExtractReviewSlate(pype.api.Extractor):

         suffix = "_slate"
         slate_path = inst_data.get("slateFrame")
-        ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg")
+        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

         to_width = 1920
         to_height = 1080
@@ -523,7 +523,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
         """
         src = self.unc_convert(src)
         dst = self.unc_convert(dst)
-
+        src = os.path.normpath(src)
+        dst = os.path.normpath(dst)
         self.log.debug("Copying file .. {} -> {}".format(src, dst))
         dirname = os.path.dirname(dst)
         try:
@@ -1,7 +1,7 @@
 import os
 import json
+import re
 import logging
 from copy import copy

 from avalon import api, io
 from avalon.vendor import requests, clique
|
|||
try:
|
||||
from pype.scripts import publish_filesequence
|
||||
except Exception:
|
||||
raise RuntimeError("Expected module 'publish_deadline'"
|
||||
"to be available")
|
||||
assert False, "Expected module 'publish_deadline'to be available"
|
||||
|
||||
module_path = publish_filesequence.__file__
|
||||
if module_path.endswith(".pyc"):
|
||||
module_path = module_path[:-len(".pyc")] + ".py"
|
||||
module_path = module_path[: -len(".pyc")] + ".py"
|
||||
|
||||
module_path = os.path.normpath(module_path)
|
||||
mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT'])
|
||||
network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH'])
|
||||
mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"])
|
||||
network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"])
|
||||
|
||||
module_path = module_path.replace(mount_root, network_root)
|
||||
|
||||
|
|
@@ -34,39 +33,29 @@ def _get_script():

 def get_latest_version(asset_name, subset_name, family):
     # Get asset
     asset_name = io.find_one(
-        {
-            "type": "asset",
-            "name": asset_name
-        },
-        projection={"name": True}
+        {"type": "asset", "name": asset_name}, projection={"name": True}
     )

     subset = io.find_one(
-        {
-            "type": "subset",
-            "name": subset_name,
-            "parent": asset_name["_id"]
-        },
-        projection={"_id": True, "name": True}
+        {"type": "subset", "name": subset_name, "parent": asset_name["_id"]},
+        projection={"_id": True, "name": True},
     )

     # Check if subsets actually exists (pre-run check)
     assert subset, "No subsets found, please publish with `extendFrames` off"

     # Get version
-    version_projection = {"name": True,
-                          "data.startFrame": True,
-                          "data.endFrame": True,
-                          "parent": True}
+    version_projection = {
+        "name": True,
+        "data.startFrame": True,
+        "data.endFrame": True,
+        "parent": True,
+    }

     version = io.find_one(
-        {
-            "type": "version",
-            "parent": subset["_id"],
-            "data.families": family
-        },
+        {"type": "version", "parent": subset["_id"], "data.families": family},
         projection=version_projection,
-        sort=[("name", -1)]
+        sort=[("name", -1)],
     )

     assert version, "No version found, this is a bug"
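A hedged usage example of the reformatted helper (argument values illustrative):

    version = get_latest_version(
        asset_name="sh010", subset_name="renderCompMain", family="render"
    )
    print(version["name"])                      # e.g. 4
    print(version["data"].get("startFrame"))    # only projected fields are present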
@@ -87,8 +76,12 @@ def get_resources(version, extension=None):

     directory = api.get_representation_path(representation)
     print("Source: ", directory)
-    resources = sorted([os.path.normpath(os.path.join(directory, fname))
-                        for fname in os.listdir(directory)])
+    resources = sorted(
+        [
+            os.path.normpath(os.path.join(directory, fname))
+            for fname in os.listdir(directory)
+        ]
+    )

     return resources
@@ -138,9 +131,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
     - publishJobState (str, Optional): "Active" or "Suspended"
         This defaults to "Suspended"

-    This requires a "frameStart" and "frameEnd" to be present in instance.data
-    or in context.data.
-
     """

     label = "Submit image sequence jobs to Deadline or Muster"
@@ -149,26 +139,39 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):

     hosts = ["fusion", "maya", "nuke"]

-    families = [
-        "render.farm",
-        "renderlayer",
-        "imagesequence"
-    ]
+    families = ["render.farm", "renderlayer", "imagesequence"]

     aov_filter = {"maya": ["beauty"]}

     enviro_filter = [
         "PATH",
         "PYTHONPATH",
         "FTRACK_API_USER",
         "FTRACK_API_KEY",
         "FTRACK_SERVER",
         "PYPE_ROOT",
         "PYPE_METADATA_FILE",
         "PYPE_STUDIO_PROJECTS_PATH",
         "PYPE_STUDIO_PROJECTS_MOUNT",
     ]

+    # pool used to do the publishing job
+    deadline_pool = ""
+
+    # regex for finding frame number in string
+    R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
+
+    # mapping of instance properties to be transferred to new instance
+    # for every specified family
+    instance_transfer = {
+        "slate": ["slateFrame"],
+        "review": ["lutPath"],
+        "render.farm": ["bakeScriptPath", "bakeRenderPath", "bakeWriteNodeName"]
+    }
+
+    # list of family names to transfer to new family if present
+    families_transfer = ["render3d", "render2d", "ftrack", "slate"]

     def _submit_deadline_post_job(self, instance, job):
         """
         Deadline specific code separated from :meth:`process` for sake of
@@ -178,8 +181,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         data = instance.data.copy()
         subset = data["subset"]
         job_name = "{batch} - {subset} [publish image sequence]".format(
-            batch=job["Props"]["Name"],
-            subset=subset
+            batch=job["Props"]["Name"], subset=subset
         )

         metadata_filename = "{}_metadata.json".format(subset)
@@ -187,9 +189,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
         metadata_path = os.path.join(output_dir, metadata_filename)

         metadata_path = os.path.normpath(metadata_path)
-        mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT'])
+        mount_root = os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"])
         network_root = os.path.normpath(
-            os.environ['PYPE_STUDIO_PROJECTS_PATH'])
+            os.environ["PYPE_STUDIO_PROJECTS_PATH"]
+        )

         metadata_path = metadata_path.replace(mount_root, network_root)
@@ -199,7 +202,6 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "Plugin": "Python",
             "BatchName": job["Props"]["Batch"],
             "Name": job_name,
-            "JobType": "Normal",
             "JobDependency0": job["_id"],
             "UserName": job["Props"]["User"],
             "Comment": instance.context.data.get("comment", ""),
@@ -210,11 +212,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
             "Version": "3.6",
             "ScriptFile": _get_script(),
             "Arguments": "",
-            "SingleFrameOnly": "True"
+            "SingleFrameOnly": "True",
         },

         # Mandatory for Deadline, may be empty
-        "AuxFiles": []
+        "AuxFiles": [],
     }
@ -224,30 +225,257 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
|
|||
environment["PYPE_METADATA_FILE"] = metadata_path
|
||||
i = 0
|
||||
for index, key in enumerate(environment):
|
||||
self.log.info("KEY: {}".format(key))
|
||||
self.log.info("FILTER: {}".format(self.enviro_filter))
|
||||
|
||||
if key.upper() in self.enviro_filter:
|
||||
payload["JobInfo"].update({
|
||||
"EnvironmentKeyValue%d" % i: "{key}={value}".format(
|
||||
key=key,
|
||||
value=environment[key]
|
||||
)
|
||||
})
|
||||
payload["JobInfo"].update(
|
||||
{
|
||||
"EnvironmentKeyValue%d"
|
||||
% i: "{key}={value}".format(
|
||||
key=key, value=environment[key]
|
||||
)
|
||||
}
|
||||
)
|
||||
i += 1
|
||||
|
||||
# Avoid copied pools and remove secondary pool
|
||||
payload["JobInfo"]["Pool"] = "none"
|
||||
payload["JobInfo"].pop("SecondaryPool", None)
|
||||
|
||||
self.log.info("Submitting..")
|
||||
self.log.info(json.dumps(payload, indent=4, sort_keys=True))
|
||||
self.log.info("Submitting Deadline job ...")
|
||||
# self.log.info(json.dumps(payload, indent=4, sort_keys=True))
|
||||
|
||||
url = "{}/api/jobs".format(self.DEADLINE_REST_URL)
|
||||
response = requests.post(url, json=payload)
|
||||
if not response.ok:
|
||||
raise Exception(response.text)
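
For orientation, a self-contained sketch of the shape of such a submission; the job name, script path and metadata path below are hypothetical placeholders, while the endpoint and payload layout mirror the code above:

import os
import requests

DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", "http://localhost:8082")

payload = {
    "JobInfo": {
        "Plugin": "Python",
        "Name": "batch - subset [publish image sequence]",
        "JobType": "Normal",
        # filtered environment travels as EnvironmentKeyValue0..N entries
        "EnvironmentKeyValue0": "PYPE_METADATA_FILE=/path/to/subset_metadata.json",
        "Pool": "none",
    },
    "PluginInfo": {
        "Version": "3.6",
        "ScriptFile": "/path/to/publish_script.py",
        "Arguments": "",
        "SingleFrameOnly": "True",
    },
    "AuxFiles": [],  # mandatory for Deadline, may be empty
}

response = requests.post("{}/api/jobs".format(DEADLINE_REST_URL), json=payload)
if not response.ok:
    raise Exception(response.text)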
def _copy_extend_frames(self, instance, representation):
"""
This will copy all existing frames from subset's latest version back
to render directory and rename them to what renderer is expecting.
:param instance: instance to get required data from
:type instance: pyblish.plugin.Instance
"""
import speedcopy
self.log.info("Preparing to copy ...")
start = instance.data.get("startFrame")
end = instance.data.get("endFrame")
# get latest version of subset
# this will stop if subset wasn't published yet
version = get_latest_version(
instance.data.get("asset"),
instance.data.get("subset"), "render")
# get its files based on extension
subset_resources = get_resources(version, representation.get("ext"))
r_col, _ = clique.assemble(subset_resources)
# if override remove all frames we are expecting to be rendered
# so we'll copy only those missing from current render
if instance.data.get("overrideExistingFrame"):
for frame in range(start, end+1):
if frame not in r_col.indexes:
continue
r_col.indexes.remove(frame)
# now we need to translate published names from representation
# back. This is tricky, right now we'll just use same naming
# and only switch frame numbers
resource_files = []
r_filename = os.path.basename(
representation.get("files")[0])  # first file
op = re.search(self.R_FRAME_NUMBER, r_filename)
pre = r_filename[:op.start("frame")]
post = r_filename[op.end("frame"):]
assert op is not None, "padding string wasn't found"
for frame in list(r_col):
fn = re.search(self.R_FRAME_NUMBER, frame)
# silencing linter as we need to compare to True, not to
# type
assert fn is not None, "padding string wasn't found"
# list of tuples (source, destination)
resource_files.append(
(frame,
os.path.join(representation.get("stagingDir"),
"{}{}{}".format(pre,
fn.group("frame"),
post)))
)
# test if destination dir exists and create it if not
output_dir = os.path.dirname(representation.get("files")[0])
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
# copy files
for source in resource_files:
speedcopy.copy(source[0], source[1])
self.log.info(" > {}".format(source[1]))
self.log.info(
"Finished copying %i files" % len(resource_files))
def _create_instances_for_aov(self, instance_data, exp_files):
"""
This will create new instance for every aov it can detect in expected
files list.
:param instance_data: skeleton data for instance (those needed) later
by collector
:type instance_data: pyblish.plugin.Instance
:param exp_files: list of expected files divided by aovs
:type exp_files: list
:returns: list of instances
:rtype: list(publish.plugin.Instance)
"""
task = os.environ["AVALON_TASK"]
subset = instance_data["subset"]
instances = []
# go through aovs in expected files
for aov, files in exp_files[0].items():
cols, rem = clique.assemble(files)
# we shouldn't have any remainders
if rem:
self.log.warning(
"skipping unexpected files found "
"in sequence: {}".format(rem))
# but we really expect only one collection, nothing else makes sense
assert len(cols) == 1, "only one image sequence type is expected"
# create subset name `familyTaskSubset_AOV`
subset_name = 'render{}{}{}{}_{}'.format(
task[0].upper(), task[1:],
subset[0].upper(), subset[1:],
aov)
staging = os.path.dirname(list(cols[0])[0])
start = int(instance_data.get("frameStart"))
end = int(instance_data.get("frameEnd"))
self.log.info("Creating data for: {}".format(subset_name))
app = os.environ.get("AVALON_APP", "")
preview = False
if app in self.aov_filter.keys():
if aov in self.aov_filter[app]:
preview = True
new_instance = copy(instance_data)
new_instance["subset"] = subset_name
ext = cols[0].tail.lstrip(".")
# create representation
rep = {
"name": ext,
"ext": ext,
"files": [os.path.basename(f) for f in list(cols[0])],
"frameStart": start,
"frameEnd": end,
# If expectedFile are absolute, we need only filenames
"stagingDir": staging,
"anatomy_template": "render",
"fps": new_instance.get("fps"),
"tags": ["review"] if preview else []
}
# add tags
if preview:
if "ftrack" not in new_instance["families"]:
if os.environ.get("FTRACK_SERVER"):
new_instance["families"].append("ftrack")
if "review" not in new_instance["families"]:
new_instance["families"].append("review")
new_instance["representations"] = [rep]
# if extending frames from existing version, copy files from there
# into our destination directory
if new_instance.get("extendFrames", False):
self._copy_extend_frames(new_instance, rep)
instances.append(new_instance)
return instances
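
The subset naming scheme used above, shown standalone with hypothetical values:

task, subset, aov = "compositing", "main", "beauty"
subset_name = 'render{}{}{}{}_{}'.format(
    task[0].upper(), task[1:],
    subset[0].upper(), subset[1:],
    aov)
print(subset_name)  # renderCompositingMain_beauty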
def _get_representations(self, instance, exp_files):
"""
This will return representations of expected files if they are not
in hierarchy of aovs. There should be only one sequence of files for
most cases, but if not - we create representation from each of them.
:param instance: instance for which we are setting representations
:type instance: pyblish.plugin.Instance
:param exp_files: list of expected files
:type exp_files: list
:returns: list of representations
:rtype: list(dict)
"""
representations = []
start = int(instance.get("frameStart"))
end = int(instance.get("frameEnd"))
cols, rem = clique.assemble(exp_files)
# create representation for every collected sequence
for c in cols:
ext = c.tail.lstrip(".")
preview = False
# if filtered aov name is found in filename, toggle it for
# preview video rendering
for app in self.aov_filter:
if os.environ.get("AVALON_APP", "") == app:
for aov in self.aov_filter[app]:
if re.match(
r".+(?:\.|_)({})(?:\.|_).*".format(aov),
list(c)[0]
):
preview = True
break
break
rep = {
"name": str(c),
"ext": ext,
"files": [os.path.basename(f) for f in list(c)],
"frameStart": start,
"frameEnd": end,
# If expectedFile are absolute, we need only filenames
"stagingDir": os.path.dirname(list(c)[0]),
"anatomy_template": "render",
"fps": instance.get("fps"),
"tags": ["review", "preview"] if preview else [],
}
representations.append(rep)
families = instance.get("families")
# if we have one representation with preview tag
# flag whole instance for review and for ftrack
if preview:
if "ftrack" not in families:
if os.environ.get("FTRACK_SERVER"):
families.append("ftrack")
if "review" not in families:
families.append("review")
instance["families"] = families
# add remainders as representations
for r in rem:
ext = r.split(".")[-1]
rep = {
"name": r,
"ext": ext,
"files": os.path.basename(r),
"stagingDir": os.path.dirname(r),
"anatomy_template": "publish",
}
representations.append(rep)
return representations
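
For context, `clique.assemble()` (used throughout this plugin) splits a flat file list into frame collections plus a remainder of loose files; a minimal sketch with hypothetical names:

import clique

files = ["foo_v01.0001.exr", "foo_v01.0002.exr", "notes.txt"]
collections, remainder = clique.assemble(files)
for collection in collections:
    # head/tail wrap the padded frame indexes
    print(collection.head, sorted(collection.indexes), collection.tail)
print(remainder)  # loose files that formed no sequence, e.g. ['notes.txt']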
def process(self, instance):
"""
Detect type of renderfarm submission and create and post dependent job

@@ -257,34 +485,36 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
:param instance: Instance data
:type instance: dict
"""
# Get a submission job
data = instance.data.copy()
context = instance.context
self.context = context
if hasattr(instance, "_log"):
data['_log'] = instance._log
render_job = data.pop("deadlineSubmissionJob", None)
submission_type = "deadline"
if not render_job:
# No deadline job. Try Muster: musterSubmissionJob
render_job = data.pop("musterSubmissionJob", None)
submission_type = "muster"
if not render_job:
raise RuntimeError("Can't continue without valid Deadline "
"or Muster submission prior to this "
"plug-in.")
assert render_job, (
"Can't continue without valid Deadline "
"or Muster submission prior to this "
"plug-in."
)
if submission_type == "deadline":
self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
self.DEADLINE_REST_URL = os.environ.get(
"DEADLINE_REST_URL", "http://localhost:8082"
)
assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL"
self._submit_deadline_post_job(instance, render_job)
asset = data.get("asset") or api.Session["AVALON_ASSET"]
subset = data["subset"]
subset = data.get("subset")
# Get start/end frame from instance, if not available get from context
context = instance.context
start = instance.data.get("frameStart")
if start is None:
start = context.data["frameStart"]

@@ -292,177 +522,217 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
if end is None:
end = context.data["frameEnd"]
# Add in regex for sequence filename
# This assumes the output files start with subset name and ends with
# a file extension. The "ext" key includes the dot with the extension.
if "ext" in instance.data:
ext = r"\." + re.escape(instance.data["ext"])
else:
ext = r"\.\D+"
regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset),
ext=ext)
if data.get("extendFrames", False):
start, end = self._extend_frames(
asset,
subset,
start,
end,
data["overrideExistingFrame"])
try:
source = data['source']
source = data["source"]
except KeyError:
source = context.data["currentFile"]
source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"),
api.registered_root())
source = source.replace(
os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root()
)
relative_path = os.path.relpath(source, api.registered_root())
source = os.path.join("{root}", relative_path).replace("\\", "/")
# find subsets and version to attach render to
attach_to = instance.data.get("attachTo")
attach_subset_versions = []
if attach_to:
for subset in attach_to:
for instance in context:
if instance.data["subset"] != subset["subset"]:
continue
attach_subset_versions.append(
{"version": instance.data["version"],
"subset": subset["subset"],
"family": subset["family"]})
families = ["render"]
# Write metadata for publish job
metadata = {
instance_skeleton_data = {
"family": "render",
"subset": subset,
"families": families,
"asset": asset,
"frameStart": start,
"frameEnd": end,
"fps": data.get("fps", 25),
"source": source,
"extendFrames": data.get("extendFrames"),
"overrideExistingFrame": data.get("overrideExistingFrame"),
"pixelAspect": data.get("pixelAspect", 1),
"resolutionWidth": data.get("resolutionWidth", 1920),
"resolutionHeight": data.get("resolutionHeight", 1080),
}
# transfer specific families from original instance to new render
for item in self.families_transfer:
if item in instance.data.get("families", []):
instance_skeleton_data["families"] += [item]
# transfer specific properties from original instance based on
# mapping dictionary `instance_transfer`
for key, values in self.instance_transfer.items():
if key in instance.data.get("families", []):
for v in values:
instance_skeleton_data[v] = instance.data.get(v)
instances = None
assert data.get("expectedFiles"), ("Submission from old Pype version"
" - missing expectedFiles")
"""
if content of `expectedFiles` are dictionaries, we will handle
it as list of AOVs, creating instance from every one of them.

Example:
--------

expectedFiles = [
{
"beauty": [
"foo_v01.0001.exr",
"foo_v01.0002.exr"
],
"Z": [
"boo_v01.0001.exr",
"boo_v01.0002.exr"
]
}
]

This will create instances for `beauty` and `Z` subset
adding those files to their respective representations.

If we've got only list of files, we collect all filesequences.
More than one probably doesn't make sense, but we'll handle it
like creating one instance with multiple representations.

Example:
--------

expectedFiles = [
"foo_v01.0001.exr",
"foo_v01.0002.exr",
"xxx_v01.0001.exr",
"xxx_v01.0002.exr"
]

This will result in one instance with two representations:
`foo` and `xxx`
"""
self.log.info(data.get("expectedFiles"))
if isinstance(data.get("expectedFiles")[0], dict):
# we cannot attach AOVs to other subsets as we consider every
# AOV subset of its own.
if len(data.get("attachTo")) > 0:
assert len(data.get("expectedFiles")[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported")
# create instances for every AOV we found in expected files.
# note: this is done for every AOV and every render camera (if
# there are multiple renderable cameras in scene)
instances = self._create_instances_for_aov(
instance_skeleton_data,
data.get("expectedFiles"))
self.log.info("got {} instance{}".format(
len(instances),
"s" if len(instances) > 1 else ""))
else:
representations = self._get_representations(
instance_skeleton_data,
data.get("expectedFiles")
)
if "representations" not in instance_skeleton_data:
instance_skeleton_data["representations"] = []
# add representation
instance_skeleton_data["representations"] += representations
instances = [instance_skeleton_data]
# if we are attaching to other subsets, create copy of existing
# instances, change data to match that subset and replace
# existing instances with modified data
if instance.data.get("attachTo"):
self.log.info("Attaching render to subset:")
new_instances = []
for at in instance.data.get("attachTo"):
for i in instances:
new_i = copy(i)
new_i["version"] = at.get("version")
new_i["subset"] = at.get("subset")
new_i["append"] = True
new_i["families"].append(at.get("family"))
new_instances.append(new_i)
self.log.info(" - {} / v{}".format(
at.get("subset"), at.get("version")))
instances = new_instances
# publish job file
publish_job = {
"asset": asset,
"regex": regex,
"frameStart": start,
"frameEnd": end,
"fps": context.data.get("fps", None),
"families": ["render"],
"source": source,
"user": context.data["user"],
"version": context.data["version"],
"version": context.data["version"],  # this is workfile version
"intent": context.data.get("intent"),
"comment": context.data.get("comment"),
# Optional metadata (for debugging)
"metadata": {
"instance": data,
"job": render_job,
"session": api.Session.copy()
}
"job": render_job,
"session": api.Session.copy(),
"instances": instances
}
if api.Session["AVALON_APP"] == "nuke":
metadata['subset'] = subset
# pass Ftrack credentials in case of Muster
if submission_type == "muster":
ftrack = {
"FTRACK_API_USER": os.environ.get("FTRACK_API_USER"),
"FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"),
"FTRACK_SERVER": os.environ.get("FTRACK_SERVER")
"FTRACK_SERVER": os.environ.get("FTRACK_SERVER"),
}
metadata.update({"ftrack": ftrack})
publish_job.update({"ftrack": ftrack})
# Ensure output dir exists
output_dir = instance.data["outputDir"]
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
if data.get("extendFrames", False):
family = "render"
override = data["overrideExistingFrame"]
# override = data.get("overrideExistingFrame", False)
out_file = render_job.get("OutFile")
if not out_file:
raise RuntimeError("OutFile not found in render job!")
extension = os.path.splitext(out_file[0])[1]
_ext = extension[1:]
# Frame comparison
prev_start = None
prev_end = None
resource_range = range(int(start), int(end)+1)
# Gather all the subset files (one subset per render pass!)
subset_names = [data["subset"]]
subset_names.extend(data.get("renderPasses", []))
resources = []
for subset_name in subset_names:
version = get_latest_version(asset_name=data["asset"],
subset_name=subset_name,
family=family)
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
subset_resources = get_resources(version, _ext)
resource_files = get_resource_files(subset_resources,
resource_range,
override)
resources.extend(resource_files)
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
# Update metadata and instance start / end frame
self.log.info("Updating start / end frame : "
"{} - {}".format(updated_start, updated_end))
# TODO : Improve logic to get new frame range for the
# publish job (publish_filesequence.py)
# The current approach is not following Pyblish logic
# which is based
# on Collect / Validate / Extract.
# ---- Collect Plugins ---
# Collect Extend Frames - Only run if extendFrames is toggled
# # # Store in instance:
# # # Previous rendered files per subset based on frames
# # # --> Add to instance.data[resources]
# # # Update publish frame range
# ---- Validate Plugins ---
# Validate Extend Frames
# # # Check if instance has the requirements to extend frames
# There might have been some things which can be added to the list
# Please do so when fixing this.
# Start frame
metadata["frameStart"] = updated_start
metadata["metadata"]["instance"]["frameStart"] = updated_start
# End frame
metadata["frameEnd"] = updated_end
metadata["metadata"]["instance"]["frameEnd"] = updated_end
metadata_filename = "{}_metadata.json".format(subset)
metadata_path = os.path.join(output_dir, metadata_filename)
# convert log messages if they are `LogRecord` to their
# string format to allow serializing as JSON later on.
rendered_logs = []
for log in metadata["metadata"]["instance"].get("_log", []):
if isinstance(log, logging.LogRecord):
rendered_logs.append(log.getMessage())
else:
rendered_logs.append(log)
metadata["metadata"]["instance"]["_log"] = rendered_logs
self.log.info("Writing json file: {}".format(metadata_path))
with open(metadata_path, "w") as f:
json.dump(metadata, f, indent=4, sort_keys=True)
json.dump(publish_job, f, indent=4, sort_keys=True)
# Copy files from previous render if extendFrame is True
if data.get("extendFrames", False):

def _extend_frames(self, asset, subset, start, end, override):
"""
This will get latest version of asset and update frame range based
on minimum and maximum values
"""
self.log.info("Preparing to copy ..")
import shutil
# Frame comparison
prev_start = None
prev_end = None
dest_path = data["outputDir"]
for source in resources:
src_file = os.path.basename(source)
dest = os.path.join(dest_path, src_file)
shutil.copy(source, dest)
version = get_latest_version(
asset_name=asset,
subset_name=subset,
family='render'
)
self.log.info("Finished copying %i files" % len(resources))
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
prev_start = version["data"]["frameStart"]
prev_end = version["data"]["frameEnd"]
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
self.log.info(
"Updating start / end frame : "
"{} - {}".format(updated_start, updated_end)
)
return updated_start, updated_end
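
The frame-range extension reduces to a min/max union of the new range with the latest published one:

start, end = 1001, 1050            # frames about to be rendered
prev_start, prev_end = 990, 1020   # range of the latest published version
updated_start = min(start, prev_start)
updated_end = max(end, prev_end)
print(updated_start, updated_end)  # 990 1050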
@@ -1,13 +1,14 @@
import pyblish.api
import os
import subprocess
import pype.lib
try:
import os.errno as errno
except ImportError:
import errno

class ValidateFfmpegInstallef(pyblish.api.Validator):
class ValidateFFmpegInstalled(pyblish.api.Validator):
"""Validate availability of ffmpeg tool in PATH"""
order = pyblish.api.ValidatorOrder

@@ -27,10 +28,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator):
return True

def process(self, instance):
self.log.info("ffmpeg path: `{}`".format(
os.environ.get("FFMPEG_PATH", "")))
if self.is_tool(
os.path.join(
os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False:
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
self.log.info("ffmpeg path: `{}`".format(ffmpeg_path))
if self.is_tool(ffmpeg_path) is False:
self.log.error("ffmpeg not found in PATH")
raise RuntimeError('ffmpeg not installed.')
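
The `is_tool()` helper the validator calls is elided by this hunk; one common way such a check is written (a sketch under that assumption, not necessarily the plugin's exact body):

import subprocess

def is_tool(name):
    """Return True when `name` can be spawned as an executable."""
    try:
        proc = subprocess.Popen(
            [name, "-version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        proc.communicate()
    except OSError:
        # covers "executable not found" on both Python 2 and 3
        return False
    return True

print(is_tool("ffmpeg"))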
pype/plugins/global/publish/validate_version.py (new file, 25 lines)
@@ -0,0 +1,25 @@
import pyblish.api

class ValidateVersion(pyblish.api.InstancePlugin):
"""Validate instance version.
Pype does not allow overwriting previously published versions.
"""
order = pyblish.api.ValidatorOrder
label = "Validate Version"
hosts = ["nuke", "maya", "blender"]

def process(self, instance):
version = instance.data.get("version")
latest_version = instance.data.get("latestVersion")
if latest_version is not None:
msg = ("Version `{0}` that you are"
" trying to publish already"
" exists in the"
" database.").format(
version, latest_version)
assert (int(version) > int(latest_version)), msg
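
The rule reduces to a strict greater-than comparison; a trivial sketch with hypothetical numbers:

version, latest_version = 6, 5   # hypothetical instance data
assert int(version) > int(latest_version), (
    "Version {} already exists in the database".format(version))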
@@ -2,43 +2,108 @@ import os
import json
import appdirs
import requests
from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup
import pype.maya.lib as lib
import avalon.maya

class CreateRenderGlobals(avalon.maya.Creator):
class CreateRender(avalon.maya.Creator):
"""Create render layer for export"""
label = "Render Globals"
family = "renderglobals"
icon = "gears"
defaults = ['Main']
label = "Render"
family = "rendering"
icon = "eye"
defaults = ["Main"]
_token = None
_user = None
_password = None
# renderSetup instance
_rs = None
_image_prefix_nodes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'defaultRenderGlobals.imageFilePrefix',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
_image_prefixes = {
'mentalray': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'vray': 'maya/<scene>/<Layer>/<Layer>',
'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
'renderman': 'maya/<Scene>/<layer>/<layer>_<aov>',
'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>'
}

def __init__(self, *args, **kwargs):
super(CreateRenderGlobals, self).__init__(*args, **kwargs)
super(CreateRender, self).__init__(*args, **kwargs)
# We won't be publishing this one
self.data["id"] = "avalon.renderglobals"

def process(self):
exists = cmds.ls(self.name)
if exists:
return cmds.warning("%s already exists." % exists[0])
use_selection = self.options.get("useSelection")
with lib.undo_chunk():
self._create_render_settings()
instance = super(CreateRender, self).process()
cmds.setAttr("{}.machineList".format(instance), lock=True)
self._rs = renderSetup.instance()
layers = self._rs.getRenderLayers()
if use_selection:
print(">>> processing existing layers")
sets = []
for layer in layers:
print(" - creating set for {}".format(layer.name()))
render_set = cmds.sets(n="LAYER_{}".format(layer.name()))
sets.append(render_set)
cmds.sets(sets, forceElement=instance)
# if no render layers are present, create default one with
# asterisk selector
if not layers:
rl = self._rs.createRenderLayer('Main')
cl = rl.createCollection("defaultCollection")
cl.getSelector().setPattern('*')
renderer = cmds.getAttr(
'defaultRenderGlobals.currentRenderer').lower()
# handle various renderman names
if renderer.startswith('renderman'):
renderer = 'renderman'
cmds.setAttr(self._image_prefix_nodes[renderer],
self._image_prefixes[renderer],
type="string")
def _create_render_settings(self):
# get pools
pools = []
deadline_url = os.environ.get('DEADLINE_REST_URL', None)
muster_url = os.environ.get('MUSTER_REST_URL', None)
deadline_url = os.environ.get("DEADLINE_REST_URL", None)
muster_url = os.environ.get("MUSTER_REST_URL", None)
if deadline_url and muster_url:
self.log.error("Both Deadline and Muster are enabled. "
"Cannot support both.")
self.log.error(
"Both Deadline and Muster are enabled. " "Cannot support both."
)
raise RuntimeError("Both Deadline and Muster are enabled")
if deadline_url is None:
self.log.warning("Deadline REST API url not found.")
else:
argument = "{}/api/pools?NamesOnly=true".format(deadline_url)
response = self._requests_get(argument)
try:
response = self._requests_get(argument)
except requests.exceptions.ConnectionError as e:
msg = 'Cannot connect to deadline web service'
self.log.error(msg)
raise RuntimeError('{} - {}'.format(msg, e))
if not response.ok:
self.log.warning("No pools retrieved")
else:

@@ -57,8 +122,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
try:
pools = self._get_muster_pools()
except requests.exceptions.HTTPError as e:
if e.startswith('401'):
self.log.warning('access token expired')
if e.startswith("401"):
self.log.warning("access token expired")
self._show_login()
raise RuntimeError("Access token expired")
except requests.exceptions.ConnectionError:

@@ -66,20 +131,15 @@ class CreateRenderGlobals(avalon.maya.Creator):
raise RuntimeError("Cannot connect to {}".format(muster_url))
pool_names = []
for pool in pools:
self.log.info(" - pool: {}".format(pool['name']))
pool_names.append(pool['name'])
self.log.info(" - pool: {}".format(pool["name"]))
pool_names.append(pool["name"])
self.data["primaryPool"] = pool_names
# We don't need subset or asset attributes
# self.data.pop("subset", None)
# self.data.pop("asset", None)
# self.data.pop("active", None)
self.data["suspendPublishJob"] = False
self.data["extendFrames"] = False
self.data["overrideExistingFrame"] = True
self.data["useLegacyRenderLayers"] = True
# self.data["useLegacyRenderLayers"] = True
self.data["priority"] = 50
self.data["framesPerTask"] = 1
self.data["whitelist"] = False
@@ -88,20 +148,6 @@ class CreateRenderGlobals(avalon.maya.Creator):
self.options = {"useSelection": False}  # Force no content

def process(self):
exists = cmds.ls(self.name)
assert len(exists) <= 1, (
"More than one renderglobal exists, this is a bug"
)
if exists:
return cmds.warning("%s already exists." % exists[0])
with lib.undo_chunk():
super(CreateRenderGlobals, self).process()
cmds.setAttr("{}.machineList".format(self.name), lock=True)

def _load_credentials(self):
"""
Load Muster credentials from file and set `MUSTER_USER`,

@@ -111,14 +157,12 @@ class CreateRenderGlobals(avalon.maya.Creator):
Show login dialog if access token is invalid or missing.
"""
app_dir = os.path.normpath(
appdirs.user_data_dir('pype-app', 'pype')
)
file_name = 'muster_cred.json'
app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype"))
file_name = "muster_cred.json"
fpath = os.path.join(app_dir, file_name)
file = open(fpath, 'r')
file = open(fpath, "r")
muster_json = json.load(file)
self._token = muster_json.get('token', None)
self._token = muster_json.get("token", None)
if not self._token:
self._show_login()
raise RuntimeError("Invalid access token for Muster")

@@ -131,26 +175,25 @@ class CreateRenderGlobals(avalon.maya.Creator):
"""
Get render pools from muster
"""
params = {
'authToken': self._token
}
api_entry = '/api/pools/list'
response = self._requests_get(
self.MUSTER_REST_URL + api_entry, params=params)
params = {"authToken": self._token}
api_entry = "/api/pools/list"
response = self._requests_get(self.MUSTER_REST_URL + api_entry,
params=params)
if response.status_code != 200:
if response.status_code == 401:
self.log.warning('Authentication token expired.')
self.log.warning("Authentication token expired.")
self._show_login()
else:
self.log.error(
'Cannot get pools from Muster: {}'.format(
response.status_code))
raise Exception('Cannot get pools from Muster')
("Cannot get pools from "
"Muster: {}").format(response.status_code)
)
raise Exception("Cannot get pools from Muster")
try:
pools = response.json()['ResponseData']['pools']
pools = response.json()["ResponseData"]["pools"]
except ValueError as e:
self.log.error('Invalid response from Muster server {}'.format(e))
raise Exception('Invalid response from Muster server')
self.log.error("Invalid response from Muster server {}".format(e))
raise Exception("Invalid response from Muster server")
return pools

@@ -162,8 +205,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
self.log.debug(api_url)
login_response = self._requests_post(api_url, timeout=1)
if login_response.status_code != 200:
self.log.error('Cannot show login form to Muster')
raise Exception('Cannot show login form to Muster')
self.log.error("Cannot show login form to Muster")
raise Exception("Cannot show login form to Muster")

def _requests_post(self, *args, **kwargs):
""" Wrapper for requests, disabling SSL certificate validation if

@@ -175,8 +218,10 @@ class CreateRenderGlobals(avalon.maya.Creator):
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True  # noqa
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
)  # noqa
return requests.post(*args, **kwargs)

def _requests_get(self, *args, **kwargs):

@@ -189,6 +234,8 @@ class CreateRenderGlobals(avalon.maya.Creator):
WARNING: disabling SSL certificate validation is defeating one line
of defense SSL is providing and it is not recommended.
"""
if 'verify' not in kwargs:
kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True  # noqa
if "verify" not in kwargs:
kwargs["verify"] = (
False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True
)  # noqa
return requests.get(*args, **kwargs)
pype/plugins/maya/publish/collect_render.py (new file, 907 lines)
@@ -0,0 +1,907 @@
"""
This collector will go through render layers in maya and prepare all data
needed to create instances and their representations for submission and
publishing on farm.

Requires:
instance -> families
instance -> setMembers

context -> currentFile
context -> workspaceDir
context -> user

session -> AVALON_ASSET

Optional:

Provides:
instance -> label
instance -> subset
instance -> attachTo
instance -> setMembers
instance -> publish
instance -> frameStart
instance -> frameEnd
instance -> byFrameStep
instance -> renderer
instance -> family
instance -> families
instance -> asset
instance -> time
instance -> author
instance -> source
instance -> expectedFiles
instance -> resolutionWidth
instance -> resolutionHeight
instance -> pixelAspect
"""

import re
import os
import types
import six
from abc import ABCMeta, abstractmethod

from maya import cmds
import maya.app.renderSetup.model.renderSetup as renderSetup

import pyblish.api

from avalon import maya, api
import pype.maya.lib as lib

R_SINGLE_FRAME = re.compile(r'^(-?)\d+$')
R_FRAME_RANGE = re.compile(r'^(?P<sf>(-?)\d+)-(?P<ef>(-?)\d+)$')
R_FRAME_NUMBER = re.compile(r'.+\.(?P<frame>[0-9]+)\..+')
R_LAYER_TOKEN = re.compile(
r'.*%l.*|.*<layer>.*|.*<renderlayer>.*', re.IGNORECASE)
R_AOV_TOKEN = re.compile(r'.*%a.*|.*<aov>.*|.*<renderpass>.*', re.IGNORECASE)
R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a|<aov>|<renderpass>', re.IGNORECASE)
R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_<aov>|_<renderpass>', re.IGNORECASE)
# to remove unused renderman tokens
R_CLEAN_FRAME_TOKEN = re.compile(r'\.?<f\d>\.?', re.IGNORECASE)
R_CLEAN_EXT_TOKEN = re.compile(r'\.?<ext>\.?', re.IGNORECASE)

R_SUBSTITUTE_LAYER_TOKEN = re.compile(
r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)
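
To make the token substitution concrete, a short sketch using two of the patterns above (the prefix value is hypothetical):

import re

R_SUBSTITUTE_LAYER_TOKEN = re.compile(
    r'%l|<layer>|<renderlayer>', re.IGNORECASE)
R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)

prefix = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
prefix = re.sub(R_SUBSTITUTE_LAYER_TOKEN, "mainLayer", prefix)
prefix = re.sub(R_SUBSTITUTE_CAMERA_TOKEN, "persp", prefix)
print(prefix)  # maya/<Scene>/mainLayer/mainLayer_<RenderPass>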
RENDERER_NAMES = {
'mentalray': 'MentalRay',
'vray': 'V-Ray',
'arnold': 'Arnold',
'renderman': 'Renderman',
'redshift': 'Redshift'
}

# not sure about the renderman image prefix
ImagePrefixes = {
'mentalray': 'defaultRenderGlobals.imageFilePrefix',
'vray': 'vraySettings.fileNamePrefix',
'arnold': 'defaultRenderGlobals.imageFilePrefix',
'renderman': 'rmanGlobals.imageFileFormat',
'redshift': 'defaultRenderGlobals.imageFilePrefix'
}
class CollectMayaRender(pyblish.api.ContextPlugin):
"""Gather all publishable render layers from renderSetup"""
order = pyblish.api.CollectorOrder + 0.01
hosts = ["maya"]
label = "Collect Render Layers"

def process(self, context):
render_instance = None
for instance in context:
if 'rendering' in instance.data['families']:
render_instance = instance
render_instance.data["remove"] = True
# make sure workfile instance publishing is enabled
if 'workfile' in instance.data['families']:
instance.data["publish"] = True
if not render_instance:
self.log.info("No render instance found, skipping render "
"layer collection.")
return
render_globals = render_instance
collected_render_layers = render_instance.data['setMembers']
filepath = context.data["currentFile"].replace("\\", "/")
asset = api.Session["AVALON_ASSET"]
workspace = context.data["workspaceDir"]
self._rs = renderSetup.instance()
maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()}
self.maya_layers = maya_render_layers
for layer in collected_render_layers:
# every layer in set should start with `LAYER_` prefix
try:
expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1)
except (IndexError, AttributeError):
msg = ("Invalid layer name in set [ {} ]".format(layer))
self.log.warning(msg)
continue
self.log.info("processing %s" % layer)
# check if layer is part of renderSetup
if expected_layer_name not in maya_render_layers:
msg = ("Render layer [ {} ] is not in "
"Render Setup".format(expected_layer_name))
self.log.warning(msg)
continue
# check if layer is renderable
if not maya_render_layers[expected_layer_name].isRenderable():
msg = ("Render layer [ {} ] is not "
"renderable".format(expected_layer_name))
self.log.warning(msg)
continue
# test if there are sets (subsets) to attach render to
sets = cmds.sets(layer, query=True) or []
attachTo = []
if sets:
for s in sets:
attachTo.append({
"version": None,  # we need integrator to get version
"subset": s,
"family": cmds.getAttr("{}.family".format(s))
})
self.log.info(" -> attach render to: {}".format(s))
layer_name = "rs_{}".format(expected_layer_name)
# collect all frames we are expecting to be rendered
renderer = cmds.getAttr(
'defaultRenderGlobals.currentRenderer').lower()
# handle various renderman names
if renderer.startswith('renderman'):
renderer = 'renderman'
# return all expected files for all cameras and aovs in given
# frame range
exp_files = ExpectedFiles().get(renderer, layer_name)
assert exp_files, ("no file names were generated, this is a bug")
# if we want to attach render to subset, check if we have AOV's
# in expectedFiles. If so, raise error as we cannot attach AOV
# (considered to be subset on its own) to another subset
if attachTo:
assert len(exp_files[0].keys()) == 1, (
"attaching multiple AOVs or renderable cameras to "
"subset is not supported")
# append full path
full_exp_files = []
aov_dict = {}
# we either get AOVs or just list of files. List of files can
# mean two things - there are no AOVs enabled or multipass EXR
# is produced. In either case we treat those as `beauty`.
if isinstance(exp_files[0], dict):
for aov, files in exp_files[0].items():
full_paths = []
for ef in files:
full_path = os.path.join(workspace, "renders", ef)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict[aov] = full_paths
else:
full_paths = []
for ef in exp_files:
full_path = os.path.join(workspace, "renders", ef)
full_path = full_path.replace("\\", "/")
full_paths.append(full_path)
aov_dict["beauty"] = full_paths
full_exp_files.append(aov_dict)
self.log.info(full_exp_files)
self.log.info("collecting layer: {}".format(layer_name))
# Get layer specific settings, might be overrides
data = {
"subset": expected_layer_name,
"attachTo": attachTo,
"setMembers": layer_name,
"publish": True,
"frameStart": int(self.get_render_attribute("startFrame",
layer=layer_name)),
"frameEnd": int(self.get_render_attribute("endFrame",
layer=layer_name)),
"byFrameStep": int(
self.get_render_attribute("byFrameStep",
layer=layer_name)),
"renderer": self.get_render_attribute("currentRenderer",
layer=layer_name),
# instance subset
"family": "renderlayer",
"families": ["renderlayer"],
"asset": asset,
"time": api.time(),
"author": context.data["user"],
# Add source to allow tracing back to the scene from
# which was submitted originally
"source": filepath,
"expectedFiles": full_exp_files,
"resolutionWidth": cmds.getAttr("defaultResolution.width"),
"resolutionHeight": cmds.getAttr("defaultResolution.height"),
"pixelAspect": cmds.getAttr("defaultResolution.pixelAspect")
}
# Apply each user defined attribute as data
for attr in cmds.listAttr(layer, userDefined=True) or list():
try:
value = cmds.getAttr("{}.{}".format(layer, attr))
except Exception:
# Some attributes cannot be read directly,
# such as mesh and color attributes. These
# are considered non-essential to this
# particular publishing pipeline.
value = None
data[attr] = value
# Include (optional) global settings
# Get global overrides and translate to Deadline values
overrides = self.parse_options(str(render_globals))
data.update(**overrides)
# Define nice label
label = "{0} ({1})".format(expected_layer_name, data["asset"])
label += " [{0}-{1}]".format(int(data["frameStart"]),
int(data["frameEnd"]))
instance = context.create_instance(expected_layer_name)
instance.data["label"] = label
instance.data.update(data)
pass
def parse_options(self, render_globals):
"""Get all overrides with a value, skip those without

Here's the kicker. These globals override defaults in the submission
integrator, but an empty value means no overriding is made.
Otherwise, Frames would override the default frames set under globals.

Args:
render_globals (str): collection of render globals

Returns:
dict: only overrides with values
"""
attributes = maya.read(render_globals)
options = {"renderGlobals": {}}
options["renderGlobals"]["Priority"] = attributes["priority"]
# Check for specific pools
pool_a, pool_b = self._discover_pools(attributes)
options["renderGlobals"].update({"Pool": pool_a})
if pool_b:
options["renderGlobals"].update({"SecondaryPool": pool_b})
# Machine list
machine_list = attributes["machineList"]
if machine_list:
key = "Whitelist" if attributes["whitelist"] else "Blacklist"
options['renderGlobals'][key] = machine_list
# Suspend publish job
state = "Suspended" if attributes["suspendPublishJob"] else "Active"
options["publishJobState"] = state
chunksize = attributes.get("framesPerTask", 1)
options["renderGlobals"]["ChunkSize"] = chunksize
# Override frames should be False if extendFrames is False. This is
# to ensure it doesn't go off doing crazy unpredictable things
override_frames = False
extend_frames = attributes.get("extendFrames", False)
if extend_frames:
override_frames = attributes.get("overrideExistingFrame", False)
options["extendFrames"] = extend_frames
options["overrideExistingFrame"] = override_frames
maya_render_plugin = "MayaBatch"
if not attributes.get("useMayaBatch", True):
maya_render_plugin = "MayaCmd"
options["mayaRenderPlugin"] = maya_render_plugin
return options

def _discover_pools(self, attributes):
pool_a = None
pool_b = None
# Check for specific pools
pool_b = []
if "primaryPool" in attributes:
pool_a = attributes["primaryPool"]
if "secondaryPool" in attributes:
pool_b = attributes["secondaryPool"]
else:
# Backwards compatibility
pool_str = attributes.get("pools", None)
if pool_str:
pool_a, pool_b = pool_str.split(";")
# Ensure empty entry token is caught
if pool_b == "-":
pool_b = None
return pool_a, pool_b
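
The backwards-compatible branch packs both pools into one semicolon-separated string; standalone:

pool_str = "farm;-"            # hypothetical legacy "pools" attribute
pool_a, pool_b = pool_str.split(";")
if pool_b == "-":              # "-" marks an empty secondary pool
    pool_b = None
print(pool_a, pool_b)          # farm None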
def _get_overrides(self, layer):
rset = self.maya_layers[layer].renderSettingsCollectionInstance()
return rset.getOverrides()

def get_render_attribute(self, attr, layer):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=layer)

class ExpectedFiles:

def get(self, renderer, layer):
if renderer.lower() == 'arnold':
return ExpectedFilesArnold(layer).get_files()
elif renderer.lower() == 'vray':
return ExpectedFilesVray(layer).get_files()
elif renderer.lower() == 'redshift':
return ExpectedFilesRedshift(layer).get_files()
elif renderer.lower() == 'mentalray':
return ExpectedFilesMentalray(layer).get_files()
elif renderer.lower() == 'renderman':
return ExpectedFilesRenderman(layer).get_files()
else:
raise UnsupportedRendererException(
"unsupported {}".format(renderer))

@six.add_metaclass(ABCMeta)
class AExpectedFiles:
renderer = None
layer = None

def __init__(self, layer):
self.layer = layer

@abstractmethod
def get_aovs(self):
pass

def get_renderer_prefix(self):
try:
file_prefix = cmds.getAttr(ImagePrefixes[self.renderer])
except KeyError:
raise UnsupportedRendererException(
"Unsupported renderer {}".format(self.renderer))
return file_prefix

def _get_layer_data(self):
# ______________________________________________
# ____________________/ ____________________________________________/
# 1 - get scene name /__________________/
# ____________________/
scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
scene_name, _ = os.path.splitext(scene_basename)
# ______________________________________________
# ____________________/ ____________________________________________/
# 2 - detect renderer /__________________/
# ____________________/
renderer = self.renderer
# ________________________________________________
# __________________/ ______________________________________________/
# 3 - image prefix /__________________/
# __________________/
file_prefix = self.get_renderer_prefix()
if not file_prefix:
raise RuntimeError("Image prefix not set")
default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey')
# ________________________________________________
# __________________/ ______________________________________________/
# 4 - get renderable cameras_____________/
# __________________/
# if we have <camera> token in prefix path we'll expect output for
# every renderable camera in layer.
renderable_cameras = self.get_renderable_cameras()
# ________________________________________________
# __________________/ ______________________________________________/
# 5 - get AOVs /____________________/
# __________________/
enabled_aovs = self.get_aovs()
layer_name = self.layer
if self.layer.startswith("rs_"):
layer_name = self.layer[3:]
start_frame = int(self.get_render_attribute('startFrame'))
end_frame = int(self.get_render_attribute('endFrame'))
frame_step = int(self.get_render_attribute('byFrameStep'))
padding = int(self.get_render_attribute('extensionPadding'))
scene_data = {
"frameStart": start_frame,
"frameEnd": end_frame,
"frameStep": frame_step,
"padding": padding,
"cameras": renderable_cameras,
"sceneName": scene_name,
"layerName": layer_name,
"renderer": renderer,
"defaultExt": default_ext,
"filePrefix": file_prefix,
"enabledAOVs": enabled_aovs
}
return scene_data
def _generate_single_file_sequence(self, layer_data):
expected_files = []
file_prefix = layer_data["filePrefix"]
for cam in layer_data["cameras"]:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
# this is required to remove unfilled aov token, for example
# in Redshift
(R_REMOVE_AOV_TOKEN, ""),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, "")
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"])):
expected_files.append(
'{}.{}.{}'.format(file_prefix,
str(frame).rjust(
layer_data["padding"], "0"),
layer_data["defaultExt"]))
return expected_files

def _generate_aov_file_sequences(self, layer_data):
expected_files = []
aov_file_list = {}
file_prefix = layer_data["filePrefix"]
for aov in layer_data["enabledAOVs"]:
for cam in layer_data["cameras"]:
mappings = (
(R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]),
(R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]),
(R_SUBSTITUTE_CAMERA_TOKEN, cam),
(R_SUBSTITUTE_AOV_TOKEN, aov[0]),
(R_CLEAN_FRAME_TOKEN, ""),
(R_CLEAN_EXT_TOKEN, "")
)
for regex, value in mappings:
file_prefix = re.sub(regex, value, file_prefix)
aov_files = []
for frame in range(
int(layer_data["frameStart"]),
int(layer_data["frameEnd"]) + 1,
int(layer_data["frameStep"])):
aov_files.append(
'{}.{}.{}'.format(
file_prefix,
str(frame).rjust(layer_data["padding"], "0"),
aov[1]))
# if we have more than one renderable camera, append
# camera name to AOV to allow per camera AOVs.
aov_name = aov[0]
if len(layer_data["cameras"]) > 1:
aov_name = "{}_{}".format(aov[0], cam)
aov_file_list[aov_name] = aov_files
file_prefix = layer_data["filePrefix"]
expected_files.append(aov_file_list)
return expected_files
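
Frame numbers are padded with plain `str.rjust`, as in this tiny sketch:

padding = 4
for frame in range(1, 4):
    print("prefix.{}.exr".format(str(frame).rjust(padding, "0")))
# prefix.0001.exr, prefix.0002.exr, prefix.0003.exr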
def get_files(self):
"""
This method will return list of expected files.

It will translate render token strings ('<RenderPass>', etc.) to
their values. This task is tricky as every renderer deals with this
differently. It depends on `get_aovs()` abstract method implemented
for every supported renderer.
"""
layer_data = self._get_layer_data()
expected_files = []
if layer_data.get("enabledAOVs"):
expected_files = self._generate_aov_file_sequences(layer_data)
else:
expected_files = self._generate_single_file_sequence(layer_data)
return expected_files

def get_renderable_cameras(self):
cam_parents = [cmds.listRelatives(x, ap=True)[-1]
for x in cmds.ls(cameras=True)]
renderable_cameras = []
for cam in cam_parents:
renderable = False
if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))):
renderable = True
for override in self.get_layer_overrides(
'{}.renderable'.format(cam), self.layer):
renderable = self.maya_is_true(override)
if renderable:
renderable_cameras.append(cam)
return renderable_cameras

def maya_is_true(self, attr_val):
"""
Whether a Maya attr evaluates to True.
When querying an attribute value from an ambiguous object the
Maya API will return a list of values, which need to be handled
properly to evaluate correctly.
"""
if isinstance(attr_val, types.BooleanType):
return attr_val
elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
return any(attr_val)
else:
return bool(attr_val)

def get_layer_overrides(self, attr, layer):
connections = cmds.listConnections(attr, plugs=True)
if connections:
for connection in connections:
if connection:
node_name = connection.split('.')[0]
if cmds.nodeType(node_name) == 'renderLayer':
attr_name = '%s.value' % '.'.join(
connection.split('.')[:-1])
if node_name == layer:
yield cmds.getAttr(attr_name)

def get_render_attribute(self, attr):
return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
layer=self.layer)
|
||||
|
||||
|
||||
class ExpectedFilesArnold(AExpectedFiles):
|
||||
|
||||
# Arnold AOV driver extension mapping
|
||||
# Is there a better way?
|
||||
aiDriverExtension = {
|
||||
'jpeg': 'jpg',
|
||||
'exr': 'exr',
|
||||
'deepexr': 'exr',
|
||||
'png': 'png',
|
||||
'tiff': 'tif',
|
||||
'mtoa_shaders': 'ass', # TODO: research what those last two should be
|
||||
'maya': ''
|
||||
}
|
||||
|
||||
def __init__(self, layer):
|
||||
super(ExpectedFilesArnold, self).__init__(layer)
|
||||
self.renderer = 'arnold'
|
||||
|
||||
def get_aovs(self):
|
||||
enabled_aovs = []
|
||||
try:
|
||||
if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode')
|
||||
and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')):
|
||||
# AOVs are merged in mutli-channel file
|
||||
return enabled_aovs
|
||||
except ValueError:
|
||||
# this occurs when Render Setting windows was not opened yet. In
|
||||
# such case there are no Arnold options created so query for AOVs
|
||||
# will fail. We terminate here as there are no AOVs specified then.
|
||||
# This state will most probably fail later on some Validator
|
||||
# anyway.
|
||||
return enabled_aovs
|
||||
|
||||
# AOVs are set to be rendered separately. We should expect
|
||||
# <RenderPass> token in path.
|
||||
|
||||
ai_aovs = [n for n in cmds.ls(type='aiAOV')]
|
||||
|
||||
for aov in ai_aovs:
|
||||
enabled = self.maya_is_true(
|
||||
cmds.getAttr('{}.enabled'.format(aov)))
|
||||
ai_driver = cmds.listConnections(
|
||||
'{}.outputs'.format(aov))[0]
|
||||
ai_translator = cmds.getAttr(
|
||||
'{}.aiTranslator'.format(ai_driver))
|
||||
try:
|
||||
aov_ext = self.aiDriverExtension[ai_translator]
|
||||
except KeyError:
|
||||
msg = ('Unrecognized arnold '
|
||||
'driver format for AOV - {}').format(
|
||||
cmds.getAttr('{}.name'.format(aov))
|
||||
)
|
||||
raise AOVError(msg)
|
||||
|
||||
for override in self.get_layer_overrides(
|
||||
'{}.enabled'.format(aov), self.layer):
|
||||
enabled = self.maya_is_true(override)
|
||||
if enabled:
|
||||
# If aov RGBA is selected, arnold will translate it to `beauty`
|
||||
aov_name = cmds.getAttr('%s.name' % aov)
|
||||
if aov_name == 'RGBA':
|
||||
aov_name = 'beauty'
|
||||
enabled_aovs.append(
|
||||
(
|
||||
aov_name,
|
||||
aov_ext
|
||||
)
|
||||
)
|
||||
# Append 'beauty' as this is arnolds
|
||||
# default. If <RenderPass> token is specified and no AOVs are
|
||||
# defined, this will be used.
|
||||
enabled_aovs.append(
|
||||
(
|
||||
u'beauty',
|
||||
cmds.getAttr('defaultRenderGlobals.imfPluginKey')
|
||||
)
|
||||
)
|
||||
return enabled_aovs
|
||||
|
||||
|
||||
class ExpectedFilesVray(AExpectedFiles):
|
||||
|
||||
# V-ray file extension mapping
|
||||
# 5 - exr
|
||||
# 6 - multichannel exr
|
||||
# 13 - deep exr
|
||||
|
||||
def __init__(self, layer):
|
||||
super(ExpectedFilesVray, self).__init__(layer)
|
||||
self.renderer = 'vray'
|
||||
|
||||
def get_renderer_prefix(self):
|
||||
prefix = super(ExpectedFilesVray, self).get_renderer_prefix()
|
||||
prefix = "{}_<aov>".format(prefix)
|
||||
return prefix
|
||||
|
||||
def get_files(self):
|
||||
expected_files = super(ExpectedFilesVray, self).get_files()
|
||||
|
||||
# we need to add one sequence for plain beauty if AOVs are enabled.
|
||||
# as vray output beauty without 'beauty' in filename.
|
||||
|
||||
layer_data = self._get_layer_data()
|
||||
if layer_data.get("enabledAOVs"):
|
||||
expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501
|
||||
|
||||
return expected_files
|
||||
|
||||
def get_aovs(self):
|
||||
enabled_aovs = []
|
||||
|
||||
try:
|
||||
# really? do we set it in vray just by selecting multichannel exr?
|
||||
if cmds.getAttr(
|
||||
"vraySettings.imageFormatStr") == "exr (multichannel)":
|
||||
# AOVs are merged in mutli-channel file
|
||||
return enabled_aovs
|
||||
except ValueError:
|
||||
# this occurs when Render Setting windows was not opened yet. In
|
||||
# such case there are no Arnold options created so query for AOVs
|
||||
# will fail. We terminate here as there are no AOVs specified then.
|
||||
# This state will most probably fail later on some Validator
|
||||
# anyway.
|
||||
return enabled_aovs
|
||||
|
||||
default_ext = cmds.getAttr('vraySettings.imageFormatStr')
|
||||
if default_ext == "exr (multichannel)" or default_ext == "exr (deep)":
|
||||
default_ext = "exr"
|
||||
|
||||
vr_aovs = [n for n in cmds.ls(
|
||||
type=["VRayRenderElement", "VRayRenderElementSet"])]
|
||||
|
||||
# todo: find out how to detect multichannel exr for vray
|
||||
for aov in vr_aovs:
|
||||
enabled = self.maya_is_true(
|
||||
cmds.getAttr('{}.enabled'.format(aov)))
|
||||
for override in self.get_layer_overrides(
|
||||
'{}.enabled'.format(aov), 'rs_{}'.format(self.layer)):
|
||||
enabled = self.maya_is_true(override)
|
||||
|
||||
if enabled:
|
||||
# todo: find how vray set format for AOVs
|
||||
enabled_aovs.append(
|
||||
(
|
||||
self._get_vray_aov_name(aov),
|
||||
default_ext)
|
||||
)
|
||||
return enabled_aovs
|
||||
|
||||
def _get_vray_aov_name(self, node):
|
||||
|
||||
# Get render element pass type
|
||||
vray_node_attr = next(attr for attr in cmds.listAttr(node)
|
||||
if attr.startswith("vray_name"))
|
||||
pass_type = vray_node_attr.rsplit("_", 1)[-1]
|
||||
|
||||
# Support V-Ray extratex explicit name (if set by user)
|
||||
if pass_type == "extratex":
|
||||
explicit_attr = "{}.vray_explicit_name_extratex".format(node)
|
||||
explicit_name = cmds.getAttr(explicit_attr)
|
||||
if explicit_name:
|
||||
return explicit_name
|
||||
|
||||
# Node type is in the attribute name but we need to check if value
|
||||
# of the attribute as it can be changed
|
||||
return cmds.getAttr("{}.{}".format(node, vray_node_attr))
|
||||
|
||||
|
||||
class ExpectedFilesRedshift(AExpectedFiles):
|
||||
|
||||
# mapping redshift extension dropdown values to strings
|
||||
ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg']
|
||||
|
||||
def __init__(self, layer):
|
||||
super(ExpectedFilesRedshift, self).__init__(layer)
|
||||
self.renderer = 'redshift'
|
||||
|
||||
def get_renderer_prefix(self):
|
||||
prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix()
|
||||
prefix = "{}_<aov>".format(prefix)
|
||||
return prefix
|
||||
|
||||
def get_files(self):
|
||||
expected_files = super(ExpectedFilesRedshift, self).get_files()
|
||||
|
||||
# we need to add one sequence for plain beauty if AOVs are enabled.
|
||||
# as redshift output beauty without 'beauty' in filename.
|
||||
|
||||
layer_data = self._get_layer_data()
|
||||
if layer_data.get("enabledAOVs"):
|
||||
expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501
|
||||
|
||||
return expected_files
|
||||
|
||||
def get_aovs(self):
|
||||
enabled_aovs = []
|
||||
|
||||
try:
|
||||
if self.maya_is_true(
|
||||
cmds.getAttr("redshiftOptions.exrForceMultilayer")):
|
||||
# AOVs are merged in mutli-channel file
|
||||
return enabled_aovs
|
||||
except ValueError:
|
||||
# this occurs when Render Setting windows was not opened yet. In
|
||||
# such case there are no Arnold options created so query for AOVs
|
||||
# will fail. We terminate here as there are no AOVs specified then.
|
||||
# This state will most probably fail later on some Validator
|
||||
# anyway.
|
||||
return enabled_aovs
|
||||
|
||||
default_ext = self.ext_mapping[
|
||||
cmds.getAttr('redshiftOptions.imageFormat')
|
||||
]
|
||||
rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')]
|
||||
|
||||
# todo: find out how to detect multichannel exr for redshift
|
||||
for aov in rs_aovs:
|
||||
enabled = self.maya_is_true(
|
||||
cmds.getAttr('{}.enabled'.format(aov)))
|
||||
for override in self.get_layer_overrides(
|
||||
'{}.enabled'.format(aov), self.layer):
|
||||
enabled = self.maya_is_true(override)
|
||||
|
||||
if enabled:
|
||||
enabled_aovs.append(
|
||||
(
|
||||
cmds.getAttr('%s.name' % aov),
|
||||
default_ext
|
||||
)
|
||||
)
|
||||
|
||||
return enabled_aovs
|
||||
|
||||
|
||||
class ExpectedFilesRenderman(AExpectedFiles):

    def __init__(self, layer):
        super(ExpectedFilesRenderman, self).__init__(layer)
        self.renderer = 'renderman'

    def get_aovs(self):
        enabled_aovs = []

        default_ext = "exr"
        displays = cmds.listConnections("rmanGlobals.displays")
        for aov in displays:
            aov_name = str(aov)
            if aov_name == "rmanDefaultDisplay":
                aov_name = "beauty"

            enabled = self.maya_is_true(
                cmds.getAttr("{}.enable".format(aov)))
            for override in self.get_layer_overrides(
                    '{}.enable'.format(aov), self.layer):
                enabled = self.maya_is_true(override)

            if enabled:
                enabled_aovs.append(
                    (
                        aov_name,
                        default_ext
                    )
                )

        return enabled_aovs

    def get_files(self):
        """
        In Renderman we hack it by prepending a path. This path would
        normally be translated from `rmanGlobals.imageOutputDir`. We skip
        this and hardcode the prepended path we expect. There is no place
        for the user to mess around with this setting anyway, and it is
        enforced in the render settings validator.
        """
        layer_data = self._get_layer_data()
        new_aovs = {}

        expected_files = super(ExpectedFilesRenderman, self).get_files()
        # we always get beauty
        for aov, files in expected_files[0].items():
            new_files = []
            for file in files:
                new_file = "{}/{}/{}".format(layer_data["sceneName"],
                                             layer_data["layerName"],
                                             file)
                new_files.append(new_file)
            new_aovs[aov] = new_files

        return [new_aovs]


class ExpectedFilesMentalray(AExpectedFiles):

    def __init__(self, layer):
        raise UnimplementedRendererException('Mentalray not implemented')

    def get_aovs(self):
        return []


class AOVError(Exception):
    pass


class UnsupportedRendererException(Exception):
    pass


class UnimplementedRendererException(Exception):
    pass
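For reference, the shape of the expected-files payload these classes return (values illustrative, not from the diff): with AOVs enabled it is a one-element list holding an AOV-to-frames dict, otherwise a plain file sequence. This is exactly the distinction the `isinstance(exp[0], dict)` branch in the Deadline submitter further below relies on:

    # AOVs enabled - one dict keyed by AOV name:
    expected_files = [{
        "beauty": ["maya/scene01/Main/Main_beauty.0001.exr"],
        "diffuse": ["maya/scene01/Main/Main_diffuse.0001.exr"],
    }]

    # AOVs disabled - a flat sequence:
    expected_files = ["maya/scene01/Main/Main.0001.exr"]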
@@ -17,7 +17,7 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin):

    def process(self, instance):
        layer = instance.data["setMembers"]

        self.log.info("layer: {}".format(layer))
        cameras = cmds.ls(type="camera", long=True)
        renderable = [c for c in cameras if
                      lib.get_attr_in_layer("%s.renderable" % c, layer=layer)]

@@ -1,201 +0,0 @@
from maya import cmds

import pyblish.api

from avalon import maya, api
import pype.maya.lib as lib


class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
    """Gather instances by active render layers"""

    order = pyblish.api.CollectorOrder + 0.01
    hosts = ["maya"]
    label = "Render Layers"

    def process(self, context):

        asset = api.Session["AVALON_ASSET"]
        filepath = context.data["currentFile"].replace("\\", "/")

        # Get render globals node
        try:
            render_globals = cmds.ls("renderglobalsMain")[0]
            for instance in context:
                self.log.debug(instance.name)
                if instance.data['family'] == 'workfile':
                    instance.data['publish'] = True
        except IndexError:
            self.log.info("Skipping renderlayer collection, no "
                          "renderGlobalsDefault found..")
            return
        # Get all valid renderlayers
        # This is how Maya populates the renderlayer display
        rlm_attribute = "renderLayerManager.renderLayerId"
        connected_layers = cmds.listConnections(rlm_attribute) or []
        valid_layers = set(connected_layers)

        # Get all renderlayers and check their state
        renderlayers = [i for i in cmds.ls(type="renderLayer") if
                        cmds.getAttr("{}.renderable".format(i)) and not
                        cmds.referenceQuery(i, isNodeReferenced=True)]

        # Sort by displayOrder
        def sort_by_display_order(layer):
            return cmds.getAttr("%s.displayOrder" % layer)

        renderlayers = sorted(renderlayers, key=sort_by_display_order)

        for layer in renderlayers:

            # Check if layer is in valid (linked) layers
            if layer not in valid_layers:
                self.log.warning("%s is invalid, skipping" % layer)
                continue

            if layer.endswith("defaultRenderLayer"):
                continue
            else:
                # Remove Maya render setup prefix `rs_`
                layername = layer.split("rs_", 1)[-1]

            # Get layer specific settings, might be overrides
            data = {
                "subset": layername,
                "setMembers": layer,
                "publish": True,
                "frameStart": self.get_render_attribute("startFrame",
                                                        layer=layer),
                "frameEnd": self.get_render_attribute("endFrame",
                                                      layer=layer),
                "byFrameStep": self.get_render_attribute("byFrameStep",
                                                         layer=layer),
                "renderer": self.get_render_attribute("currentRenderer",
                                                      layer=layer),

                # instance subset
                "family": "Render Layers",
                "families": ["renderlayer"],
                "asset": asset,
                "time": api.time(),
                "author": context.data["user"],

                # Add source to allow tracing back to the scene from
                # which was submitted originally
                "source": filepath
            }

            # Apply each user defined attribute as data
            for attr in cmds.listAttr(layer, userDefined=True) or list():
                try:
                    value = cmds.getAttr("{}.{}".format(layer, attr))
                except Exception:
                    # Some attributes cannot be read directly,
                    # such as mesh and color attributes. These
                    # are considered non-essential to this
                    # particular publishing pipeline.
                    value = None

                data[attr] = value

            # Include (optional) global settings
            # TODO(marcus): Take into account layer overrides
            # Get global overrides and translate to Deadline values
            overrides = self.parse_options(render_globals)
            data.update(**overrides)

            # Define nice label
            label = "{0} ({1})".format(layername, data["asset"])
            label += " [{0}-{1}]".format(int(data["frameStart"]),
                                         int(data["frameEnd"]))

            instance = context.create_instance(layername)
            instance.data["label"] = label
            instance.data.update(data)

    def get_render_attribute(self, attr, layer):
        return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr),
                                     layer=layer)

    def parse_options(self, render_globals):
        """Get all overrides with a value, skip those without

        Here's the kicker. These globals override defaults in the submission
        integrator, but an empty value means no overriding is made.
        Otherwise, Frames would override the default frames set under globals.

        Args:
            render_globals (str): collection of render globals

        Returns:
            dict: only overrides with values
        """

        attributes = maya.read(render_globals)

        options = {"renderGlobals": {}}
        options["renderGlobals"]["Priority"] = attributes["priority"]

        # Check for specific pools
        pool_a, pool_b = self._discover_pools(attributes)
        options["renderGlobals"].update({"Pool": pool_a})
        if pool_b:
            options["renderGlobals"].update({"SecondaryPool": pool_b})

        legacy = attributes["useLegacyRenderLayers"]
        options["renderGlobals"]["UseLegacyRenderLayers"] = legacy

        # Machine list
        machine_list = attributes["machineList"]
        if machine_list:
            key = "Whitelist" if attributes["whitelist"] else "Blacklist"
            options['renderGlobals'][key] = machine_list

        # Suspend publish job
        state = "Suspended" if attributes["suspendPublishJob"] else "Active"
        options["publishJobState"] = state

        chunksize = attributes.get("framesPerTask", 1)
        options["renderGlobals"]["ChunkSize"] = chunksize

        # Override frames should be False if extendFrames is False. This is
        # to ensure it doesn't go off doing crazy unpredictable things
        override_frames = False
        extend_frames = attributes.get("extendFrames", False)
        if extend_frames:
            override_frames = attributes.get("overrideExistingFrame", False)

        options["extendFrames"] = extend_frames
        options["overrideExistingFrame"] = override_frames

        maya_render_plugin = "MayaBatch"
        if not attributes.get("useMayaBatch", True):
            maya_render_plugin = "MayaCmd"

        options["mayaRenderPlugin"] = maya_render_plugin

        return options

    def _discover_pools(self, attributes):

        pool_a = None
        pool_b = None

        # Check for specific pools
        pool_b = []
        if "primaryPool" in attributes:
            pool_a = attributes["primaryPool"]
            if "secondaryPool" in attributes:
                pool_b = attributes["secondaryPool"]

        else:
            # Backwards compatibility
            pool_str = attributes.get("pools", None)
            if pool_str:
                pool_a, pool_b = pool_str.split(";")

        # Ensure empty entry token is caught
        if pool_b == "-":
            pool_b = None

        return pool_a, pool_b
28 pype/plugins/maya/publish/determine_future_version.py Normal file

@@ -0,0 +1,28 @@
import pyblish.api


class DetermineFutureVersion(pyblish.api.InstancePlugin):
    """
    This will determine the version of the subset the render should be
    attached to.
    """
    label = "Determine Subset Version"
    order = pyblish.api.IntegratorOrder
    hosts = ["maya"]
    families = ["renderlayer"]

    def process(self, instance):
        context = instance.context
        attach_to_subsets = [s["subset"] for s in instance.data['attachTo']]

        if not attach_to_subsets:
            return

        for i in context:
            if i.data["subset"] in attach_to_subsets:
                # this will get the corresponding subset in the attachTo
                # list so we can set the version there
                sub = next(item for item in instance.data['attachTo'] if item["subset"] == i.data["subset"])  # noqa: E501

                sub["version"] = i.data.get("version", 1)
                self.log.info("render will be attached to {} v{}".format(
                    sub["subset"], sub["version"]
                ))
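The plugin above only reads `subset` from and writes `version` onto each `attachTo` entry, so a minimal illustrative payload (names hypothetical) looks like:

    instance_data = {"attachTo": [{"subset": "animationMain"}]}
    # after process() has run against a context instance publishing
    # "animationMain" as version 3:
    # instance_data["attachTo"] == [{"subset": "animationMain", "version": 3}]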
@@ -117,6 +117,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
        else:
            optional = True

    use_published = True

    def process(self, instance):

        DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
@@ -125,21 +127,66 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):

        context = instance.context
        workspace = context.data["workspaceDir"]
        anatomy = context.data['anatomy']

        filepath = None

        if self.use_published:
            for i in context:
                if "workfile" in i.data["families"]:
                    assert i.data["publish"] is True, (
                        "Workfile (scene) must be published along")
                    template_data = i.data.get("anatomyData")
                    rep = i.data.get("representations")[0].get("name")
                    template_data["representation"] = rep
                    template_data["ext"] = rep
                    template_data["comment"] = None
                    anatomy_filled = anatomy.format(template_data)
                    template_filled = anatomy_filled["publish"]["path"]
                    filepath = os.path.normpath(template_filled)
                    self.log.info("Using published scene for render {}".format(
                        filepath))

                    # now we need to switch scene in expected files
                    # because <scene> token will now point to published
                    # scene file and that might differ from current one
                    new_scene = os.path.splitext(
                        os.path.basename(filepath))[0]
                    orig_scene = os.path.splitext(
                        os.path.basename(context.data["currentFile"]))[0]
                    exp = instance.data.get("expectedFiles")

                    if isinstance(exp[0], dict):
                        # we have aovs and we need to iterate over them
                        new_exp = {}
                        for aov, files in exp[0].items():
                            replaced_files = []
                            for f in files:
                                replaced_files.append(
                                    f.replace(orig_scene, new_scene)
                                )
                            new_exp[aov] = replaced_files
                        instance.data["expectedFiles"] = [new_exp]
                    else:
                        new_exp = []
                        for f in exp:
                            new_exp.append(
                                f.replace(orig_scene, new_scene)
                            )
                        instance.data["expectedFiles"] = [new_exp]
                    self.log.info("Scene name was switched {} -> {}".format(
                        orig_scene, new_scene
                    ))

        allInstances = []
        for result in context.data["results"]:
            if (result["instance"] is not None and
                    result["instance"] not in allInstances):
                allInstances.append(result["instance"])

        for inst in allInstances:
            print(inst)
            if inst.data['family'] == 'scene':
                filepath = inst.data['destination_list'][0]

        # fallback if nothing was set
        if not filepath:
            self.log.warning("Falling back to workfile")
            filepath = context.data["currentFile"]

        self.log.debug(filepath)
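The swap is a plain substring replacement on each expected path; with hypothetical scene names:

    orig_scene = "shot010_v001"
    new_scene = "shot010_v002"
    f = "maya/shot010_v001/Main/Main_beauty.0001.exr"
    print(f.replace(orig_scene, new_scene))
    # -> maya/shot010_v002/Main/Main_beauty.0001.exr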
@@ -150,8 +197,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
        dirname = os.path.join(workspace, "renders")
        renderlayer = instance.data['setMembers']          # rs_beauty
        renderlayer_name = instance.data['subset']         # beauty
        renderlayer_globals = instance.data["renderGlobals"]
        legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
        # renderlayer_globals = instance.data["renderGlobals"]
        # legacy_layers = renderlayer_globals["UseLegacyRenderLayers"]
        deadline_user = context.data.get("deadlineUser", getpass.getuser())
        jobname = "%s - %s" % (filename, instance.name)

@@ -211,9 +258,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
            # Only render layers are considered renderable in this pipeline
            "UsingRenderLayers": True,

            # Use legacy Render Layer system
            "UseLegacyRenderLayers": legacy_layers,

            # Render only this layer
            "RenderLayer": renderlayer,
97 pype/plugins/maya/publish/validate_ass_relative_paths.py Normal file

@@ -0,0 +1,97 @@
import os
import types

import maya.cmds as cmds

import pyblish.api
import pype.api
import pype.maya.action


class ValidateAssRelativePaths(pyblish.api.InstancePlugin):
    """Ensure exported ass file has relative texture paths set"""

    order = pype.api.ValidateContentsOrder
    hosts = ['maya']
    families = ['ass']
    label = "ASS has relative texture paths"
    actions = [pype.api.RepairAction]

    def process(self, instance):
        # we cannot ask for this until the user opens render settings, as
        # `defaultArnoldRenderOptions` doesn't exist before that
        try:
            relative_texture = cmds.getAttr(
                "defaultArnoldRenderOptions.absolute_texture_paths")
            relative_procedural = cmds.getAttr(
                "defaultArnoldRenderOptions.absolute_procedural_paths")
            texture_search_path = cmds.getAttr(
                "defaultArnoldRenderOptions.tspath"
            )
            procedural_search_path = cmds.getAttr(
                "defaultArnoldRenderOptions.pspath"
            )
        except ValueError:
            assert False, ("Cannot validate - render settings were not "
                           "opened yet, so Arnold settings cannot be "
                           "validated")

        scene_dir, scene_basename = os.path.split(cmds.file(q=True, loc=True))
        scene_name, _ = os.path.splitext(scene_basename)
        project_root = "{}{}{}".format(
            os.environ.get("AVALON_PROJECTS"),
            os.path.sep,
            os.environ.get("AVALON_PROJECT")
        )
        assert self.maya_is_true(relative_texture) is not True, \
            ("Texture path is set to be absolute")
        assert self.maya_is_true(relative_procedural) is not True, \
            ("Procedural path is set to be absolute")

        texture_search_path = texture_search_path.replace("\\", "/")
        procedural_search_path = procedural_search_path.replace("\\", "/")
        project_root = project_root.replace("\\", "/")

        assert project_root in texture_search_path, \
            ("Project root is not in texture_search_path")
        assert project_root in procedural_search_path, \
            ("Project root is not in procedural_search_path")

    @classmethod
    def repair(cls, instance):
        texture_search_path = cmds.getAttr(
            "defaultArnoldRenderOptions.tspath"
        )
        procedural_search_path = cmds.getAttr(
            "defaultArnoldRenderOptions.pspath"
        )

        project_root = "{}{}{}".format(
            os.environ.get("AVALON_PROJECTS"),
            os.path.sep,
            os.environ.get("AVALON_PROJECT"),
        ).replace("\\", "/")

        cmds.setAttr("defaultArnoldRenderOptions.tspath",
                     project_root + os.pathsep + texture_search_path,
                     type="string")
        cmds.setAttr("defaultArnoldRenderOptions.pspath",
                     project_root + os.pathsep + procedural_search_path,
                     type="string")
        cmds.setAttr("defaultArnoldRenderOptions.absolute_procedural_paths",
                     False)
        cmds.setAttr("defaultArnoldRenderOptions.absolute_texture_paths",
                     False)

    def maya_is_true(self, attr_val):
        """
        Whether a Maya attr evaluates to True.

        When querying an attribute value from an ambiguous object the
        Maya API will return a list of values, which need to be handled
        properly to evaluate correctly.
        """
        if isinstance(attr_val, types.BooleanType):
            return attr_val
        elif isinstance(attr_val, (types.ListType, types.GeneratorType)):
            return any(attr_val)
        else:
            return bool(attr_val)
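The repair simply prepends the project root to the existing search paths, separated by the platform's `os.pathsep`; with illustrative values:

    import os

    project_root = "P:/projects/MyProject"      # hypothetical
    texture_search_path = "X:/shared/textures"  # hypothetical
    print(project_root + os.pathsep + texture_search_path)
    # Windows:      P:/projects/MyProject;X:/shared/textures
    # Linux/macOS:  P:/projects/MyProject:X:/shared/textures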
@@ -1,17 +1,26 @@
import re

import pyblish.api
import pype.api
import pype.maya.action

from maya import cmds


ImagePrefixes = {
    'mentalray': 'defaultRenderGlobals.imageFilePrefix',
    'vray': 'vraySettings.fileNamePrefix',
    'arnold': 'defaultRenderGlobals.imageFilePrefix',
    'renderman': 'defaultRenderGlobals.imageFilePrefix',
    'redshift': 'defaultRenderGlobals.imageFilePrefix'
}


class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
    """Only one camera may be renderable in a layer.

    Currently the pipeline supports only a single camera per layer.
    This is because when multiple cameras are rendered the output files
    automatically get different names because the <Camera> render token
    is not in the output path. As such the output files conflict with how
    our pipeline expects the output.
    """Validate renderable camera count for layer and <Camera> token.

    The pipeline supports multiple renderable cameras per layer, but the
    image prefix must contain the <Camera> token.
    """

    order = pype.api.ValidateContentsOrder

@@ -21,6 +30,8 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
                "vrayscene"]
    actions = [pype.maya.action.SelectInvalidAction]

    R_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)

    def process(self, instance):
        """Process all the cameras in the instance"""
        invalid = self.get_invalid(instance)
@@ -31,8 +42,17 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin):
    def get_invalid(cls, instance):

        cameras = instance.data.get("cameras", [])
        renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower()
        # handle various renderman names
        if renderer.startswith('renderman'):
            renderer = 'renderman'
        file_prefix = cmds.getAttr(ImagePrefixes[renderer])

        if len(cameras) > 1:
            if re.search(cls.R_CAMERA_TOKEN, file_prefix):
                # if there is a <Camera> token in the prefix and we have
                # more than 1 camera, all is ok.
                return
            cls.log.error("Multiple renderable cameras found for %s: %s " %
                          (instance.data["setMembers"], cameras))
            return [instance.data["setMembers"]] + cameras
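`R_CAMERA_TOKEN` accepts both the `%c` shorthand and the `<Camera>` token, case-insensitively; a quick check against a hypothetical prefix:

    import re

    R_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
    prefix = "maya/<Scene>/<RenderLayer>/<Camera>_<RenderPass>"
    print(bool(re.search(R_CAMERA_TOKEN, prefix)))  # True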
@@ -1,4 +1,5 @@
import os
import re

from maya import cmds, mel
import pymel.core as pm

@@ -11,9 +12,13 @@ import pype.maya.lib as lib
class ValidateRenderSettings(pyblish.api.InstancePlugin):
    """Validates the global render settings

    * File Name Prefix must be as follows:
        * vray: maya/<Scene>/<Layer>/<Layer>
        * default: maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>
    * File Name Prefix must start with: `maya/<Scene>`
        all other tokens are customizable, but sane values are:

        `maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>`

        The <Camera> token is supported as well, useful for multiple
        renderable cameras per render layer.

    * Frame Padding must be:
        * default: 4
@@ -35,16 +40,47 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
    families = ["renderlayer"]
    actions = [pype.api.RepairAction]

    ImagePrefixes = {
        'mentalray': 'defaultRenderGlobals.imageFilePrefix',
        'vray': 'vraySettings.fileNamePrefix',
        'arnold': 'defaultRenderGlobals.imageFilePrefix',
        'renderman': 'rmanGlobals.imageFileFormat',
        'redshift': 'defaultRenderGlobals.imageFilePrefix'
    }

    ImagePrefixTokens = {
        'arnold': 'maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>',
        'redshift': 'maya/<Scene>/<RenderLayer>/<RenderLayer>',
        'vray': 'maya/<scene>/<Layer>/<Layer>',
        'renderman': '<layer>_<aov>.<f4>.<ext>'
    }

    # WARNING: There seems to be a bug in Renderman - translating the
    # <scene> token leaves Maya's default image prefix behind. So instead
    # of `SceneName_v01` it translates to:
    # `SceneName_v01/<RenderLayer>/<RenderLayer>_<RenderPass>`, which means
    # for example:
    # `SceneName_v01/Main/Main_<RenderPass>`. A possible solution is to
    # define a custom token like <scene_name> pointing to the determined
    # scene name.
    RendermanDirPrefix = "<ws>/renders/maya/<scene>/<layer>"

    R_AOV_TOKEN = re.compile(
        r'%a|<aov>|<renderpass>', re.IGNORECASE)
    R_LAYER_TOKEN = re.compile(
        r'%l|<layer>|<renderlayer>', re.IGNORECASE)
    R_CAMERA_TOKEN = re.compile(r'%c|<camera>', re.IGNORECASE)
    R_SCENE_TOKEN = re.compile(r'%s|<scene>', re.IGNORECASE)

    DEFAULT_PADDING = 4
    RENDERER_PREFIX = {"vray": "maya/<scene>/<Layer>/<Layer>"}
    VRAY_PREFIX = "maya/<scene>/<Layer>/<Layer>"
    DEFAULT_PREFIX = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"

    def process(self, instance):

        invalid = self.get_invalid(instance)
        if invalid:
            raise ValueError("Invalid render settings found for '%s'!"
                             % instance.name)
        assert invalid is False, ("Invalid render settings "
                                  "found for '{}'!".format(instance.name))

    @classmethod
    def get_invalid(cls, instance):

@@ -53,10 +89,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

        renderer = instance.data['renderer']
        layer = instance.data['setMembers']
        cameras = instance.data.get("cameras", [])

        # Get the node attributes for current renderer
        attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default'])
        prefix = lib.get_attr_in_layer("{node}.{prefix}".format(**attrs),
        prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer],
                                       layer=layer)
        padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs),
                                        layer=layer)

@@ -68,12 +105,63 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
            cls.log.error("Animation needs to be enabled. Use the same "
                          "frame for start and end to render single frame")

        fname_prefix = cls.get_prefix(renderer)

        if prefix != fname_prefix:
        if not prefix.lower().startswith("maya/<scene>"):
            invalid = True
            cls.log.error("Wrong file name prefix: %s (expected: %s)"
                          % (prefix, fname_prefix))
            cls.log.error("Wrong image prefix [ {} ] - "
                          "doesn't start with: 'maya/<scene>'".format(prefix))

        if not re.search(cls.R_LAYER_TOKEN, prefix):
            invalid = True
            cls.log.error("Wrong image prefix [ {} ] - "
                          "doesn't have: '<renderlayer>' or "
                          "'<layer>' token".format(prefix))

        if len(cameras) > 1:
            if not re.search(cls.R_CAMERA_TOKEN, prefix):
                invalid = True
                cls.log.error("Wrong image prefix [ {} ] - "
                              "doesn't have: '<camera>' token".format(prefix))

        # renderer specific checks
        if renderer == "vray":
            # no vray checks implemented yet
            pass
        elif renderer == "redshift":
            # no redshift check implemented yet
            pass
        elif renderer == "renderman":
            file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat")
            dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir")

            if file_prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
                invalid = True
                cls.log.error("Wrong image prefix [ {} ]".format(file_prefix))

            if dir_prefix.lower() != cls.RendermanDirPrefix.lower():
                invalid = True
                cls.log.error("Wrong directory prefix [ {} ]".format(
                    dir_prefix))

        else:
            multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs")
            if multichannel:
                if re.search(cls.R_AOV_TOKEN, prefix):
                    invalid = True
                    cls.log.error("Wrong image prefix [ {} ] - "
                                  "You can't use the '<renderpass>' token "
                                  "with merge AOVs turned on".format(prefix))
            else:
                if not re.search(cls.R_AOV_TOKEN, prefix):
                    invalid = True
                    cls.log.error("Wrong image prefix [ {} ] - "
                                  "doesn't have: '<renderpass>' or "
                                  "'<aov>' token".format(prefix))

        # prefix check
        if prefix.lower() != cls.ImagePrefixTokens[renderer].lower():
            cls.log.warning("warning: prefix differs from "
                            "recommended {}".format(
                                cls.ImagePrefixTokens[renderer]))

        if padding != cls.DEFAULT_PADDING:
            invalid = True
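The base-prefix check is case-insensitive because the whole prefix is lowercased before comparison, so a `maya/<Scene>/...` value passes the `maya/<scene>` test:

    prefix = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
    print(prefix.lower().startswith("maya/<scene>"))  # True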
@@ -82,21 +170,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):

        return invalid

    @classmethod
    def get_prefix(cls, renderer):
        prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX)
        # maya.cmds and pymel.core return only the default project
        # directory, not the current one.
        output_path = os.path.join(
            mel.eval("workspace -q -rd;"), pm.workspace.fileRules["images"]
        )
        # Workfile paths can be configured to have host name in file path.
        # In this case we want to avoid duplicate folder names.
        if "maya" in output_path.lower():
            prefix = prefix.replace("maya/", "")

        return prefix

    @classmethod
    def repair(cls, instance):

@@ -108,14 +181,23 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
        render_attrs = lib.RENDER_ATTRS.get(renderer, default)

        # Repair prefix
        node = render_attrs["node"]
        prefix_attr = render_attrs["prefix"]
        if renderer != "renderman":
            node = render_attrs["node"]
            prefix_attr = render_attrs["prefix"]

        fname_prefix = cls.get_prefix(renderer)
        cmds.setAttr("{}.{}".format(node, prefix_attr),
                     fname_prefix, type="string")
            fname_prefix = cls.ImagePrefixTokens[renderer]
            cmds.setAttr("{}.{}".format(node, prefix_attr),
                         fname_prefix, type="string")

        # Repair padding
        padding_attr = render_attrs["padding"]
        cmds.setAttr("{}.{}".format(node, padding_attr),
                     cls.DEFAULT_PADDING)
            # Repair padding
            padding_attr = render_attrs["padding"]
            cmds.setAttr("{}.{}".format(node, padding_attr),
                         cls.DEFAULT_PADDING)
        else:
            # renderman handles stuff differently
            cmds.setAttr("rmanGlobals.imageFileFormat",
                         cls.ImagePrefixTokens[renderer],
                         type="string")
            cmds.setAttr("rmanGlobals.imageOutputDir",
                         cls.RendermanDirPrefix,
                         type="string")
@@ -55,8 +55,6 @@ class CollectClipHandles(api.ContextPlugin):
            # debug printing
            self.log.debug("_ s_asset_data: `{}`".format(
                s_asset_data))
            self.log.debug("_ instance.data[handles]: `{}`".format(
                instance.data["handles"]))
            self.log.debug("_ instance.data[handleStart]: `{}`".format(
                instance.data["handleStart"]))
            self.log.debug("_ instance.data[handleEnd]: `{}`".format(
@@ -4,6 +4,7 @@ import tempfile
import pyblish.api
import clique
import pype.api
import pype.lib


class ExtractReviewSP(pyblish.api.InstancePlugin):

@@ -148,12 +149,7 @@ class ExtractReviewSP(pyblish.api.InstancePlugin):
        # output filename
        output_args.append(full_output_path)

        ffmpeg_path = os.getenv("FFMPEG_PATH", "")
        if ffmpeg_path:
            ffmpeg_path += "/ffmpeg"
        else:
            ffmpeg_path = "ffmpeg"

        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
        mov_args = [
            ffmpeg_path,
            " ".join(input_args),

@@ -3,6 +3,7 @@ import tempfile
import subprocess
import pyblish.api
import pype.api
import pype.lib


class ExtractThumbnailSP(pyblish.api.InstancePlugin):

@@ -73,11 +74,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
            config_data.get("__default__", {})
        )

        ffmpeg_path = os.getenv("FFMPEG_PATH", "")
        if ffmpeg_path:
            ffmpeg_path += "/ffmpeg"
        else:
            ffmpeg_path = "ffmpeg"
        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

        jpeg_items = []
        jpeg_items.append(ffmpeg_path)
@@ -1,33 +1,27 @@
import os
import sys
import re
import datetime
import subprocess
import json
import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
from pypeapp.lib import config
from pype import api as pype
from subprocess import Popen, PIPE
# FFmpeg in PATH is required
from pypeapp import Logger
import pype.lib

log = Logger().get_logger("BurninWrapper", "burninwrap")


log = pype.Logger().get_logger("BurninWrapper", "burninwrap")
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")


ffmpeg_path = os.environ.get("FFMPEG_PATH")
if ffmpeg_path and os.path.exists(ffmpeg_path):
    # add separator "/" or "\" to be prepared for next part
    ffmpeg_path += os.path.sep
else:
    ffmpeg_path = ""

FFMPEG = (
    '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
).format(os.path.normpath(ffmpeg_path + "ffmpeg"))
).format(ffmpeg_path)

FFPROBE = (
    '{} -v quiet -print_format json -show_format -show_streams %(source)s'
).format(os.path.normpath(ffmpeg_path + "ffprobe"))
).format(ffprobe_path)

DRAWTEXT = (
    "drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="

@@ -41,6 +35,7 @@ TIMECODE = (

MISSING_KEY_VALUE = "N/A"
CURRENT_FRAME_KEY = "{current_frame}"
CURRENT_FRAME_SPLITTER = "_-_CURRENT_FRAME_-_"
TIME_CODE_KEY = "{timecode}"


@@ -136,7 +131,9 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        if options_init:
            self.options_init.update(options_init)

    def add_text(self, text, align, frame_start=None, options=None):
    def add_text(
        self, text, align, frame_start=None, frame_end=None, options=None
    ):
        """
        Adding static text to a filter.

@@ -152,11 +149,15 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        if frame_start:
            options["frame_offset"] = frame_start

        # `frame_end` is only for measurements of text position
        if frame_end:
            options["frame_end"] = frame_end

        self._add_burnin(text, align, options, DRAWTEXT)

    def add_timecode(
        self, align, frame_start=None, frame_start_tc=None, text=None,
        options=None
        self, align, frame_start=None, frame_end=None, frame_start_tc=None,
        text=None, options=None
    ):
        """
        Convenience method to create the frame number expression.

@@ -174,6 +175,10 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        if frame_start:
            options["frame_offset"] = frame_start

        # `frame_end` is only for measurements of text position
        if frame_end:
            options["frame_end"] = frame_end

        if not frame_start_tc:
            frame_start_tc = options["frame_offset"]

@@ -197,10 +202,31 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        :param enum align: alignment, must use provided enum flags
        :param dict options:
        """

        final_text = text
        text_for_size = text
        if CURRENT_FRAME_SPLITTER in text:
            frame_start = options["frame_offset"]
            frame_end = options.get("frame_end", frame_start)
            if not frame_start:
                replacement_final = replacement_size = str(MISSING_KEY_VALUE)
            else:
                replacement_final = "\\'{}\\'".format(
                    r'%%{eif\:n+%d\:d}' % frame_start
                )
                replacement_size = str(frame_end)

            final_text = final_text.replace(
                CURRENT_FRAME_SPLITTER, replacement_final
            )
            text_for_size = text_for_size.replace(
                CURRENT_FRAME_SPLITTER, replacement_size
            )

        resolution = self.resolution
        data = {
            'text': (
                text
                final_text
                .replace(",", r"\,")
                .replace(':', r'\:')
            ),

@@ -208,7 +234,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
            'size': options['font_size']
        }
        timecode_text = options.get("timecode") or ""
        text_for_size = text + timecode_text
        text_for_size += timecode_text
        data.update(options)
        data.update(
            ffmpeg_burnins._drawtext(align, resolution, text_for_size, options)

@@ -272,7 +298,7 @@ class ModifiedBurnins(ffmpeg_burnins.Burnins):
        )
        print(command)

        proc = Popen(command, shell=True)
        proc = subprocess.Popen(command, shell=True)
        proc.communicate()
        if proc.returncode != 0:
            raise RuntimeError("Failed to render '%s': %s'"

@@ -368,6 +394,7 @@ def burnins_from_data(
    burnin = ModifiedBurnins(input_path, options_init=options_init)

    frame_start = data.get("frame_start")
    frame_end = data.get("frame_end")
    frame_start_tc = data.get('frame_start_tc', frame_start)

    stream = burnin._streams[0]
@@ -382,7 +409,7 @@ def burnins_from_data(

    # Check frame start and add expression if is available
    if frame_start is not None:
        data[CURRENT_FRAME_KEY[1:-1]] = r'%%{eif\:n+%d\:d}' % frame_start
        data[CURRENT_FRAME_KEY[1:-1]] = CURRENT_FRAME_SPLITTER

    if frame_start_tc is not None:
        data[TIME_CODE_KEY[1:-1]] = TIME_CODE_KEY
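The splitter is later replaced by an ffmpeg drawtext `eif` expression that prints the running frame index `n` offset to the sequence start; how the string formatting plays out, with a hypothetical frame start:

    frame_start = 1001
    print(r'%%{eif\:n+%d\:d}' % frame_start)
    # -> %{eif\:n+1001\:d}
    # ("%%" collapses to "%" during %-formatting; the colons stay
    # escaped for the drawtext filter)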
@@ -391,6 +418,13 @@ def burnins_from_data(
        if not value:
            continue

        if isinstance(value, (dict, list, tuple)):
            raise TypeError((
                "Expected string or number type."
                " Got: {} - \"{}\""
                " (Make sure you have new burnin presets)."
            ).format(str(type(value)), str(value)))

        has_timecode = TIME_CODE_KEY in value

        align = None

@@ -432,7 +466,7 @@ def burnins_from_data(

        # Handle timecode differently
        if has_timecode:
            args = [align, frame_start, frame_start_tc]
            args = [align, frame_start, frame_end, frame_start_tc]
            if not value.startswith(TIME_CODE_KEY):
                value_items = value.split(TIME_CODE_KEY)
                text = value_items[0].format(**data)

@@ -442,7 +476,7 @@ def burnins_from_data(
            continue

        text = value.format(**data)
        burnin.add_text(text, align, frame_start)
        burnin.add_text(text, align, frame_start, frame_end)

    codec_args = ""
    if codec_data:

@@ -6,6 +6,7 @@ import argparse
import logging
import subprocess
import platform
import json

try:
    from shutil import which
@@ -24,6 +25,18 @@ log.setLevel(logging.DEBUG)

error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"


def _load_json(path):
    assert os.path.isfile(path), ("path to json file doesn't exist")
    data = None
    with open(path, "r") as json_file:
        try:
            data = json.load(json_file)
        except Exception as exc:
            log.error(
                "Error loading json: "
                "{} - Exception: {}".format(path, exc)
            )
    return data


def __main__():
    parser = argparse.ArgumentParser()

@@ -77,6 +90,12 @@ def __main__():

    paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()]  # noqa

    for path in paths:
        data = _load_json(path)
        log.info("Setting session using data from file")
        os.environ["AVALON_PROJECT"] = data["session"]["AVALON_PROJECT"]
        break

    args = [
        os.path.join(pype_root, pype_command),
        "publish",
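`__main__` only reads `data["session"]["AVALON_PROJECT"]` from the metadata file, so a minimal valid `PYPE_METADATA_FILE` payload (illustrative project name) is:

    import json

    metadata = {"session": {"AVALON_PROJECT": "MyProject"}}  # illustrative
    print(json.dumps(metadata))  # what the file must contain at minimum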
@@ -1,3 +1,5 @@
PUBLISH_PATHS = []

from .standalonepublish_module import StandAlonePublishModule
from .app import (
    show,

@@ -5,14 +5,14 @@ import tempfile
import random
import string

from avalon import io
from avalon import api as avalon
from avalon import io, api
from avalon.tools import publish as av_publish

import pype
from pypeapp import execute

import pyblish.api
from . import PUBLISH_PATHS


def set_context(project, asset, task, app):

@@ -31,7 +31,6 @@ def set_context(project, asset, task, app):
    os.environ["AVALON_TASK"] = task
    io.Session["AVALON_TASK"] = task

    io.install()

    av_project = io.find_one({'type': 'project'})

@@ -76,7 +75,7 @@ def avalon_api_publish(data, gui=True):
    io.install()

    # Create hash name folder in temp
    chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] )
    chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
    staging_dir = tempfile.mkdtemp(chars)

    # create also json and fill with data
@@ -105,8 +104,27 @@ def avalon_api_publish(data, gui=True):
def cli_publish(data, gui=True):
    io.install()

    pyblish.api.deregister_all_plugins()
    # Registers Global pyblish plugins
    pype.install()
    # Registers Standalone pyblish plugins
    for path in PUBLISH_PATHS:
        pyblish.api.register_plugin_path(path)

    project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS")
    project_name = os.environ["AVALON_PROJECT"]
    if project_plugins_paths and project_name:
        for path in project_plugins_paths.split(os.pathsep):
            if not path:
                continue
            plugin_path = os.path.join(path, project_name, "plugins")
            if os.path.exists(plugin_path):
                pyblish.api.register_plugin_path(plugin_path)
                api.register_plugin_path(api.Loader, plugin_path)
                api.register_plugin_path(api.Creator, plugin_path)

    # Create hash name folder in temp
    chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] )
    chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
    staging_dir = tempfile.mkdtemp(chars)

    # create json for return data
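`PYPE_PROJECT_PLUGINS` behaves like a PATH-style list of roots; each root is expected to hold a `<project>/plugins` folder. With hypothetical roots:

    import os

    roots = os.pathsep.join(["/studio/pype-plugins", "/mnt/extra-plugins"])
    for root in roots.split(os.pathsep):
        print(os.path.join(root, "MyProject", "plugins"))
    # /studio/pype-plugins/MyProject/plugins
    # /mnt/extra-plugins/MyProject/plugins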
|
|||
|
|
@ -2,16 +2,16 @@ import os
|
|||
from .app import show
|
||||
from .widgets import QtWidgets
|
||||
import pype
|
||||
import pyblish.api
|
||||
from . import PUBLISH_PATHS
|
||||
|
||||
|
||||
class StandAlonePublishModule:
|
||||
PUBLISH_PATHS = []
|
||||
|
||||
def __init__(self, main_parent=None, parent=None):
|
||||
self.main_parent = main_parent
|
||||
self.parent_widget = parent
|
||||
self.PUBLISH_PATHS.append(os.path.sep.join(
|
||||
PUBLISH_PATHS.clear()
|
||||
PUBLISH_PATHS.append(os.path.sep.join(
|
||||
[pype.PLUGINS_DIR, "standalonepublisher", "publish"]
|
||||
))
|
||||
|
||||
|
|
@ -24,16 +24,9 @@ class StandAlonePublishModule:
|
|||
|
||||
def process_modules(self, modules):
|
||||
if "FtrackModule" in modules:
|
||||
self.PUBLISH_PATHS.append(os.path.sep.join(
|
||||
PUBLISH_PATHS.append(os.path.sep.join(
|
||||
[pype.PLUGINS_DIR, "ftrack", "publish"]
|
||||
))
|
||||
|
||||
def tray_start(self):
|
||||
# Registers Global pyblish plugins
|
||||
pype.install()
|
||||
# Registers Standalone pyblish plugins
|
||||
for path in self.PUBLISH_PATHS:
|
||||
pyblish.api.register_plugin_path(path)
|
||||
|
||||
def show(self):
|
||||
show(self.main_parent, False)
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import json
|
|||
import clique
|
||||
import subprocess
|
||||
from pypeapp import config
|
||||
import pype.lib
|
||||
from . import QtWidgets, QtCore
|
||||
from . import DropEmpty, ComponentsList, ComponentItem
|
||||
|
||||
|
|
@ -224,12 +225,7 @@ class DropDataFrame(QtWidgets.QFrame):
|
|||
self._process_data(data)
|
||||
|
||||
def load_data_with_probe(self, filepath):
|
||||
ffprobe_path = os.getenv("FFMPEG_PATH", "")
|
||||
if ffprobe_path:
|
||||
ffprobe_path += '/ffprobe'
|
||||
else:
|
||||
ffprobe_path = 'ffprobe'
|
||||
|
||||
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
|
||||
args = [
|
||||
ffprobe_path,
|
||||
'-v', 'quiet',
|
||||
|
|
|
|||
|
|
@ -7,7 +7,7 @@ log = logging.getLogger(__name__)
|
|||
|
||||
class Window(QtWidgets.QWidget):
|
||||
def __init__(self, parent, title, message, level):
|
||||
super().__init__()
|
||||
super(Window, self).__init__()
|
||||
self.parent = parent
|
||||
self.title = title
|
||||
self.message = message
|
||||
|
|
@ -48,9 +48,10 @@ class Window(QtWidgets.QWidget):
|
|||
return
|
||||
|
||||
|
||||
def message(title=None, message=None, level="info"):
|
||||
global app
|
||||
app = QtWidgets.QApplication(sys.argv)
|
||||
def message(title=None, message=None, level="info", parent=None):
|
||||
app = parent
|
||||
if not app:
|
||||
app = QtWidgets.QApplication(sys.argv)
|
||||
ex = Window(app, title, message, level)
|
||||
ex.show()
|
||||
# sys.exit(app.exec_())
|
||||
|
|
|
|||
|
|
@@ -14,12 +14,15 @@ shelf_preset = presets['maya'].get('project_shelf')
if shelf_preset:
    project = os.environ["AVALON_PROJECT"]

    icon_path = os.path.join(
        os.environ['PYPE_PROJECT_SCRIPTS'], project, "icons")
    icon_path = os.path.abspath(icon_path)

    for i in shelf_preset['imports']:
        import_string = "from {} import {}".format(project, i)
        print(import_string)
        exec(import_string)

    cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], preset=shelf_preset)")
    cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)")


print("finished PYPE usersetup")