diff --git a/pype/__init__.py b/pype/__init__.py
index 89c653bf6f..5cd9832558 100644
--- a/pype/__init__.py
+++ b/pype/__init__.py
@@ -9,8 +9,9 @@ from pypeapp import config
 import logging
 log = logging.getLogger(__name__)

-__version__ = "2.5.0"
+__version__ = "2.6.0"

+PROJECT_PLUGINS_PATH = os.environ.get("PYPE_PROJECT_PLUGINS")
 PACKAGE_DIR = os.path.dirname(__file__)
 PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -72,6 +73,18 @@ def install():
     pyblish.register_discovery_filter(filter_pyblish_plugins)
     avalon.register_plugin_path(avalon.Loader, LOAD_PATH)

+    # Register project specific plugins
+    project_name = os.environ.get("AVALON_PROJECT")
+    if PROJECT_PLUGINS_PATH and project_name:
+        for path in PROJECT_PLUGINS_PATH.split(os.pathsep):
+            if not path:
+                continue
+            plugin_path = os.path.join(path, project_name, "plugins")
+            if os.path.exists(plugin_path):
+                pyblish.register_plugin_path(plugin_path)
+                avalon.register_plugin_path(avalon.Loader, plugin_path)
+                avalon.register_plugin_path(avalon.Creator, plugin_path)
+
     # apply monkey patched discover to original one
     avalon.discover = patched_discover
diff --git a/pype/ftrack/actions/action_create_project_structure.py b/pype/ftrack/actions/action_create_project_structure.py
index 4589802f3a..6124ebe843 100644
--- a/pype/ftrack/actions/action_create_project_structure.py
+++ b/pype/ftrack/actions/action_create_project_structure.py
@@ -19,7 +19,7 @@ class CreateProjectFolders(BaseAction):
     #: Action description.
     description = 'Creates folder structure'
     #: roles that are allowed to register this action
-    role_list = ['Pypeclub', 'Administrator']
+    role_list = ['Pypeclub', 'Administrator', 'Project Manager']
     icon = '{}/ftrack/action_icons/CreateProjectFolders.svg'.format(
         os.environ.get('PYPE_STATICS_SERVER', '')
     )
diff --git a/pype/ftrack/actions/action_delete_old_versions.py b/pype/ftrack/actions/action_delete_old_versions.py
index bec21dae96..f6a66318c9 100644
--- a/pype/ftrack/actions/action_delete_old_versions.py
+++ b/pype/ftrack/actions/action_delete_old_versions.py
@@ -167,8 +167,11 @@ class DeleteOldVersions(BaseAction):

         asset_versions_by_parent_id = collections.defaultdict(list)
         subset_names_by_asset_name = collections.defaultdict(list)
+        ftrack_assets_by_name = {}

         for entity in entities:
-            parent_ent = entity["asset"]["parent"]
+            ftrack_asset = entity["asset"]
+
+            parent_ent = ftrack_asset["parent"]
             parent_ftrack_id = parent_ent["id"]
             parent_name = parent_ent["name"]
@@ -183,9 +186,12 @@ class DeleteOldVersions(BaseAction):
                 project = parent_ent["project"]

             # Collect subset names per asset
-            subset_name = entity["asset"]["name"]
+            subset_name = ftrack_asset["name"]
             subset_names_by_asset_name[parent_name].append(subset_name)

+            if subset_name not in ftrack_assets_by_name:
+                ftrack_assets_by_name[subset_name] = ftrack_asset
+
         # Set Mongo collection
         project_name = project["full_name"]
         self.dbcon.Session["AVALON_PROJECT"] = project_name
@@ -236,7 +242,6 @@ class DeleteOldVersions(BaseAction):
         def sort_func(ent):
             return int(ent["name"])

-        last_versions_by_parent = collections.defaultdict(list)
         all_last_versions = []
         for parent_id, _versions in versions_by_parent.items():
             for idx, version in enumerate(
@@ -244,7 +249,6 @@ class DeleteOldVersions(BaseAction):
             ):
                 if idx >= versions_count:
                     break
-                last_versions_by_parent[parent_id].append(version)
                 all_last_versions.append(version)

         self.log.debug("Collected versions ({})".format(len(versions)))
@@ -253,6 +257,11 @@ class DeleteOldVersions(BaseAction):
         for version in all_last_versions:
             versions.remove(version)

+        # Update versions_by_parent without filtered versions
+        versions_by_parent = collections.defaultdict(list)
+        for ent in versions:
+            versions_by_parent[ent["parent"]].append(ent)
+
         # Filter already deleted versions
         versions_to_pop = []
         for version in versions:
@@ -361,6 +370,47 @@ class DeleteOldVersions(BaseAction):

         self.dbcon.uninstall()

+        # Set attribute `is_published` to `False` on ftrack AssetVersions
+        for subset_id, _versions in versions_by_parent.items():
+            subset_name = None
+            for subset in subsets:
+                if subset["_id"] == subset_id:
+                    subset_name = subset["name"]
+                    break
+
+            if subset_name is None:
+                self.log.warning(
+                    "Subset with ID `{}` was not found.".format(str(subset_id))
+                )
+                continue
+
+            ftrack_asset = ftrack_assets_by_name.get(subset_name)
+            if not ftrack_asset:
+                self.log.warning((
+                    "Could not find Ftrack asset with name `{}`"
+                ).format(subset_name))
+                continue
+
+            version_numbers = [int(ver["name"]) for ver in _versions]
+            for version in ftrack_asset["versions"]:
+                if int(version["version"]) in version_numbers:
+                    version["is_published"] = False
+
+        try:
+            session.commit()
+
+        except Exception:
+            msg = (
+                "Could not set `is_published` attribute to `False`"
+                " for selected AssetVersions."
+            )
+            self.log.warning(msg, exc_info=True)
+
+            return {
+                "success": False,
+                "message": msg
+            }
+
         return True

     def delete_whole_dir_paths(self, dir_paths):
diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index 53de588bcc..faf7539540 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -26,11 +26,7 @@ class SyncToAvalonEvent(BaseEvent):

     dbcon = DbConnector()

-    ignore_entTypes = [
-        "socialfeed", "socialnotification", "note",
-        "assetversion", "job", "user", "reviewsessionobject", "timer",
-        "timelog", "auth_userrole", "appointment", "notelabellink"
-    ]
+    interest_entTypes = ["show", "task"]
     ignore_ent_types = ["Milestone"]
     ignore_keys = ["statusid", "thumbid"]

@@ -137,9 +133,10 @@ class SyncToAvalonEvent(BaseEvent):
         if self._avalon_ents_by_id is None:
             self._avalon_ents_by_id = {}
             proj, ents = self.avalon_entities
-            self._avalon_ents_by_id[proj["_id"]] = proj
-            for ent in ents:
-                self._avalon_ents_by_id[ent["_id"]] = ent
+            if proj:
+                self._avalon_ents_by_id[proj["_id"]] = proj
+                for ent in ents:
+                    self._avalon_ents_by_id[ent["_id"]] = ent
         return self._avalon_ents_by_id

     @property
@@ -159,13 +156,14 @@ class SyncToAvalonEvent(BaseEvent):
         if self._avalon_ents_by_ftrack_id is None:
             self._avalon_ents_by_ftrack_id = {}
             proj, ents = self.avalon_entities
-            ftrack_id = proj["data"]["ftrackId"]
-            self._avalon_ents_by_ftrack_id[ftrack_id] = proj
-            for ent in ents:
-                ftrack_id = ent["data"].get("ftrackId")
-                if ftrack_id is None:
-                    continue
-                self._avalon_ents_by_ftrack_id[ftrack_id] = ent
+            if proj:
+                ftrack_id = proj["data"]["ftrackId"]
+                self._avalon_ents_by_ftrack_id[ftrack_id] = proj
+                for ent in ents:
+                    ftrack_id = ent["data"].get("ftrackId")
+                    if ftrack_id is None:
+                        continue
+                    self._avalon_ents_by_ftrack_id[ftrack_id] = ent
         return self._avalon_ents_by_ftrack_id

     @property
@@ -508,7 +506,7 @@ class SyncToAvalonEvent(BaseEvent):
         found_actions = set()
         for ent_info in entities_info:
             entityType = ent_info["entityType"]
-            if entityType in self.ignore_entTypes:
+            if entityType not in self.interest_entTypes:
                 continue

             entity_type = ent_info.get("entity_type")
diff --git a/pype/ftrack/ftrack_server/sub_event_status.py b/pype/ftrack/ftrack_server/sub_event_status.py
index 1a15a1f28d..d3e6a3d647 100644
--- a/pype/ftrack/ftrack_server/sub_event_status.py
+++ b/pype/ftrack/ftrack_server/sub_event_status.py
@@ -369,13 +369,6 @@ def main(args):
     # store socket connection object
     ObjectFactory.sock = sock

-    statuse_names = {
-        "main": "Main process",
-        "storer": "Event Storer",
-        "processor": "Event Processor"
-    }
-
-    ObjectFactory.status_factory = StatusFactory(statuse_names)
     ObjectFactory.status_factory["main"].update(server_info)
     _returncode = 0
     try:
@@ -429,6 +422,13 @@ if __name__ == "__main__":
     signal.signal(signal.SIGINT, signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)

+    statuse_names = {
+        "main": "Main process",
+        "storer": "Event Storer",
+        "processor": "Event Processor"
+    }
+    ObjectFactory.status_factory = StatusFactory(statuse_names)
+
     checker_thread = OutputChecker()
     ObjectFactory.checker_thread = checker_thread
     checker_thread.start()
diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index f5b4c4b8c3..6f928914bf 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -2067,9 +2067,10 @@ class SyncEntitiesFactory:
                 # different hierarchy - can't recreate entity
                 continue

-            _vis_parent = str(deleted_entity["data"]["visualParent"])
+            _vis_parent = deleted_entity["data"]["visualParent"]
             if _vis_parent is None:
                 _vis_parent = self.avalon_project_id
+            _vis_parent = str(_vis_parent)
             ftrack_parent_id = self.avalon_ftrack_mapper[_vis_parent]
             self.create_ftrack_ent_from_avalon_ent(
                 deleted_entity, ftrack_parent_id
diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py
index 9dc735987d..2b46dd43d8 100644
--- a/pype/ftrack/lib/ftrack_app_handler.py
+++ b/pype/ftrack/lib/ftrack_app_handler.py
@@ -193,6 +193,8 @@ class AppAction(BaseHandler):
         if parents:
             hierarchy = os.path.join(*parents)

+        os.environ["AVALON_HIERARCHY"] = hierarchy
+
         application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])

         data = {
diff --git a/pype/lib.py b/pype/lib.py
index 796fe4f11f..4edc65f107 100644
--- a/pype/lib.py
+++ b/pype/lib.py
@@ -13,6 +13,62 @@ import avalon

 log = logging.getLogger(__name__)

+
+def get_paths_from_environ(env_key, return_first=False):
+    """Return existing paths from specific environment variable.
+
+    :param env_key: Environment key where should look for paths.
+    :type env_key: str
+    :param return_first: Return first path on `True`, list of all on `False`.
+    :type return_first: boolean
+
+    Difference when none of paths exists:
+    - when `return_first` is set to `False` then function returns empty list.
+    - when `return_first` is set to `True` then function returns `None`.
+    """
+
+    existing_paths = []
+    paths = os.environ.get(env_key) or ""
+    path_items = paths.split(os.pathsep)
+    for path in path_items:
+        # Skip empty string
+        if not path:
+            continue
+        # Normalize path
+        path = os.path.normpath(path)
+        # Check if path exists
+        if os.path.exists(path):
+            # Return path if `return_first` is set to True
+            if return_first:
+                return path
+            # Store path
+            existing_paths.append(path)
+
+    # Return None if none of paths exists
+    if return_first:
+        return None
+    # Return all existing paths from environment variable
+    return existing_paths
+
+
+def get_ffmpeg_tool_path(tool="ffmpeg"):
+    """Find path to ffmpeg tool in FFMPEG_PATH paths.
+
+    Function looks for tool in paths set in FFMPEG_PATH environment. If tool
+    exists then returns its full path.
+
+    Returns tool name itself when tool path was not found.
(FFmpeg path may be + set in PATH environment variable) + """ + + dir_paths = get_paths_from_environ("FFMPEG_PATH") + for dir_path in dir_paths: + for file_name in os.listdir(dir_path): + base, ext = os.path.splitext(file_name) + if base.lower() == tool.lower(): + return os.path.join(dir_path, tool) + return tool + + # Special naming case for subprocess since its a built-in method. def _subprocess(*args, **kwargs): """Convenience method for getting output errors for subprocess.""" diff --git a/pype/maya/__init__.py b/pype/maya/__init__.py index f027893a0e..fdc061f069 100644 --- a/pype/maya/__init__.py +++ b/pype/maya/__init__.py @@ -8,7 +8,6 @@ from avalon import api as avalon, pipeline, maya from avalon.maya.pipeline import IS_HEADLESS from avalon.tools import workfiles from pyblish import api as pyblish -from pypeapp import config from ..lib import ( any_outdated @@ -156,6 +155,12 @@ def on_open(_): from avalon.vendor.Qt import QtWidgets from ..widgets import popup + cmds.evalDeferred( + "from pype.maya import lib;lib.remove_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_change_observer()") # # Update current task for the current scene # update_task_from_path(cmds.file(query=True, sceneName=True)) @@ -194,6 +199,12 @@ def on_new(_): """Set project resolution and fps when create a new file""" avalon.logger.info("Running callback on new..") with maya.suspended_refresh(): + cmds.evalDeferred( + "from pype.maya import lib;lib.remove_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_observer()") + cmds.evalDeferred( + "from pype.maya import lib;lib.add_render_layer_change_observer()") lib.set_context_settings() @@ -218,3 +229,10 @@ def on_task_changed(*args): # Run maya.pipeline._on_task_changed() + with maya.suspended_refresh(): + lib.set_context_settings() + lib.update_content_on_context_change() + + lib.show_message("Context was changed", + ("Context was changed to {}".format( + avalon.Session["AVALON_ASSET"]))) diff --git a/pype/maya/lib.py b/pype/maya/lib.py index dafc281903..a06810ea94 100644 --- a/pype/maya/lib.py +++ b/pype/maya/lib.py @@ -2399,15 +2399,19 @@ class shelf(): if not item.get('command'): item['command'] = self._null if item['type'] == 'button': - self.addButon(item['name'], command=item['command']) + self.addButon(item['name'], + command=item['command'], + icon=item['icon']) if item['type'] == 'menuItem': self.addMenuItem(item['parent'], item['name'], - command=item['command']) + command=item['command'], + icon=item['icon']) if item['type'] == 'subMenu': self.addMenuItem(item['parent'], item['name'], - command=item['command']) + command=item['command'], + icon=item['icon']) def addButon(self, label, icon="commandButton.png", command=_null, doubleCommand=_null): @@ -2417,7 +2421,8 @@ class shelf(): ''' cmds.setParent(self.name) if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) cmds.shelfButton(width=37, height=37, image=icon, label=label, command=command, dcc=doubleCommand, imageOverlayLabel=label, olb=self.labelBackground, @@ -2429,7 +2434,8 @@ class shelf(): double click command and image. 
''' if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) return cmds.menuItem(p=parent, label=label, c=command, i="") def addSubMenu(self, parent, label, icon=None): @@ -2438,7 +2444,8 @@ class shelf(): the specified parent popup menu. ''' if icon: - icon = self.iconPath + icon + icon = os.path.join(self.iconPath, icon) + print(icon) return cmds.menuItem(p=parent, label=label, i=icon, subMenu=1) def _cleanOldShelf(self): @@ -2452,3 +2459,177 @@ class shelf(): cmds.deleteUI(each) else: cmds.shelfLayout(self.name, p="ShelfLayout") + + +def _get_render_instance(): + objectset = cmds.ls("*.id", long=True, type="objectSet", + recursive=True, objectsOnly=True) + + for objset in objectset: + + if not cmds.attributeQuery("id", node=objset, exists=True): + continue + + id_attr = "{}.id".format(objset) + if cmds.getAttr(id_attr) != "pyblish.avalon.instance": + continue + + has_family = cmds.attributeQuery("family", + node=objset, + exists=True) + if not has_family: + continue + + if cmds.getAttr("{}.family".format(objset)) == 'rendering': + return objset + + return None + + +renderItemObserverList = [] + + +class RenderSetupListObserver: + + def listItemAdded(self, item): + print("--- adding ...") + self._add_render_layer(item) + + def listItemRemoved(self, item): + print("--- removing ...") + self._remove_render_layer(item.name()) + + def _add_render_layer(self, item): + render_set = _get_render_instance() + layer_name = item.name() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) or [] + if not "LAYER_{}".format(layer_name) in members: + print(" - creating set for {}".format(layer_name)) + set = cmds.sets(n="LAYER_{}".format(layer_name), empty=True) + cmds.sets(set, forceElement=render_set) + rio = RenderSetupItemObserver(item) + print("- adding observer for {}".format(item.name())) + item.addItemObserver(rio.itemChanged) + renderItemObserverList.append(rio) + + def _remove_render_layer(self, layer_name): + render_set = _get_render_instance() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) + if "LAYER_{}".format(layer_name) in members: + print(" - removing set for {}".format(layer_name)) + cmds.delete("LAYER_{}".format(layer_name)) + + +class RenderSetupItemObserver(): + + def __init__(self, item): + self.item = item + self.original_name = item.name() + + def itemChanged(self, *args, **kwargs): + if self.item.name() == self.original_name: + return + + render_set = _get_render_instance() + + if not render_set: + return + + members = cmds.sets(render_set, query=True) + if "LAYER_{}".format(self.original_name) in members: + print(" <> renaming {} to {}".format(self.original_name, + self.item.name())) + cmds.rename("LAYER_{}".format(self.original_name), + "LAYER_{}".format(self.item.name())) + self.original_name = self.item.name() + + +renderListObserver = RenderSetupListObserver() + + +def add_render_layer_change_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + rs = renderSetup.instance() + render_set = _get_render_instance() + if not render_set: + return + + members = cmds.sets(render_set, query=True) + layers = rs.getRenderLayers() + for layer in layers: + if "LAYER_{}".format(layer.name()) in members: + rio = RenderSetupItemObserver(layer) + print("- adding observer for {}".format(layer.name())) + layer.addItemObserver(rio.itemChanged) + renderItemObserverList.append(rio) + + +def add_render_layer_observer(): + import maya.app.renderSetup.model.renderSetup as 
renderSetup + + print("> adding renderSetup observer ...") + rs = renderSetup.instance() + rs.addListObserver(renderListObserver) + pass + + +def remove_render_layer_observer(): + import maya.app.renderSetup.model.renderSetup as renderSetup + + print("< removing renderSetup observer ...") + rs = renderSetup.instance() + try: + rs.removeListObserver(renderListObserver) + except ValueError: + # no observer set yet + pass + + +def update_content_on_context_change(): + """ + This will update scene content to match new asset on context change + """ + scene_sets = cmds.listSets(allSets=True) + new_asset = api.Session["AVALON_ASSET"] + new_data = lib.get_asset()["data"] + for s in scene_sets: + try: + if cmds.getAttr("{}.id".format(s)) == "pyblish.avalon.instance": + attr = cmds.listAttr(s) + print(s) + if "asset" in attr: + print(" - setting asset to: [ {} ]".format(new_asset)) + cmds.setAttr("{}.asset".format(s), + new_asset, type="string") + if "frameStart" in attr: + cmds.setAttr("{}.frameStart".format(s), + new_data["frameStart"]) + if "frameEnd" in attr: + cmds.setAttr("{}.frameEnd".format(s), + new_data["frameEnd"],) + except ValueError: + pass + + +def show_message(title, msg): + from avalon.vendor.Qt import QtWidgets + from ..widgets import message_window + + # Find maya main window + top_level_widgets = {w.objectName(): w for w in + QtWidgets.QApplication.topLevelWidgets()} + + parent = top_level_widgets.get("MayaWindow", None) + if parent is None: + pass + else: + message_window.message(title=title, message=msg, parent=parent) diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py index 6eb4da951c..dedc42fa1d 100644 --- a/pype/nuke/lib.py +++ b/pype/nuke/lib.py @@ -432,7 +432,7 @@ def add_deadline_tab(node): node.addKnob(nuke.Tab_Knob("Deadline")) knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size") - knob.setValue(1) + knob.setValue(0) node.addKnob(knob) knob = nuke.Int_Knob("deadlinePriority", "Priority") diff --git a/pype/nukestudio/menu.py b/pype/nukestudio/menu.py index a996389524..36ce4df34e 100644 --- a/pype/nukestudio/menu.py +++ b/pype/nukestudio/menu.py @@ -5,13 +5,6 @@ from pypeapp import Logger from avalon.api import Session from hiero.ui import findMenuAction -# this way we secure compatibility between nuke 10 and 11 -try: - from PySide.QtGui import * -except Exception: - from PySide2.QtGui import * - from PySide2.QtWidgets import * - from .tags import add_tags_from_presets from .lib import ( @@ -50,14 +43,8 @@ def install(): """ # here is the best place to add menu - from avalon.tools import ( - creator, - publish, - cbloader, - cbsceneinventory, - contextmanager, - libraryloader - ) + from avalon.tools import publish + from avalon.vendor.Qt import QtGui menu_name = os.environ['AVALON_LABEL'] @@ -67,94 +54,53 @@ def install(): self._change_context_menu = context_label - # Grab Hiero's MenuBar - M = hiero.ui.menuBar() - try: check_made_menu = findMenuAction(menu_name) except Exception: - pass + check_made_menu = None if not check_made_menu: - menu = M.addMenu(menu_name) + # Grab Hiero's MenuBar + menu = hiero.ui.menuBar().addMenu(menu_name) else: menu = check_made_menu.menu() - actions = [ - { - 'parent': context_label, - 'action': QAction('Set Context', None), - 'function': contextmanager.show, - 'icon': QIcon('icons:Position.png') - }, - "separator", - { - 'action': QAction("Work Files...", None), - 'function': set_workfiles, - 'icon': QIcon('icons:Position.png') - }, - { - 'action': QAction('Create Default Tags..', None), - 'function': add_tags_from_presets, - 'icon': 
QIcon('icons:Position.png') - }, - "separator", - # { - # 'action': QAction('Create...', None), - # 'function': creator.show, - # 'icon': QIcon('icons:ColorAdd.png') - # }, - # { - # 'action': QAction('Load...', None), - # 'function': cbloader.show, - # 'icon': QIcon('icons:CopyRectangle.png') - # }, - { - 'action': QAction('Publish...', None), - 'function': publish.show, - 'icon': QIcon('icons:Output.png') - }, - # { - # 'action': QAction('Manage...', None), - # 'function': cbsceneinventory.show, - # 'icon': QIcon('icons:ModifyMetaData.png') - # }, - { - 'action': QAction('Library...', None), - 'function': libraryloader.show, - 'icon': QIcon('icons:ColorAdd.png') - }, - "separator", - { - 'action': QAction('Reload pipeline...', None), - 'function': reload_config, - 'icon': QIcon('icons:ColorAdd.png') - }] + context_label_action = menu.addAction(context_label) + context_label_action.setEnabled(False) - # Create menu items - for a in actions: - add_to_menu = menu - if isinstance(a, dict): - # create action - for k in a.keys(): - if 'parent' in k: - submenus = [sm for sm in a[k].split('/')] - submenu = None - for sm in submenus: - if submenu: - submenu.addMenu(sm) - else: - submenu = menu.addMenu(sm) - add_to_menu = submenu - if 'action' in k: - action = a[k] - elif 'function' in k: - action.triggered.connect(a[k]) - elif 'icon' in k: - action.setIcon(a[k]) + menu.addSeparator() - # add action to menu - add_to_menu.addAction(action) - hiero.ui.registerAction(action) - elif isinstance(a, str): - add_to_menu.addSeparator() + workfiles_action = menu.addAction("Work Files...") + workfiles_action.setIcon(QtGui.QIcon("icons:Position.png")) + workfiles_action.triggered.connect(set_workfiles) + + default_tags_action = menu.addAction("Create Default Tags...") + default_tags_action.setIcon(QtGui.QIcon("icons:Position.png")) + default_tags_action.triggered.connect(add_tags_from_presets) + + menu.addSeparator() + + publish_action = menu.addAction("Publish...") + publish_action.setIcon(QtGui.QIcon("icons:Output.png")) + publish_action.triggered.connect( + lambda *args: publish.show(hiero.ui.mainWindow()) + ) + + menu.addSeparator() + + reload_action = menu.addAction("Reload pipeline...") + reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png")) + reload_action.triggered.connect(reload_config) + + # Is this required? 
+ hiero.ui.registerAction(context_label_action) + hiero.ui.registerAction(workfiles_action) + hiero.ui.registerAction(default_tags_action) + hiero.ui.registerAction(publish_action) + hiero.ui.registerAction(reload_action) + + self.context_label_action = context_label_action + self.workfile_actions = workfiles_action + self.default_tags_action = default_tags_action + self.publish_action = publish_action + self.reload_action = reload_action diff --git a/pype/nukestudio/workio.py b/pype/nukestudio/workio.py index c7484b826b..1c7c77dab9 100644 --- a/pype/nukestudio/workio.py +++ b/pype/nukestudio/workio.py @@ -73,5 +73,5 @@ def current_file(): return normalised -def work_root(): - return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/") +def work_root(session): + return os.path.normpath(session["AVALON_WORKDIR"]).replace("\\", "/") diff --git a/pype/plugins/global/_publish_unused/collect_deadline_user.py b/pype/plugins/global/_publish_unused/collect_deadline_user.py deleted file mode 100644 index f4d13a0545..0000000000 --- a/pype/plugins/global/_publish_unused/collect_deadline_user.py +++ /dev/null @@ -1,60 +0,0 @@ -import os -import subprocess - -import pyblish.api - -CREATE_NO_WINDOW = 0x08000000 - - -def deadline_command(cmd): - # Find Deadline - path = os.environ.get("DEADLINE_PATH", None) - assert path is not None, "Variable 'DEADLINE_PATH' must be set" - - executable = os.path.join(path, "deadlinecommand") - if os.name == "nt": - executable += ".exe" - assert os.path.exists( - executable), "Deadline executable not found at %s" % executable - assert cmd, "Must have a command" - - query = (executable, cmd) - - process = subprocess.Popen(query, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - creationflags=CREATE_NO_WINDOW) - out, err = process.communicate() - - return out - - -class CollectDeadlineUser(pyblish.api.ContextPlugin): - """Retrieve the local active Deadline user""" - - order = pyblish.api.CollectorOrder + 0.499 - label = "Deadline User" - - hosts = ['maya', 'fusion', 'nuke'] - families = [ - "renderlayer", - "saver.deadline", - "imagesequence" - ] - - - def process(self, context): - """Inject the current working file""" - user = None - try: - user = deadline_command("GetCurrentUserName").strip() - except: - self.log.warning("Deadline command seems not to be working") - - if not user: - self.log.warning("No Deadline user found. " - "Do you have Deadline installed?") - return - - self.log.info("Found Deadline user: {}".format(user)) - context.data['deadlineUser'] = user diff --git a/pype/plugins/global/_publish_unused/collect_json.py b/pype/plugins/global/_publish_unused/collect_json.py deleted file mode 100644 index dc5bfb9c81..0000000000 --- a/pype/plugins/global/_publish_unused/collect_json.py +++ /dev/null @@ -1,127 +0,0 @@ -import os -import json -import re - -import pyblish.api -import clique - - -class CollectJSON(pyblish.api.ContextPlugin): - """ Collecting the json files in current directory. """ - - label = "JSON" - order = pyblish.api.CollectorOrder - hosts = ['maya'] - - def version_get(self, string, prefix): - """ Extract version information from filenames. 
Code from Foundry"s - nukescripts.version_get() - """ - - regex = r"[/_.]{}\d+".format(prefix) - matches = re.findall(regex, string, re.IGNORECASE) - - if not len(matches): - msg = "No '_{}#' found in '{}'".format(prefix, string) - raise ValueError(msg) - return matches[-1:][0][1], re.search(r"\d+", matches[-1:][0]).group() - - def process(self, context): - current_file = context.data.get("currentFile", '') - # Skip if current file is not a directory - if not os.path.isdir(current_file): - return - - # Traverse directory and collect collections from json files. - instances = [] - for root, dirs, files in os.walk(current_file): - for f in files: - if f.endswith(".json"): - with open(os.path.join(root, f)) as json_data: - for data in json.load(json_data): - instances.append(data) - - # Validate instance based on supported families. - valid_families = ["img", "cache", "scene", "mov"] - valid_data = [] - for data in instances: - families = data.get("families", []) + [data["family"]] - family_type = list(set(families) & set(valid_families)) - if family_type: - valid_data.append(data) - - # Create existing output instance. - scanned_dirs = [] - files = [] - collections = [] - for data in valid_data: - if "collection" not in data.keys(): - continue - if data["collection"] is None: - continue - - instance_collection = clique.parse(data["collection"]) - - try: - version = self.version_get( - os.path.basename(instance_collection.format()), "v" - )[1] - except KeyError: - # Ignore any output that is not versioned - continue - - # Getting collections of all previous versions and current version - for count in range(1, int(version) + 1): - - # Generate collection - version_string = "v" + str(count).zfill(len(version)) - head = instance_collection.head.replace( - "v" + version, version_string - ) - collection = clique.Collection( - head=head.replace("\\", "/"), - padding=instance_collection.padding, - tail=instance_collection.tail - ) - collection.version = count - - # Scan collection directory - scan_dir = os.path.dirname(collection.head) - if scan_dir not in scanned_dirs and os.path.exists(scan_dir): - for f in os.listdir(scan_dir): - file_path = os.path.join(scan_dir, f) - files.append(file_path.replace("\\", "/")) - scanned_dirs.append(scan_dir) - - # Match files to collection and add - for f in files: - if collection.match(f): - collection.add(f) - - # Skip if no files were found in the collection - if not list(collection): - continue - - # Skip existing collections - if collection in collections: - continue - - instance = context.create_instance(name=data["name"]) - version = self.version_get( - os.path.basename(collection.format()), "v" - )[1] - - basename = os.path.basename(collection.format()) - instance.data["label"] = "{0} - {1}".format( - data["name"], basename - ) - - families = data["families"] + [data["family"]] - family = list(set(valid_families) & set(families))[0] - instance.data["family"] = family - instance.data["families"] = ["output"] - instance.data["collection"] = collection - instance.data["version"] = int(version) - instance.data["publish"] = False - - collections.append(collection) diff --git a/pype/plugins/global/_publish_unused/collect_textures.py b/pype/plugins/global/_publish_unused/collect_textures.py deleted file mode 100644 index c38e911033..0000000000 --- a/pype/plugins/global/_publish_unused/collect_textures.py +++ /dev/null @@ -1,88 +0,0 @@ -import os -import re -import copy -from avalon import io -from pprint import pprint - -import pyblish.api -from avalon import 
api - - -texture_extensions = ['.tif', '.tiff', '.jpg', '.jpeg', '.tx', '.png', '.tga', - '.psd', '.dpx', '.hdr', '.hdri', '.exr', '.sxr', '.psb'] - - -class CollectTextures(pyblish.api.ContextPlugin): - """ - Gather all texture files in working directory, traversing whole structure. - """ - - order = pyblish.api.CollectorOrder - targets = ["texture"] - label = "Textures" - hosts = ["shell"] - - def process(self, context): - - if os.environ.get("PYPE_PUBLISH_PATHS"): - paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) - else: - cwd = context.get("workspaceDir", os.getcwd()) - paths = [cwd] - - textures = [] - for path in paths: - for dir, subdir, files in os.walk(path): - textures.extend( - os.path.join(dir, x) for x in files - if os.path.splitext(x)[1].lower() in texture_extensions) - - self.log.info("Got {} texture files.".format(len(textures))) - if len(textures) < 1: - raise RuntimeError("no textures found.") - - asset_name = os.environ.get("AVALON_ASSET") - family = 'texture' - subset = 'Main' - - project = io.find_one({'type': 'project'}) - asset = io.find_one({ - 'type': 'asset', - 'name': asset_name - }) - - context.data['project'] = project - context.data['asset'] = asset - - for tex in textures: - self.log.info("Processing: {}".format(tex)) - name, ext = os.path.splitext(tex) - simple_name = os.path.splitext(os.path.basename(tex))[0] - instance = context.create_instance(simple_name) - - instance.data.update({ - "subset": subset, - "asset": asset_name, - "label": simple_name, - "name": simple_name, - "family": family, - "families": [family, 'ftrack'], - }) - instance.data['destination_list'] = list() - instance.data['representations'] = list() - instance.data['source'] = 'pype command' - - texture_data = {} - texture_data['anatomy_template'] = 'texture' - texture_data["ext"] = ext - texture_data["label"] = simple_name - texture_data["name"] = "texture" - texture_data["stagingDir"] = os.path.dirname(tex) - texture_data["files"] = os.path.basename(tex) - texture_data["thumbnail"] = False - texture_data["preview"] = False - - instance.data["representations"].append(texture_data) - self.log.info("collected instance: {}".format(instance.data)) - - self.log.info("All collected.") diff --git a/pype/plugins/global/_publish_unused/extract_json.py b/pype/plugins/global/_publish_unused/extract_json.py deleted file mode 100644 index 8aff324574..0000000000 --- a/pype/plugins/global/_publish_unused/extract_json.py +++ /dev/null @@ -1,51 +0,0 @@ -import os -import json -import datetime -import time - -import pyblish.api -import clique - - -class ExtractJSON(pyblish.api.ContextPlugin): - """ Extract all instances to a serialized json file. """ - - order = pyblish.api.IntegratorOrder - label = "JSON" - hosts = ['maya'] - - def process(self, context): - - workspace = os.path.join( - os.path.dirname(context.data["currentFile"]), "workspace", - "instances") - - if not os.path.exists(workspace): - os.makedirs(workspace) - - output_data = [] - for instance in context: - self.log.debug(instance['data']) - - data = {} - for key, value in instance.data.iteritems(): - if isinstance(value, clique.Collection): - value = value.format() - - try: - json.dumps(value) - data[key] = value - except KeyError: - msg = "\"{0}\"".format(value) - msg += " in instance.data[\"{0}\"]".format(key) - msg += " could not be serialized." 
- self.log.debug(msg) - - output_data.append(data) - - timestamp = datetime.datetime.fromtimestamp( - time.time()).strftime("%Y%m%d-%H%M%S") - filename = timestamp + "_instances.json" - - with open(os.path.join(workspace, filename), "w") as outfile: - outfile.write(json.dumps(output_data, indent=4, sort_keys=True)) diff --git a/pype/plugins/global/_publish_unused/extract_quicktime.py b/pype/plugins/global/_publish_unused/extract_quicktime.py deleted file mode 100644 index 76a920b798..0000000000 --- a/pype/plugins/global/_publish_unused/extract_quicktime.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import pyblish.api -import subprocess -import clique - - -class ExtractQuicktimeEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issies - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ - - label = "Extract Quicktime" - order = pyblish.api.ExtractorOrder - families = ["imagesequence", "render", "write", "source"] - hosts = ["shell"] - - def process(self, instance): - # fps = instance.data.get("fps") - # start = instance.data.get("startFrame") - # stagingdir = os.path.normpath(instance.data.get("stagingDir")) - # - # collected_frames = os.listdir(stagingdir) - # collections, remainder = clique.assemble(collected_frames) - # - # full_input_path = os.path.join( - # stagingdir, collections[0].format('{head}{padding}{tail}') - # ) - # self.log.info("input {}".format(full_input_path)) - # - # filename = collections[0].format('{head}') - # if not filename.endswith('.'): - # filename += "." 
- # movFile = filename + "mov" - # full_output_path = os.path.join(stagingdir, movFile) - # - # self.log.info("output {}".format(full_output_path)) - # - # config_data = instance.context.data['output_repre_config'] - # - # proj_name = os.environ.get('AVALON_PROJECT', '__default__') - # profile = config_data.get(proj_name, config_data['__default__']) - # - # input_args = [] - # # overrides output file - # input_args.append("-y") - # # preset's input data - # input_args.extend(profile.get('input', [])) - # # necessary input data - # input_args.append("-start_number {}".format(start)) - # input_args.append("-i {}".format(full_input_path)) - # input_args.append("-framerate {}".format(fps)) - # - # output_args = [] - # # preset's output data - # output_args.extend(profile.get('output', [])) - # # output filename - # output_args.append(full_output_path) - # mov_args = [ - # "ffmpeg", - # " ".join(input_args), - # " ".join(output_args) - # ] - # subprocess_mov = " ".join(mov_args) - # sub_proc = subprocess.Popen(subprocess_mov) - # sub_proc.wait() - # - # if not os.path.isfile(full_output_path): - # raise("Quicktime wasn't created succesfully") - # - # if "representations" not in instance.data: - # instance.data["representations"] = [] - # - # representation = { - # 'name': 'mov', - # 'ext': 'mov', - # 'files': movFile, - # "stagingDir": stagingdir, - # "preview": True - # } - # instance.data["representations"].append(representation) diff --git a/pype/plugins/global/_publish_unused/transcode.py b/pype/plugins/global/_publish_unused/transcode.py deleted file mode 100644 index 6da65e3cc7..0000000000 --- a/pype/plugins/global/_publish_unused/transcode.py +++ /dev/null @@ -1,153 +0,0 @@ -import os -import subprocess - -import pyblish.api -import filelink - - -class ExtractTranscode(pyblish.api.InstancePlugin): - """Extracts review movie from image sequence. - - Offset to get images to transcode from. - """ - - order = pyblish.api.ExtractorOrder + 0.1 - label = "Transcode" - optional = True - families = ["review"] - - def find_previous_index(self, index, indexes): - """Finds the closest previous value in a list from a value.""" - - data = [] - for i in indexes: - if i >= index: - continue - data.append(index - i) - - return indexes[data.index(min(data))] - - def process(self, instance): - - if "collection" in instance.data.keys(): - self.process_image(instance) - - if "output_path" in instance.data.keys(): - self.process_movie(instance) - - def process_image(self, instance): - - collection = instance.data.get("collection", []) - - if not list(collection): - msg = "Skipping \"{0}\" because no frames was found." - self.log.warning(msg.format(instance.data["name"])) - return - - # Temporary fill the missing frames. - missing = collection.holes() - if not collection.is_contiguous(): - pattern = collection.format("{head}{padding}{tail}") - for index in missing.indexes: - dst = pattern % index - src_index = self.find_previous_index( - index, list(collection.indexes) - ) - src = pattern % src_index - - filelink.create(src, dst) - - # Generate args. - # Has to be yuv420p for compatibility with older players and smooth - # playback. This does come with a sacrifice of more visible banding - # issues. - # -crf 18 is visually lossless. 
- args = [ - "ffmpeg", "-y", - "-start_number", str(min(collection.indexes)), - "-framerate", str(instance.context.data["framerate"]), - "-i", collection.format("{head}{padding}{tail}"), - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - "-vframes", - str(max(collection.indexes) - min(collection.indexes) + 1), - "-vf", - "scale=trunc(iw/2)*2:trunc(ih/2)*2", - ] - - if instance.data.get("baked_colorspace_movie"): - args = [ - "ffmpeg", "-y", - "-i", instance.data["baked_colorspace_movie"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - args.append(collection.format("{head}.mov")) - - self.log.debug("Executing args: {0}".format(args)) - - # Can't use subprocess.check_output, cause Houdini doesn't like that. - p = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, - cwd=os.path.dirname(args[-1]) - ) - - output = p.communicate()[0] - - # Remove temporary frame fillers - for f in missing: - os.remove(f) - - if p.returncode != 0: - raise ValueError(output) - - self.log.debug(output) - - def process_movie(self, instance): - # Generate args. - # Has to be yuv420p for compatibility with older players and smooth - # playback. This does come with a sacrifice of more visible banding - # issues. - args = [ - "ffmpeg", "-y", - "-i", instance.data["output_path"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - if instance.data.get("baked_colorspace_movie"): - args = [ - "ffmpeg", "-y", - "-i", instance.data["baked_colorspace_movie"], - "-pix_fmt", "yuv420p", - "-crf", "18", - "-timecode", "00:00:00:01", - ] - - split = os.path.splitext(instance.data["output_path"]) - args.append(split[0] + "_review.mov") - - self.log.debug("Executing args: {0}".format(args)) - - # Can't use subprocess.check_output, cause Houdini doesn't like that. - p = subprocess.Popen( - args, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - stdin=subprocess.PIPE, - cwd=os.path.dirname(args[-1]) - ) - - output = p.communicate()[0] - - if p.returncode != 0: - raise ValueError(output) - - self.log.debug(output) diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py deleted file mode 100644 index f7ce5fab00..0000000000 --- a/pype/plugins/global/publish/collect_filesequences.py +++ /dev/null @@ -1,467 +0,0 @@ -""" -Requires: - environment -> PYPE_PUBLISH_PATHS - context -> workspaceDir - -Provides: - context -> user (str) - instance -> new instance -""" - -import os -import re -import copy -import json - -import pyblish.api -from avalon import api - - -def collect(root, - regex=None, - exclude_regex=None, - frame_start=None, - frame_end=None): - """Collect sequence collections in root""" - - from avalon.vendor import clique - - files = list() - for filename in os.listdir(root): - - # Must have extension - ext = os.path.splitext(filename)[1] - if not ext: - continue - - # Only files - if not os.path.isfile(os.path.join(root, filename)): - continue - - # Include and exclude regex - if regex and not re.search(regex, filename): - continue - if exclude_regex and re.search(exclude_regex, filename): - continue - - files.append(filename) - - # Match collections - # Support filenames like: projectX_shot01_0010.tiff with this regex - pattern = r"(?P(?P0*)\d+)\.\D+\d?$" - collections, remainder = clique.assemble(files, - patterns=[pattern], - minimum_items=1) - - # Exclude any frames outside start and end frame. 
- for collection in collections: - for index in list(collection.indexes): - if frame_start is not None and index < frame_start: - collection.indexes.discard(index) - continue - if frame_end is not None and index > frame_end: - collection.indexes.discard(index) - continue - - # Keep only collections that have at least a single frame - collections = [c for c in collections if c.indexes] - - return collections, remainder - - -class CollectRenderedFrames(pyblish.api.ContextPlugin): - """Gather file sequences from working directory - - When "FILESEQUENCE" environment variable is set these paths (folders or - .json files) are parsed for image sequences. Otherwise the current - working directory is searched for file sequences. - - The json configuration may have the optional keys: - asset (str): The asset to publish to. If not provided fall back to - api.Session["AVALON_ASSET"] - subset (str): The subset to publish to. If not provided the sequence's - head (up to frame number) will be used. - frame_start (int): The start frame for the sequence - frame_end (int): The end frame for the sequence - root (str): The path to collect from (can be relative to the .json) - regex (str): A regex for the sequence filename - exclude_regex (str): A regex for filename to exclude from collection - metadata (dict): Custom metadata for instance.data["metadata"] - - """ - - order = pyblish.api.CollectorOrder - 0.0001 - targets = ["filesequence"] - label = "RenderedFrames" - - def process(self, context): - pixel_aspect = 1 - resolution_width = 1920 - resolution_height = 1080 - lut_path = None - slate_frame = None - families_data = None - baked_mov_path = None - subset = None - version = None - frame_start = 0 - frame_end = 0 - if os.environ.get("PYPE_PUBLISH_PATHS"): - paths = os.environ["PYPE_PUBLISH_PATHS"].split(os.pathsep) - self.log.info("Collecting paths: {}".format(paths)) - else: - cwd = context.get("workspaceDir", os.getcwd()) - paths = [cwd] - - for path in paths: - - self.log.info("Loading: {}".format(path)) - - if path.endswith(".json"): - # Search using .json configuration - with open(path, "r") as f: - try: - data = json.load(f) - except Exception as exc: - self.log.error( - "Error loading json: " - "{} - Exception: {}".format(path, exc) - ) - raise - - cwd = os.path.dirname(path) - root_override = data.get("root") - frame_start = int(data.get("frameStart")) - frame_end = int(data.get("frameEnd")) - subset = data.get("subset") - - if root_override: - if os.path.isabs(root_override): - root = root_override - else: - root = os.path.join(cwd, root_override) - else: - root = cwd - - if data.get("ftrack"): - f = data.get("ftrack") - os.environ["FTRACK_API_USER"] = f["FTRACK_API_USER"] - os.environ["FTRACK_API_KEY"] = f["FTRACK_API_KEY"] - os.environ["FTRACK_SERVER"] = f["FTRACK_SERVER"] - - metadata = data.get("metadata") - if metadata: - session = metadata.get("session") - if session: - self.log.info("setting session using metadata") - api.Session.update(session) - os.environ.update(session) - instance = metadata.get("instance") - if instance: - instance_family = instance.get("family") - pixel_aspect = instance.get("pixelAspect", 1) - resolution_width = instance.get("resolutionWidth", 1920) - resolution_height = instance.get("resolutionHeight", 1080) - lut_path = instance.get("lutPath", None) - baked_mov_path = instance.get("bakeRenderPath") - families_data = instance.get("families") - slate_frame = instance.get("slateFrame") - version = instance.get("version") - - - else: - # Search in directory - data = 
dict() - root = path - - self.log.info("Collecting: {}".format(root)) - - regex = data.get("regex") - if baked_mov_path: - regex = "^{}.*$".format(subset) - - if regex: - self.log.info("Using regex: {}".format(regex)) - - if "slate" in families_data: - frame_start -= 1 - - collections, remainder = collect( - root=root, - regex=regex, - exclude_regex=data.get("exclude_regex"), - frame_start=frame_start, - frame_end=frame_end, - ) - - self.log.info("Found collections: {}".format(collections)) - self.log.info("Found remainder: {}".format(remainder)) - - fps = data.get("fps", 25) - - # adding publish comment and intent to context - context.data["comment"] = data.get("comment", "") - context.data["intent"] = data.get("intent", "") - - if data.get("user"): - context.data["user"] = data["user"] - - if data.get("version"): - version = data.get("version") - - # Get family from the data - families = data.get("families", ["render"]) - if "ftrack" not in families: - families.append("ftrack") - if families_data and "render2d" in families_data: - families.append("render2d") - if families_data and "slate" in families_data: - families.append("slate") - - if data.get("attachTo"): - # we need to attach found collections to existing - # subset version as review represenation. - - for attach in data.get("attachTo"): - self.log.info( - "Attaching render {}:v{}".format( - attach["subset"], attach["version"])) - instance = context.create_instance( - attach["subset"]) - instance.data.update( - { - "name": attach["subset"], - "version": attach["version"], - "family": 'review', - "families": ['review', 'ftrack'], - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height - }) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - for collection in collections: - self.log.info( - " - adding representation: {}".format( - str(collection)) - ) - ext = collection.tail.lstrip(".") - - detected_start = min(collection.indexes) - detected_end = max(collection.indexes) - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": frame_start, - "detectedStart": detected_start, - "detectedEnd": detected_end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - } - instance.data["representations"].append( - representation) - - elif subset: - # if we have subset - add all collections and known - # reminder as representations - - # take out review family if mov path - # this will make imagesequence none review - - if baked_mov_path: - self.log.info( - "Baked mov is available {}".format( - baked_mov_path)) - families.append("review") - - if session['AVALON_APP'] == "maya": - families.append("review") - - self.log.info( - "Adding representations to subset {}".format( - subset)) - - instance = context.create_instance(subset) - data = copy.deepcopy(data) - - instance.data.update( - { - "name": subset, - "family": families[0], - "families": list(families), - "subset": subset, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": frame_start, - "frameEnd": frame_end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - 
"slateFrame": slate_frame, - "version": version - } - ) - - if "representations" not in instance.data: - instance.data["representations"] = [] - - for collection in collections: - self.log.info(" - {}".format(str(collection))) - - ext = collection.tail.lstrip(".") - - if "slate" in instance.data["families"]: - frame_start += 1 - - detected_start = min(collection.indexes) - detected_end = max(collection.indexes) - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": frame_start, - "frameEnd": frame_end, - "detectedStart": detected_start, - "detectedEnd": detected_end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"] if not baked_mov_path else ["thumb-nuke"], - } - instance.data["representations"].append( - representation) - - # filter out only relevant mov in case baked available - self.log.debug("__ remainder {}".format(remainder)) - if baked_mov_path: - remainder = [r for r in remainder - if r in baked_mov_path] - self.log.debug("__ remainder {}".format(remainder)) - - # process reminders - for rem in remainder: - # add only known types to representation - if rem.split(".")[-1] in ['mov', 'jpg', 'mp4']: - self.log.info(" . {}".format(rem)) - - if "slate" in instance.data["families"]: - frame_start += 1 - - tags = ["review"] - - if baked_mov_path: - tags.append("delete") - - representation = { - "name": rem.split(".")[-1], - "ext": "{}".format(rem.split(".")[-1]), - "files": rem, - "stagingDir": root, - "frameStart": frame_start, - "anatomy_template": "render", - "fps": fps, - "tags": tags - } - instance.data["representations"].append( - representation) - - else: - # we have no subset so we take every collection and create one - # from it - for collection in collections: - instance = context.create_instance(str(collection)) - self.log.info("Creating subset from: %s" % str(collection)) - - # Ensure each instance gets a unique reference to the data - data = copy.deepcopy(data) - - # If no subset provided, get it from collection's head - subset = data.get("subset", collection.head.rstrip("_. 
")) - - # If no start or end frame provided, get it from collection - indices = list(collection.indexes) - start = int(data.get("frameStart", indices[0])) - end = int(data.get("frameEnd", indices[-1])) - - ext = list(collection)[0].split(".")[-1] - - if "review" not in families: - families.append("review") - - detected_start = min(collection.indexes) - detected_end = max(collection.indexes) - - instance.data.update( - { - "name": str(collection), - "family": families[0], # backwards compatibility - "families": list(families), - "subset": subset, - "asset": data.get( - "asset", api.Session["AVALON_ASSET"]), - "stagingDir": root, - "frameStart": start, - "frameEnd": end, - "fps": fps, - "source": data.get("source", ""), - "pixelAspect": pixel_aspect, - "resolutionWidth": resolution_width, - "resolutionHeight": resolution_height, - "version": version - } - ) - if lut_path: - instance.data.update({"lutPath": lut_path}) - - instance.append(collection) - instance.context.data["fps"] = fps - - if "representations" not in instance.data: - instance.data["representations"] = [] - - representation = { - "name": ext, - "ext": "{}".format(ext), - "files": list(collection), - "frameStart": start, - "frameEnd": end, - "detectedStart": detected_start, - "detectedEnd": detected_end, - "stagingDir": root, - "anatomy_template": "render", - "fps": fps, - "tags": ["review"], - } - instance.data["representations"].append(representation) - - # temporary ... allow only beauty on ftrack - if session['AVALON_APP'] == "maya": - AOV_filter = ['beauty'] - for aov in AOV_filter: - if aov not in instance.data['subset']: - instance.data['families'].remove('review') - instance.data['families'].remove('ftrack') - representation["tags"].remove('review') - - self.log.debug( - "__ representations {}".format( - instance.data["representations"])) - self.log.debug( - "__ instance.data {}".format(instance.data)) diff --git a/pype/plugins/global/publish/collect_instance_anatomy_data.py b/pype/plugins/global/publish/collect_instance_anatomy_data.py index 825c48dcf4..06a25b7c8a 100644 --- a/pype/plugins/global/publish/collect_instance_anatomy_data.py +++ b/pype/plugins/global/publish/collect_instance_anatomy_data.py @@ -108,9 +108,15 @@ class CollectInstanceAnatomyData(pyblish.api.InstancePlugin): if resolution_height: anatomy_data["resolution_height"] = resolution_height + pixel_aspect = instance.data.get("pixelAspect") + if pixel_aspect: + anatomy_data["pixel_aspect"] = float("{:0.2f}".format( + float(pixel_aspect))) + fps = instance.data.get("fps") - if resolution_height: - anatomy_data["fps"] = fps + if fps: + anatomy_data["fps"] = float("{:0.2f}".format( + float(fps))) instance.data["projectEntity"] = project_entity instance.data["assetEntity"] = asset_entity diff --git a/pype/plugins/global/publish/collect_rendered_files.py b/pype/plugins/global/publish/collect_rendered_files.py new file mode 100644 index 0000000000..010cf44c15 --- /dev/null +++ b/pype/plugins/global/publish/collect_rendered_files.py @@ -0,0 +1,94 @@ +import os +import json + +import pyblish.api +from avalon import api + +from pypeapp import PypeLauncher + + +class CollectRenderedFiles(pyblish.api.ContextPlugin): + """ + This collector will try to find json files in provided + `PYPE_PUBLISH_DATA`. Those files _MUST_ share same context. 
+ + """ + order = pyblish.api.CollectorOrder - 0.0001 + targets = ["filesequence"] + label = "Collect rendered frames" + + _context = None + + def _load_json(self, path): + assert os.path.isfile(path), ("path to json file doesn't exist") + data = None + with open(path, "r") as json_file: + try: + data = json.load(json_file) + except Exception as exc: + self.log.error( + "Error loading json: " + "{} - Exception: {}".format(path, exc) + ) + return data + + def _process_path(self, data): + # validate basic necessary data + data_err = "invalid json file - missing data" + required = ["asset", "user", "intent", "comment", + "job", "instances", "session", "version"] + assert all(elem in data.keys() for elem in required), data_err + + # set context by first json file + ctx = self._context.data + + ctx["asset"] = ctx.get("asset") or data.get("asset") + ctx["intent"] = ctx.get("intent") or data.get("intent") + ctx["comment"] = ctx.get("comment") or data.get("comment") + ctx["user"] = ctx.get("user") or data.get("user") + ctx["version"] = ctx.get("version") or data.get("version") + + # basic sanity check to see if we are working in same context + # if some other json file has different context, bail out. + ctx_err = "inconsistent contexts in json files - %s" + assert ctx.get("asset") == data.get("asset"), ctx_err % "asset" + assert ctx.get("intent") == data.get("intent"), ctx_err % "intent" + assert ctx.get("comment") == data.get("comment"), ctx_err % "comment" + assert ctx.get("user") == data.get("user"), ctx_err % "user" + assert ctx.get("version") == data.get("version"), ctx_err % "version" + + # ftrack credentials are passed as environment variables by Deadline + # to publish job, but Muster doesn't pass them. + if data.get("ftrack") and not os.environ.get("FTRACK_API_USER"): + ftrack = data.get("ftrack") + os.environ["FTRACK_API_USER"] = ftrack["FTRACK_API_USER"] + os.environ["FTRACK_API_KEY"] = ftrack["FTRACK_API_KEY"] + os.environ["FTRACK_SERVER"] = ftrack["FTRACK_SERVER"] + + # now we can just add instances from json file and we are done + for instance in data.get("instances"): + self.log.info(" - processing instance for {}".format( + instance.get("subset"))) + i = self._context.create_instance(instance.get("subset")) + self.log.info("remapping paths ...") + i.data["representations"] = [PypeLauncher().path_remapper( + data=r) for r in instance.get("representations")] + i.data.update(instance) + + def process(self, context): + self._context = context + + assert os.environ.get("PYPE_PUBLISH_DATA"), ( + "Missing `PYPE_PUBLISH_DATA`") + paths = os.environ["PYPE_PUBLISH_DATA"].split(os.pathsep) + + session_set = False + for path in paths: + data = self._load_json(path) + if not session_set: + self.log.info("Setting session using data from file") + api.Session.update(data.get("session")) + os.environ.update(data.get("session")) + session_set = True + assert data, "failed to load json file" + self._process_path(data) diff --git a/pype/plugins/global/publish/collect_scene_version.py b/pype/plugins/global/publish/collect_scene_version.py index 2844a695e2..02e913199b 100644 --- a/pype/plugins/global/publish/collect_scene_version.py +++ b/pype/plugins/global/publish/collect_scene_version.py @@ -21,7 +21,7 @@ class CollectSceneVersion(pyblish.api.ContextPlugin): if '' in filename: return - rootVersion = pype.get_version_from_path(filename) + rootVersion = int(pype.get_version_from_path(filename)) context.data['version'] = rootVersion - + self.log.info("{}".format(type(rootVersion))) self.log.info('Scene 
Version: %s' % context.data.get('version')) diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py index b95c15f340..008bebb271 100644 --- a/pype/plugins/global/publish/extract_burnin.py +++ b/pype/plugins/global/publish/extract_burnin.py @@ -26,13 +26,18 @@ class ExtractBurnin(pype.api.Extractor): if "representations" not in instance.data: raise RuntimeError("Burnin needs already created mov to work on.") - version = instance.context.data.get( - 'version', instance.data.get('version')) + version = instance.data.get( + 'version', instance.context.data.get('version')) frame_start = int(instance.data.get("frameStart") or 0) frame_end = int(instance.data.get("frameEnd") or 1) duration = frame_end - frame_start + 1 prep_data = copy.deepcopy(instance.data["anatomyData"]) + + if "slate.farm" in instance.data["families"]: + frame_start += 1 + duration -= 1 + prep_data.update({ "frame_start": frame_start, "frame_end": frame_end, @@ -42,22 +47,6 @@ class ExtractBurnin(pype.api.Extractor): "intent": instance.context.data.get("intent", "") }) - slate_frame_start = frame_start - slate_frame_end = frame_end - slate_duration = duration - - # exception for slate workflow - if "slate" in instance.data["families"]: - slate_frame_start = frame_start - 1 - slate_frame_end = frame_end - slate_duration = slate_frame_end - slate_frame_start + 1 - - prep_data.update({ - "slate_frame_start": slate_frame_start, - "slate_frame_end": slate_frame_end, - "slate_duration": slate_duration - }) - # get anatomy project anatomy = instance.context.data['anatomy'] @@ -101,6 +90,26 @@ class ExtractBurnin(pype.api.Extractor): filled_anatomy = anatomy.format_all(_prep_data) _prep_data["anatomy"] = filled_anatomy.get_solved() + # dealing with slates + slate_frame_start = frame_start + slate_frame_end = frame_end + slate_duration = duration + + # exception for slate workflow + if ("slate" in instance.data["families"]): + if "slate-frame" in repre.get("tags", []): + slate_frame_start = frame_start - 1 + slate_frame_end = frame_end + slate_duration = duration + 1 + + self.log.debug("__1 slate_frame_start: {}".format(slate_frame_start)) + + _prep_data.update({ + "slate_frame_start": slate_frame_start, + "slate_frame_end": slate_frame_end, + "slate_duration": slate_duration + }) + burnin_data = { "input": full_movie_path.replace("\\", "/"), "codec": repre.get("codec", []), diff --git a/pype/plugins/global/publish/extract_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py index 318a6db105..ab8226f6ef 100644 --- a/pype/plugins/global/publish/extract_hierarchy_avalon.py +++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py @@ -28,29 +28,33 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): entity_type = entity_data["entity_type"] data = {} - - data["inputs"] = entity_data.get("inputs", []) data["entityType"] = entity_type # Custom attributes. for k, val in entity_data.get("custom_attributes", {}).items(): data[k] = val - # Tasks. 
- tasks = entity_data.get("tasks", []) - if tasks is not None or len(tasks) > 0: - data["tasks"] = tasks - parents = [] - visualParent = None - # do not store project"s id as visualParent (silo asset) - if self.project is not None: - if self.project["_id"] != parent["_id"]: - visualParent = parent["_id"] - parents.extend(parent.get("data", {}).get("parents", [])) - parents.append(parent["name"]) - data["visualParent"] = visualParent - data["parents"] = parents + if entity_type.lower() != "project": + data["inputs"] = entity_data.get("inputs", []) + # Tasks. + tasks = entity_data.get("tasks", []) + if tasks is not None or len(tasks) > 0: + data["tasks"] = tasks + parents = [] + visualParent = None + # do not store project"s id as visualParent (silo asset) + if self.project is not None: + if self.project["_id"] != parent["_id"]: + visualParent = parent["_id"] + parents.extend( + parent.get("data", {}).get("parents", []) + ) + parents.append(parent["name"]) + data["visualParent"] = visualParent + data["parents"] = parents + + update_data = True # Process project if entity_type.lower() == "project": entity = io.find_one({"type": "project"}) @@ -58,8 +62,9 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): assert (entity is not None), "Did not find project in DB" # get data from already existing project - for key, value in entity.get("data", {}).items(): - data[key] = value + cur_entity_data = entity.get("data") or {} + cur_entity_data.update(data) + data = cur_entity_data self.project = entity # Raise error if project or parent are not set @@ -70,16 +75,63 @@ class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin): # Else process assset else: entity = io.find_one({"type": "asset", "name": name}) - # Create entity if doesn"t exist - if entity is None: - entity = self.create_avalon_asset(name, data) + if entity: + # Do not override data, only update + cur_entity_data = entity.get("data") or {} + cur_entity_data.update(data) + data = cur_entity_data + else: + # Skip updating data + update_data = False - # Update entity data with input data - io.update_many({"_id": entity["_id"]}, {"$set": {"data": data}}) + archived_entities = io.find({ + "type": "archived_asset", + "name": name + }) + unarchive_entity = None + for archived_entity in archived_entities: + archived_parents = ( + archived_entity + .get("data", {}) + .get("parents") + ) + if data["parents"] == archived_parents: + unarchive_entity = archived_entity + break + + if unarchive_entity is None: + # Create entity if doesn"t exist + entity = self.create_avalon_asset(name, data) + else: + # Unarchive if entity was archived + entity = self.unarchive_entity(unarchive_entity, data) + + if update_data: + # Update entity data with input data + io.update_many( + {"_id": entity["_id"]}, + {"$set": {"data": data}} + ) if "childs" in entity_data: self.import_to_avalon(entity_data["childs"], entity) + def unarchive_entity(self, entity, data): + # Unarchived asset should not use same data + new_entity = { + "_id": entity["_id"], + "schema": "avalon-core:asset-3.0", + "name": entity["name"], + "parent": self.project["_id"], + "type": "asset", + "data": data + } + io.replace_one( + {"_id": entity["_id"]}, + new_entity + ) + return new_entity + def create_avalon_asset(self, name, data): item = { "schema": "avalon-core:asset-3.0", diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py index abd20bb9ea..9ad6a15dfe 100644 --- a/pype/plugins/global/publish/extract_jpeg.py +++ 
b/pype/plugins/global/publish/extract_jpeg.py @@ -1,20 +1,12 @@ import os import pyblish.api -import clique import pype.api +import pype.lib class ExtractJpegEXR(pyblish.api.InstancePlugin): - """Resolve any dependency issues - - This plug-in resolves any paths which, if not updated might break - the published file. - - The order of families is important, when working with lookdev you want to - first publish the texture, update the texture paths in the nodes and then - publish the shading network. Same goes for file dependent assets. - """ + """Create jpg thumbnail from sequence using ffmpeg""" label = "Extract Jpeg EXR" hosts = ["shell"] @@ -23,11 +15,6 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): enabled = False def process(self, instance): - start = instance.data.get("frameStart") - stagingdir = os.path.normpath(instance.data.get("stagingDir")) - - collected_frames = os.listdir(stagingdir) - collections, remainder = clique.assemble(collected_frames) self.log.info("subset {}".format(instance.data['subset'])) if 'crypto' in instance.data['subset']: @@ -40,14 +27,16 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): representations_new = representations[:] for repre in representations: + tags = repre.get("tags", []) self.log.debug(repre) - valid = 'review' in repre['tags'] or "thumb-nuke" in repre['tags'] + valid = 'review' in tags or "thumb-nuke" in tags if not valid: continue if not isinstance(repre['files'], list): continue + stagingdir = os.path.normpath(repre.get("stagingDir")) input_file = repre['files'][0] # input_file = ( @@ -69,9 +58,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin): proj_name = os.environ.get('AVALON_PROJECT', '__default__') profile = config_data.get(proj_name, config_data['__default__']) + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + jpeg_items = [] - jpeg_items.append( - os.path.join(os.environ.get("FFMPEG_PATH"), "ffmpeg")) + jpeg_items.append(ffmpeg_path) # override file if already exists jpeg_items.append("-y") # use same input args like with mov diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py index 4f96491638..f5dba108c5 100644 --- a/pype/plugins/global/publish/extract_review.py +++ b/pype/plugins/global/publish/extract_review.py @@ -2,6 +2,7 @@ import os import pyblish.api import clique import pype.api +import pype.lib class ExtractReview(pyblish.api.InstancePlugin): @@ -40,6 +41,8 @@ class ExtractReview(pyblish.api.InstancePlugin): # get representation and loop them representations = inst_data["representations"] + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + # filter out mov and img sequences representations_new = representations[:] for repre in representations: @@ -327,10 +330,7 @@ class ExtractReview(pyblish.api.InstancePlugin): os.mkdir(stg_dir) mov_args = [ - os.path.join( - os.environ.get( - "FFMPEG_PATH", - ""), "ffmpeg"), + ffmpeg_path, " ".join(input_args), " ".join(output_args) ] diff --git a/pype/plugins/global/publish/extract_review_slate.py b/pype/plugins/global/publish/extract_review_slate.py index 9a720b77a9..699ed4a5eb 100644 --- a/pype/plugins/global/publish/extract_review_slate.py +++ b/pype/plugins/global/publish/extract_review_slate.py @@ -1,5 +1,6 @@ import os import pype.api +import pype.lib import pyblish @@ -21,7 +22,7 @@ class ExtractReviewSlate(pype.api.Extractor): suffix = "_slate" slate_path = inst_data.get("slateFrame") - ffmpeg_path = os.path.join(os.environ.get("FFMPEG_PATH", ""), "ffmpeg") + ffmpeg_path = 
pype.lib.get_ffmpeg_tool_path("ffmpeg") to_width = 1920 to_height = 1080 diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py index f8cde10aed..8c27ccfa84 100644 --- a/pype/plugins/global/publish/integrate_new.py +++ b/pype/plugins/global/publish/integrate_new.py @@ -80,7 +80,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): "matchmove", "image" "source", - "assembly" + "assembly", + "textures" ] exclude_families = ["clip"] db_representation_context_keys = [ @@ -283,6 +284,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): stagingdir = repre['stagingDir'] if repre.get('anatomy_template'): template_name = repre['anatomy_template'] + if repre.get("outputName"): + template_data["output"] = repre['outputName'] template = os.path.normpath( anatomy.templates[template_name]["path"]) @@ -397,9 +400,6 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): template_data["representation"] = repre['ext'] - if repre.get("outputName"): - template_data["output"] = repre['outputName'] - src = os.path.join(stagingdir, fname) anatomy_filled = anatomy.format(template_data) template_filled = anatomy_filled[template_name]["path"] @@ -452,7 +452,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): if sequence_repre and repre.get("frameStart"): representation['context']['frame'] = ( - src_padding_exp % int(repre.get("frameStart")) + dst_padding_exp % int(repre.get("frameStart")) ) self.log.debug("__ representation: {}".format(representation)) @@ -544,7 +544,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin): """ src = self.unc_convert(src) dst = self.unc_convert(dst) - + src = os.path.normpath(src) + dst = os.path.normpath(dst) self.log.debug("Copying file .. {} -> {}".format(src, dst)) dirname = os.path.dirname(dst) try: diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py index 792fc05a38..3ad7805fe7 100644 --- a/pype/plugins/global/publish/submit_publish_job.py +++ b/pype/plugins/global/publish/submit_publish_job.py @@ -1,7 +1,7 @@ import os import json import re -import logging +from copy import copy from avalon import api, io from avalon.vendor import requests, clique @@ -14,16 +14,15 @@ def _get_script(): try: from pype.scripts import publish_filesequence except Exception: - raise RuntimeError("Expected module 'publish_deadline'" - "to be available") + assert False, "Expected module 'publish_deadline'to be available" module_path = publish_filesequence.__file__ if module_path.endswith(".pyc"): - module_path = module_path[:-len(".pyc")] + ".py" + module_path = module_path[: -len(".pyc")] + ".py" module_path = os.path.normpath(module_path) - mount_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_MOUNT']) - network_root = os.path.normpath(os.environ['PYPE_STUDIO_CORE_PATH']) + mount_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_MOUNT"]) + network_root = os.path.normpath(os.environ["PYPE_STUDIO_CORE_PATH"]) module_path = module_path.replace(mount_root, network_root) @@ -34,39 +33,29 @@ def _get_script(): def get_latest_version(asset_name, subset_name, family): # Get asset asset_name = io.find_one( - { - "type": "asset", - "name": asset_name - }, - projection={"name": True} + {"type": "asset", "name": asset_name}, projection={"name": True} ) subset = io.find_one( - { - "type": "subset", - "name": subset_name, - "parent": asset_name["_id"] - }, - projection={"_id": True, "name": True} + {"type": "subset", "name": subset_name, "parent": asset_name["_id"]}, + 
projection={"_id": True, "name": True}, ) # Check if subsets actually exists (pre-run check) assert subset, "No subsets found, please publish with `extendFrames` off" # Get version - version_projection = {"name": True, - "data.startFrame": True, - "data.endFrame": True, - "parent": True} + version_projection = { + "name": True, + "data.startFrame": True, + "data.endFrame": True, + "parent": True, + } version = io.find_one( - { - "type": "version", - "parent": subset["_id"], - "data.families": family - }, + {"type": "version", "parent": subset["_id"], "data.families": family}, projection=version_projection, - sort=[("name", -1)] + sort=[("name", -1)], ) assert version, "No version found, this is a bug" @@ -87,8 +76,12 @@ def get_resources(version, extension=None): directory = api.get_representation_path(representation) print("Source: ", directory) - resources = sorted([os.path.normpath(os.path.join(directory, fname)) - for fname in os.listdir(directory)]) + resources = sorted( + [ + os.path.normpath(os.path.join(directory, fname)) + for fname in os.listdir(directory) + ] + ) return resources @@ -138,8 +131,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): - publishJobState (str, Optional): "Active" or "Suspended" This defaults to "Suspended" - This requires a "frameStart" and "frameEnd" to be present in instance.data - or in context.data. + - expectedFiles (list or dict): explained bellow """ @@ -149,26 +141,40 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): hosts = ["fusion", "maya", "nuke"] - families = [ - "render.farm", - "renderlayer", - "imagesequence" - ] + families = ["render.farm", "renderlayer", "imagesequence"] + + aov_filter = {"maya": ["beauty"]} enviro_filter = [ - "PATH", - "PYTHONPATH", - "FTRACK_API_USER", - "FTRACK_API_KEY", - "FTRACK_SERVER", - "PYPE_ROOT", - "PYPE_METADATA_FILE", - "PYPE_STUDIO_PROJECTS_PATH", - "PYPE_STUDIO_PROJECTS_MOUNT" - ] - + "PATH", + "PYTHONPATH", + "FTRACK_API_USER", + "FTRACK_API_KEY", + "FTRACK_SERVER", + "PYPE_ROOT", + "PYPE_METADATA_FILE", + "PYPE_STUDIO_PROJECTS_PATH", + "PYPE_STUDIO_PROJECTS_MOUNT", + ] + + # pool used to do the publishing job deadline_pool = "" + # regex for finding frame number in string + R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') + + # mapping of instance properties to be transfered to new instance for every + # specified family + instance_transfer = { + "slate": ["slateFrame"], + "review": ["lutPath"], + "render.farm": ["bakeScriptPath", "bakeRenderPath", + "bakeWriteNodeName", "version"] + } + + # list of family names to transfer to new family if present + families_transfer = ["render3d", "render2d", "ftrack", "slate"] + def _submit_deadline_post_job(self, instance, job): """ Deadline specific code separated from :meth:`process` for sake of @@ -178,8 +184,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): data = instance.data.copy() subset = data["subset"] job_name = "{batch} - {subset} [publish image sequence]".format( - batch=job["Props"]["Name"], - subset=subset + batch=job["Props"]["Name"], subset=subset ) metadata_filename = "{}_metadata.json".format(subset) @@ -187,11 +192,10 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): metadata_path = os.path.join(output_dir, metadata_filename) metadata_path = os.path.normpath(metadata_path) - mount_root = os.path.normpath(os.environ['PYPE_STUDIO_PROJECTS_MOUNT']) - network_root = os.path.normpath( - os.environ['PYPE_STUDIO_PROJECTS_PATH']) - + mount_root = 
os.path.normpath(os.environ["PYPE_STUDIO_PROJECTS_MOUNT"]) + network_root = os.environ["PYPE_STUDIO_PROJECTS_PATH"] metadata_path = metadata_path.replace(mount_root, network_root) + metadata_path = os.path.normpath(metadata_path) # Generate the payload for Deadline submission payload = { @@ -199,22 +203,21 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): "Plugin": "Python", "BatchName": job["Props"]["Batch"], "Name": job_name, - "JobType": "Normal", "JobDependency0": job["_id"], "UserName": job["Props"]["User"], "Comment": instance.context.data.get("comment", ""), "Priority": job["Props"]["Pri"], - "Pool": self.deadline_pool + "Pool": self.deadline_pool, + "OutputDirectory0": output_dir }, "PluginInfo": { "Version": "3.6", "ScriptFile": _get_script(), "Arguments": "", - "SingleFrameOnly": "True" + "SingleFrameOnly": "True", }, - # Mandatory for Deadline, may be empty - "AuxFiles": [] + "AuxFiles": [], } # Transfer the environment from the original job to this dependent @@ -224,30 +227,268 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): environment["PYPE_METADATA_FILE"] = metadata_path i = 0 for index, key in enumerate(environment): - self.log.info("KEY: {}".format(key)) - self.log.info("FILTER: {}".format(self.enviro_filter)) - if key.upper() in self.enviro_filter: - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % i: "{key}={value}".format( - key=key, - value=environment[key] - ) - }) + payload["JobInfo"].update( + { + "EnvironmentKeyValue%d" + % i: "{key}={value}".format( + key=key, value=environment[key] + ) + } + ) i += 1 # Avoid copied pools and remove secondary pool payload["JobInfo"]["Pool"] = "none" payload["JobInfo"].pop("SecondaryPool", None) - self.log.info("Submitting..") - self.log.info(json.dumps(payload, indent=4, sort_keys=True)) + self.log.info("Submitting Deadline job ...") + # self.log.info(json.dumps(payload, indent=4, sort_keys=True)) url = "{}/api/jobs".format(self.DEADLINE_REST_URL) response = requests.post(url, json=payload) if not response.ok: raise Exception(response.text) + def _copy_extend_frames(self, instance, representation): + """ + This will copy all existing frames from subset's latest version back + to render directory and rename them to what renderer is expecting. + + :param instance: instance to get required data from + :type instance: pyblish.plugin.Instance + """ + + import speedcopy + + self.log.info("Preparing to copy ...") + start = instance.data.get("startFrame") + end = instance.data.get("endFrame") + + # get latest version of subset + # this will stop if subset wasn't published yet + version = get_latest_version( + instance.data.get("asset"), + instance.data.get("subset"), "render") + # get its files based on extension + subset_resources = get_resources(version, representation.get("ext")) + r_col, _ = clique.assemble(subset_resources) + + # if override remove all frames we are expecting to be rendered + # so we'll copy only those missing from current render + if instance.data.get("overrideExistingFrame"): + for frame in range(start, end+1): + if frame not in r_col.indexes: + continue + r_col.indexes.remove(frame) + + # now we need to translate published names from represenation + # back. 
This is tricky, right now we'll just use same naming + and only switch frame numbers + resource_files = [] + r_filename = os.path.basename( + representation.get("files")[0]) # first file + op = re.search(self.R_FRAME_NUMBER, r_filename) + assert op is not None, "padding string wasn't found" + pre = r_filename[:op.start("frame")] + post = r_filename[op.end("frame"):] + for frame in list(r_col): + fn = re.search(self.R_FRAME_NUMBER, frame) + # silencing linter as we need to compare to True, not to + # type + assert fn is not None, "padding string wasn't found" + # list of tuples (source, destination) + resource_files.append( + (frame, + os.path.join(representation.get("stagingDir"), + "{}{}{}".format(pre, + fn.group("frame"), + post))) + ) + + # test if destination dir exists and create it if not + output_dir = os.path.dirname(representation.get("files")[0]) + if not os.path.isdir(output_dir): + os.makedirs(output_dir) + + # copy files + for source in resource_files: + speedcopy.copy(source[0], source[1]) + self.log.info(" > {}".format(source[1])) + + self.log.info( + "Finished copying %i files" % len(resource_files)) + + def _create_instances_for_aov(self, instance_data, exp_files): + """ + This will create a new instance for every AOV it can detect in expected + files list. + + :param instance_data: skeleton data for instance (those needed later + by collector) + :type instance_data: dict + :param exp_files: list of expected files divided by AOVs + :type exp_files: list + :returns: list of instances + :rtype: list(dict) + """ + + task = os.environ["AVALON_TASK"] + subset = instance_data["subset"] + instances = [] + # go through aovs in expected files + for aov, files in exp_files[0].items(): + cols, rem = clique.assemble(files) + # we shouldn't have any remainders + if rem: + self.log.warning( + "skipping unexpected files found " + "in sequence: {}".format(rem)) + + # but we really expect only one collection, nothing else makes sense + assert len(cols) == 1, "only one image sequence type is expected" + + # create subset name `familyTaskSubset_AOV` + subset_name = 'render{}{}{}{}_{}'.format( + task[0].upper(), task[1:], + subset[0].upper(), subset[1:], + aov) + + staging = os.path.dirname(list(cols[0])[0]) + start = int(instance_data.get("frameStart")) + end = int(instance_data.get("frameEnd")) + + self.log.info("Creating data for: {}".format(subset_name)) + + app = os.environ.get("AVALON_APP", "") + + preview = False + if app in self.aov_filter.keys(): + if aov in self.aov_filter[app]: + preview = True + + new_instance = copy(instance_data) + new_instance["subset"] = subset_name + + ext = cols[0].tail.lstrip(".") + + # create representation + rep = { + "name": ext, + "ext": ext, + "files": [os.path.basename(f) for f in list(cols[0])], + "frameStart": start, + "frameEnd": end, + # If expectedFile are absolute, we need only filenames + "stagingDir": staging, + "anatomy_template": "render", + "fps": new_instance.get("fps"), + "tags": ["review"] if preview else [] + } + + self._solve_families(new_instance, preview) + + new_instance["representations"] = [rep] + + # if extending frames from existing version, copy files from there + # into our destination directory + if new_instance.get("extendFrames", False): + self._copy_extend_frames(new_instance, rep) + instances.append(new_instance) + + return instances + + def _get_representations(self, instance, exp_files): + """ + This will return representations of expected files if they are not + in hierarchy of aovs.
There should be only one sequence of files for + most cases, but if not - we create representation from each of them. + + :param instance: instance for which we are setting representations + :type instance: pyblish.plugin.Instance + :param exp_files: list of expected files + :type exp_files: list + :returns: list of representations + :rtype: list(dict) + """ + + representations = [] + start = int(instance.get("frameStart")) + end = int(instance.get("frameEnd")) + cols, rem = clique.assemble(exp_files) + bake_render_path = instance.get("bakeRenderPath") + + # create representation for every collected sequence + for c in cols: + ext = c.tail.lstrip(".") + preview = False + # if filtered aov name is found in filename, toggle it for + # preview video rendering + for app in self.aov_filter: + if os.environ.get("AVALON_APP", "") == app: + for aov in self.aov_filter[app]: + if re.match( + r".+(?:\.|_)({})(?:\.|_).*".format(aov), + list(c)[0] + ): + preview = True + break + break + + if bake_render_path: + preview = False + + rep = { + "name": ext, + "ext": ext, + "files": [os.path.basename(f) for f in list(c)], + "frameStart": start, + "frameEnd": end, + # If expectedFile are absolute, we need only filenames + "stagingDir": os.path.dirname(list(c)[0]), + "anatomy_template": "render", + "fps": instance.get("fps"), + "tags": ["review", "preview"] if preview else [], + } + + representations.append(rep) + + self._solve_families(instance, preview) + + # add reminders as representations + for r in rem: + ext = r.split(".")[-1] + rep = { + "name": ext, + "ext": ext, + "files": os.path.basename(r), + "stagingDir": os.path.dirname(r), + "anatomy_template": "publish", + } + if r in bake_render_path: + rep.update({ + "fps": instance.get("fps"), + "anatomy_template": "render", + "tags": ["review", "delete"] + }) + # solve families with `preview` attributes + self._solve_families(instance, True) + representations.append(rep) + + return representations + + def _solve_families(self, instance, preview=False): + families = instance.get("families") + # if we have one representation with preview tag + # flag whole instance for review and for ftrack + if preview: + if "ftrack" not in families: + if os.environ.get("FTRACK_SERVER"): + families.append("ftrack") + if "review" not in families: + families.append("review") + instance["families"] = families + def process(self, instance): """ Detect type of renderfarm submission and create and post dependend job @@ -257,212 +498,278 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin): :param instance: Instance data :type instance: dict """ - # Get a submission job data = instance.data.copy() + context = instance.context + self.context = context + if hasattr(instance, "_log"): data['_log'] = instance._log render_job = data.pop("deadlineSubmissionJob", None) submission_type = "deadline" - if not render_job: # No deadline job. Try Muster: musterSubmissionJob render_job = data.pop("musterSubmissionJob", None) submission_type = "muster" - if not render_job: - raise RuntimeError("Can't continue without valid Deadline " - "or Muster submission prior to this " - "plug-in.") + assert render_job, ( + "Can't continue without valid Deadline " + "or Muster submission prior to this " + "plug-in." 
+ ) if submission_type == "deadline": - self.DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", - "http://localhost:8082") + self.DEADLINE_REST_URL = os.environ.get( + "DEADLINE_REST_URL", "http://localhost:8082" + ) assert self.DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" self._submit_deadline_post_job(instance, render_job) asset = data.get("asset") or api.Session["AVALON_ASSET"] - subset = data["subset"] + subset = data.get("subset") - # Get start/end frame from instance, if not available get from context - context = instance.context start = instance.data.get("frameStart") if start is None: start = context.data["frameStart"] + end = instance.data.get("frameEnd") if end is None: end = context.data["frameEnd"] - # Add in regex for sequence filename - # This assumes the output files start with subset name and ends with - # a file extension. The "ext" key includes the dot with the extension. - if "ext" in instance.data: - ext = r"\." + re.escape(instance.data["ext"]) - else: - ext = r"\.\D+" + handle_start = instance.data.get("handleStart") + if handle_start is None: + handle_start = context.data["handleStart"] - regex = r"^{subset}.*\d+{ext}$".format(subset=re.escape(subset), - ext=ext) + handle_end = instance.data.get("handleEnd") + if handle_end is None: + handle_end = context.data["handleEnd"] + + fps = instance.data.get("fps") + if fps is None: + fps = context.data["fps"] + + if data.get("extendFrames", False): + start, end = self._extend_frames( + asset, + subset, + start, + end, + data["overrideExistingFrame"]) try: - source = data['source'] + source = data["source"] except KeyError: source = context.data["currentFile"] - source = source.replace(os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), - api.registered_root()) - + source = source.replace( + os.getenv("PYPE_STUDIO_PROJECTS_MOUNT"), api.registered_root() + ) relative_path = os.path.relpath(source, api.registered_root()) source = os.path.join("{root}", relative_path).replace("\\", "/") - # find subsets and version to attach render to - attach_to = instance.data.get("attachTo") - attach_subset_versions = [] - if attach_to: - for subset in attach_to: - for instance in context: - if instance.data["subset"] != subset["subset"]: - continue - attach_subset_versions.append( - {"version": instance.data["version"], - "subset": subset["subset"], - "family": subset["family"]}) + families = ["render"] - # Write metadata for publish job - metadata = { + instance_skeleton_data = { + "family": "render", + "subset": subset, + "families": families, + "asset": asset, + "frameStart": start, + "frameEnd": end, + "handleStart": handle_start, + "handleEnd": handle_end, + "fps": fps, + "source": source, + "extendFrames": data.get("extendFrames"), + "overrideExistingFrame": data.get("overrideExistingFrame"), + "pixelAspect": data.get("pixelAspect", 1), + "resolutionWidth": data.get("resolutionWidth", 1920), + "resolutionHeight": data.get("resolutionHeight", 1080), + } + + # transfer specific families from original instance to new render + for item in self.families_transfer: + if item in instance.data.get("families", []): + instance_skeleton_data["families"] += [item] + + # transfer specific properties from original instance based on + # mapping dictionary `instance_transfer` + for key, values in self.instance_transfer.items(): + if key in instance.data.get("families", []): + for v in values: + instance_skeleton_data[v] = instance.data.get(v) + + # look into instance data if representations are not having any + # which are having tag `publish_on_farm` 
and include them + for r in instance.data.get("representations", []): + if "publish_on_farm" in r.get("tags"): + # create representations attribute of not there + if "representations" not in instance_skeleton_data.keys(): + instance_skeleton_data["representations"] = [] + + instance_skeleton_data["representations"].append(r) + + instances = None + assert data.get("expectedFiles"), ("Submission from old Pype version" + " - missing expectedFiles") + + """ + if content of `expectedFiles` are dictionaries, we will handle + it as list of AOVs, creating instance from every one of them. + + Example: + -------- + + expectedFiles = [ + { + "beauty": [ + "foo_v01.0001.exr", + "foo_v01.0002.exr" + ], + + "Z": [ + "boo_v01.0001.exr", + "boo_v01.0002.exr" + ] + } + ] + + This will create instances for `beauty` and `Z` subset + adding those files to their respective representations. + + If we've got only list of files, we collect all filesequences. + More then one doesn't probably make sense, but we'll handle it + like creating one instance with multiple representations. + + Example: + -------- + + expectedFiles = [ + "foo_v01.0001.exr", + "foo_v01.0002.exr", + "xxx_v01.0001.exr", + "xxx_v01.0002.exr" + ] + + This will result in one instance with two representations: + `foo` and `xxx` + """ + + self.log.info(data.get("expectedFiles")) + + if isinstance(data.get("expectedFiles")[0], dict): + # we cannot attach AOVs to other subsets as we consider every + # AOV subset of its own. + + if len(data.get("attachTo")) > 0: + assert len(data.get("expectedFiles")[0].keys()) == 1, ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported") + + # create instances for every AOV we found in expected files. + # note: this is done for every AOV and every render camere (if + # there are multiple renderable cameras in scene) + instances = self._create_instances_for_aov( + instance_skeleton_data, + data.get("expectedFiles")) + self.log.info("got {} instance{}".format( + len(instances), + "s" if len(instances) > 1 else "")) + + else: + representations = self._get_representations( + instance_skeleton_data, + data.get("expectedFiles") + ) + + if "representations" not in instance_skeleton_data.keys(): + instance_skeleton_data["representations"] = [] + + # add representation + instance_skeleton_data["representations"] += representations + instances = [instance_skeleton_data] + + # if we are attaching to other subsets, create copy of existing + # instances, change data to match thats subset and replace + # existing instances with modified data + if instance.data.get("attachTo"): + self.log.info("Attaching render to subset:") + new_instances = [] + for at in instance.data.get("attachTo"): + for i in instances: + new_i = copy(i) + new_i["version"] = at.get("version") + new_i["subset"] = at.get("subset") + new_i["append"] = True + new_i["families"].append(at.get("family")) + new_instances.append(new_i) + self.log.info(" - {} / v{}".format( + at.get("subset"), at.get("version"))) + instances = new_instances + + # publish job file + publish_job = { "asset": asset, - "regex": regex, "frameStart": start, "frameEnd": end, "fps": context.data.get("fps", None), - "families": ["render"], "source": source, "user": context.data["user"], - "version": context.data["version"], + "version": context.data["version"], # this is workfile version "intent": context.data.get("intent"), "comment": context.data.get("comment"), - # Optional metadata (for debugging) - "metadata": { - "instance": data, - "job": render_job, - 
"session": api.Session.copy() - } + "job": render_job, + "session": api.Session.copy(), + "instances": instances } - if api.Session["AVALON_APP"] == "nuke": - metadata['subset'] = subset - + # pass Ftrack credentials in case of Muster if submission_type == "muster": ftrack = { "FTRACK_API_USER": os.environ.get("FTRACK_API_USER"), "FTRACK_API_KEY": os.environ.get("FTRACK_API_KEY"), - "FTRACK_SERVER": os.environ.get("FTRACK_SERVER") + "FTRACK_SERVER": os.environ.get("FTRACK_SERVER"), } - metadata.update({"ftrack": ftrack}) + publish_job.update({"ftrack": ftrack}) # Ensure output dir exists output_dir = instance.data["outputDir"] if not os.path.isdir(output_dir): os.makedirs(output_dir) - if data.get("extendFrames", False): - - family = "render" - override = data["overrideExistingFrame"] - - # override = data.get("overrideExistingFrame", False) - out_file = render_job.get("OutFile") - if not out_file: - raise RuntimeError("OutFile not found in render job!") - - extension = os.path.splitext(out_file[0])[1] - _ext = extension[1:] - - # Frame comparison - prev_start = None - prev_end = None - resource_range = range(int(start), int(end)+1) - - # Gather all the subset files (one subset per render pass!) - subset_names = [data["subset"]] - subset_names.extend(data.get("renderPasses", [])) - resources = [] - for subset_name in subset_names: - version = get_latest_version(asset_name=data["asset"], - subset_name=subset_name, - family=family) - - # Set prev start / end frames for comparison - if not prev_start and not prev_end: - prev_start = version["data"]["frameStart"] - prev_end = version["data"]["frameEnd"] - - subset_resources = get_resources(version, _ext) - resource_files = get_resource_files(subset_resources, - resource_range, - override) - - resources.extend(resource_files) - - updated_start = min(start, prev_start) - updated_end = max(end, prev_end) - - # Update metadata and instance start / end frame - self.log.info("Updating start / end frame : " - "{} - {}".format(updated_start, updated_end)) - - # TODO : Improve logic to get new frame range for the - # publish job (publish_filesequence.py) - # The current approach is not following Pyblish logic - # which is based - # on Collect / Validate / Extract. - - # ---- Collect Plugins --- - # Collect Extend Frames - Only run if extendFrames is toggled - # # # Store in instance: - # # # Previous rendered files per subset based on frames - # # # --> Add to instance.data[resources] - # # # Update publish frame range - - # ---- Validate Plugins --- - # Validate Extend Frames - # # # Check if instance has the requirements to extend frames - # There might have been some things which can be added to the list - # Please do so when fixing this. - - # Start frame - metadata["frameStart"] = updated_start - metadata["metadata"]["instance"]["frameStart"] = updated_start - - # End frame - metadata["frameEnd"] = updated_end - metadata["metadata"]["instance"]["frameEnd"] = updated_end - metadata_filename = "{}_metadata.json".format(subset) metadata_path = os.path.join(output_dir, metadata_filename) - # convert log messages if they are `LogRecord` to their - # string format to allow serializing as JSON later on. 
- rendered_logs = [] - for log in metadata["metadata"]["instance"].get("_log", []): - if isinstance(log, logging.LogRecord): - rendered_logs.append(log.getMessage()) - else: - rendered_logs.append(log) - - metadata["metadata"]["instance"]["_log"] = rendered_logs + self.log.info("Writing json file: {}".format(metadata_path)) with open(metadata_path, "w") as f: - json.dump(metadata, f, indent=4, sort_keys=True) + json.dump(publish_job, f, indent=4, sort_keys=True) - # Copy files from previous render if extendFrame is True - if data.get("extendFrames", False): + def _extend_frames(self, asset, subset, start, end, override): + """ + This will get latest version of asset and update frame range based + on minimum and maximum values + """ - self.log.info("Preparing to copy ..") - import shutil + # Frame comparison + prev_start = None + prev_end = None - dest_path = data["outputDir"] - for source in resources: - src_file = os.path.basename(source) - dest = os.path.join(dest_path, src_file) - shutil.copy(source, dest) + version = get_latest_version( + asset_name=asset, + subset_name=subset, + family='render' + ) - self.log.info("Finished copying %i files" % len(resources)) + # Set prev start / end frames for comparison + if not prev_start and not prev_end: + prev_start = version["data"]["frameStart"] + prev_end = version["data"]["frameEnd"] + + updated_start = min(start, prev_start) + updated_end = max(end, prev_end) + + self.log.info( + "Updating start / end frame : " + "{} - {}".format(updated_start, updated_end) + ) + + return updated_start, updated_end diff --git a/pype/plugins/global/publish/validate_ffmpeg_installed.py b/pype/plugins/global/publish/validate_ffmpeg_installed.py index df7c330e95..f6738e6de1 100644 --- a/pype/plugins/global/publish/validate_ffmpeg_installed.py +++ b/pype/plugins/global/publish/validate_ffmpeg_installed.py @@ -1,13 +1,14 @@ import pyblish.api import os import subprocess +import pype.lib try: import os.errno as errno except ImportError: import errno -class ValidateFfmpegInstallef(pyblish.api.Validator): +class ValidateFFmpegInstalled(pyblish.api.Validator): """Validate availability of ffmpeg tool in PATH""" order = pyblish.api.ValidatorOrder @@ -27,10 +28,8 @@ class ValidateFfmpegInstallef(pyblish.api.Validator): return True def process(self, instance): - self.log.info("ffmpeg path: `{}`".format( - os.environ.get("FFMPEG_PATH", ""))) - if self.is_tool( - os.path.join( - os.environ.get("FFMPEG_PATH", ""), "ffmpeg")) is False: + ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg") + self.log.info("ffmpeg path: `{}`".format(ffmpeg_path)) + if self.is_tool(ffmpeg_path) is False: self.log.error("ffmpeg not found in PATH") raise RuntimeError('ffmpeg not installed.') diff --git a/pype/plugins/global/publish/validate_version.py b/pype/plugins/global/publish/validate_version.py new file mode 100644 index 0000000000..4585e5a008 --- /dev/null +++ b/pype/plugins/global/publish/validate_version.py @@ -0,0 +1,25 @@ +import pyblish.api + + +class ValidateVersion(pyblish.api.InstancePlugin): + """Validate instance version. + + Pype is not allowing overwriting previously published versions.
+ """ + + order = pyblish.api.ValidatorOrder + + label = "Validate Version" + hosts = ["nuke", "maya", "blender"] + + def process(self, instance): + version = instance.data.get("version") + latest_version = instance.data.get("latestVersion") + + if latest_version is not None: + msg = ("Version `{0}` that you are" + " trying to publish, already" + " exists in the" + " database.").format( + version, latest_version) + assert (int(version) > int(latest_version)), msg diff --git a/pype/plugins/maya/create/create_renderglobals.py b/pype/plugins/maya/create/create_render.py similarity index 51% rename from pype/plugins/maya/create/create_renderglobals.py rename to pype/plugins/maya/create/create_render.py index 7c71bfbc36..080c6bd55d 100644 --- a/pype/plugins/maya/create/create_renderglobals.py +++ b/pype/plugins/maya/create/create_render.py @@ -2,43 +2,108 @@ import os import json import appdirs import requests + from maya import cmds +import maya.app.renderSetup.model.renderSetup as renderSetup + import pype.maya.lib as lib import avalon.maya -class CreateRenderGlobals(avalon.maya.Creator): +class CreateRender(avalon.maya.Creator): + """Create render layer for export""" - label = "Render Globals" - family = "renderglobals" - icon = "gears" - defaults = ['Main'] + label = "Render" + family = "rendering" + icon = "eye" + defaults = ["Main"] _token = None _user = None _password = None + # renderSetup instance + _rs = None + + _image_prefix_nodes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'defaultRenderGlobals.imageFilePrefix', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' + } + + _image_prefixes = { + 'mentalray': 'maya///_', + 'vray': '"maya///', + 'arnold': 'maya///_', + 'renderman': 'maya///_', + 'redshift': 'maya///_' + } + def __init__(self, *args, **kwargs): - super(CreateRenderGlobals, self).__init__(*args, **kwargs) + super(CreateRender, self).__init__(*args, **kwargs) - # We won't be publishing this one - self.data["id"] = "avalon.renderglobals" + def process(self): + exists = cmds.ls(self.name) + if exists: + return cmds.warning("%s already exists." 
% exists[0]) + use_selection = self.options.get("useSelection") + with lib.undo_chunk(): + self._create_render_settings() + instance = super(CreateRender, self).process() + cmds.setAttr("{}.machineList".format(instance), lock=True) + self._rs = renderSetup.instance() + layers = self._rs.getRenderLayers() + if use_selection: + print(">>> processing existing layers") + sets = [] + for layer in layers: + print(" - creating set for {}".format(layer.name())) + render_set = cmds.sets(n="LAYER_{}".format(layer.name())) + sets.append(render_set) + cmds.sets(sets, forceElement=instance) + + # if no render layers are present, create default one with + # asterix selector + if not layers: + rl = self._rs.createRenderLayer('Main') + cl = rl.createCollection("defaultCollection") + cl.getSelector().setPattern('*') + + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + + cmds.setAttr(self._image_prefix_nodes[renderer], + self._image_prefixes[renderer], + type="string") + + def _create_render_settings(self): # get pools pools = [] - deadline_url = os.environ.get('DEADLINE_REST_URL', None) - muster_url = os.environ.get('MUSTER_REST_URL', None) + deadline_url = os.environ.get("DEADLINE_REST_URL", None) + muster_url = os.environ.get("MUSTER_REST_URL", None) if deadline_url and muster_url: - self.log.error("Both Deadline and Muster are enabled. " - "Cannot support both.") + self.log.error( + "Both Deadline and Muster are enabled. " "Cannot support both." + ) raise RuntimeError("Both Deadline and Muster are enabled") if deadline_url is None: self.log.warning("Deadline REST API url not found.") else: argument = "{}/api/pools?NamesOnly=true".format(deadline_url) - response = self._requests_get(argument) + try: + response = self._requests_get(argument) + except requests.exceptions.ConnectionError as e: + msg = 'Cannot connect to deadline web service' + self.log.error(msg) + raise RuntimeError('{} - {}'.format(msg, e)) if not response.ok: self.log.warning("No pools retrieved") else: @@ -57,8 +122,8 @@ class CreateRenderGlobals(avalon.maya.Creator): try: pools = self._get_muster_pools() except requests.exceptions.HTTPError as e: - if e.startswith('401'): - self.log.warning('access token expired') + if e.startswith("401"): + self.log.warning("access token expired") self._show_login() raise RuntimeError("Access token expired") except requests.exceptions.ConnectionError: @@ -66,20 +131,15 @@ class CreateRenderGlobals(avalon.maya.Creator): raise RuntimeError("Cannot connect to {}".format(muster_url)) pool_names = [] for pool in pools: - self.log.info(" - pool: {}".format(pool['name'])) - pool_names.append(pool['name']) + self.log.info(" - pool: {}".format(pool["name"])) + pool_names.append(pool["name"]) self.data["primaryPool"] = pool_names - # We don't need subset or asset attributes - # self.data.pop("subset", None) - # self.data.pop("asset", None) - # self.data.pop("active", None) - self.data["suspendPublishJob"] = False self.data["extendFrames"] = False self.data["overrideExistingFrame"] = True - self.data["useLegacyRenderLayers"] = True + # self.data["useLegacyRenderLayers"] = True self.data["priority"] = 50 self.data["framesPerTask"] = 1 self.data["whitelist"] = False @@ -88,20 +148,6 @@ class CreateRenderGlobals(avalon.maya.Creator): self.options = {"useSelection": False} # Force no content - def process(self): - - exists = cmds.ls(self.name) - assert len(exists) <= 1, ( - "More than one 
renderglobal exists, this is a bug" - ) - - if exists: - return cmds.warning("%s already exists." % exists[0]) - - with lib.undo_chunk(): - super(CreateRenderGlobals, self).process() - cmds.setAttr("{}.machineList".format(self.name), lock=True) - def _load_credentials(self): """ Load Muster credentials from file and set `MUSTER_USER`, @@ -111,14 +157,12 @@ class CreateRenderGlobals(avalon.maya.Creator): Show login dialog if access token is invalid or missing. """ - app_dir = os.path.normpath( - appdirs.user_data_dir('pype-app', 'pype') - ) - file_name = 'muster_cred.json' + app_dir = os.path.normpath(appdirs.user_data_dir("pype-app", "pype")) + file_name = "muster_cred.json" fpath = os.path.join(app_dir, file_name) - file = open(fpath, 'r') + file = open(fpath, "r") muster_json = json.load(file) - self._token = muster_json.get('token', None) + self._token = muster_json.get("token", None) if not self._token: self._show_login() raise RuntimeError("Invalid access token for Muster") @@ -131,26 +175,25 @@ class CreateRenderGlobals(avalon.maya.Creator): """ Get render pools from muster """ - params = { - 'authToken': self._token - } - api_entry = '/api/pools/list' - response = self._requests_get( - self.MUSTER_REST_URL + api_entry, params=params) + params = {"authToken": self._token} + api_entry = "/api/pools/list" + response = self._requests_get(self.MUSTER_REST_URL + api_entry, + params=params) if response.status_code != 200: if response.status_code == 401: - self.log.warning('Authentication token expired.') + self.log.warning("Authentication token expired.") self._show_login() else: self.log.error( - 'Cannot get pools from Muster: {}'.format( - response.status_code)) - raise Exception('Cannot get pools from Muster') + ("Cannot get pools from " + "Muster: {}").format(response.status_code) + ) + raise Exception("Cannot get pools from Muster") try: - pools = response.json()['ResponseData']['pools'] + pools = response.json()["ResponseData"]["pools"] except ValueError as e: - self.log.error('Invalid response from Muster server {}'.format(e)) - raise Exception('Invalid response from Muster server') + self.log.error("Invalid response from Muster server {}".format(e)) + raise Exception("Invalid response from Muster server") return pools @@ -162,8 +205,8 @@ class CreateRenderGlobals(avalon.maya.Creator): self.log.debug(api_url) login_response = self._requests_post(api_url, timeout=1) if login_response.status_code != 200: - self.log.error('Cannot show login form to Muster') - raise Exception('Cannot show login form to Muster') + self.log.error("Cannot show login form to Muster") + raise Exception("Cannot show login form to Muster") def _requests_post(self, *args, **kwargs): """ Wrapper for requests, disabling SSL certificate validation if @@ -175,8 +218,10 @@ class CreateRenderGlobals(avalon.maya.Creator): WARNING: disabling SSL certificate validation is defeating one line of defense SSL is providing and it is not recommended. """ - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + if "verify" not in kwargs: + kwargs["verify"] = ( + False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True + ) # noqa return requests.post(*args, **kwargs) def _requests_get(self, *args, **kwargs): @@ -189,6 +234,8 @@ class CreateRenderGlobals(avalon.maya.Creator): WARNING: disabling SSL certificate validation is defeating one line of defense SSL is providing and it is not recommended. 
""" - if 'verify' not in kwargs: - kwargs['verify'] = False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True # noqa + if "verify" not in kwargs: + kwargs["verify"] = ( + False if os.getenv("PYPE_DONT_VERIFY_SSL", True) else True + ) # noqa return requests.get(*args, **kwargs) diff --git a/pype/plugins/maya/publish/collect_render.py b/pype/plugins/maya/publish/collect_render.py new file mode 100644 index 0000000000..f31198448b --- /dev/null +++ b/pype/plugins/maya/publish/collect_render.py @@ -0,0 +1,909 @@ +""" +This collector will go through render layers in maya and prepare all data +needed to create instances and their representations for submition and +publishing on farm. + +Requires: + instance -> families + instance -> setMembers + + context -> currentFile + context -> workspaceDir + context -> user + + session -> AVALON_ASSET + +Optional: + +Provides: + instance -> label + instance -> subset + instance -> attachTo + instance -> setMembers + instance -> publish + instance -> frameStart + instance -> frameEnd + instance -> byFrameStep + instance -> renderer + instance -> family + instance -> families + instance -> asset + instance -> time + instance -> author + instance -> source + instance -> expectedFiles + instance -> resolutionWidth + instance -> resolutionHeight + instance -> pixelAspect +""" + +import re +import os +import types +import six +from abc import ABCMeta, abstractmethod + +from maya import cmds +import maya.app.renderSetup.model.renderSetup as renderSetup + +import pyblish.api + +from avalon import maya, api +import pype.maya.lib as lib + + +R_SINGLE_FRAME = re.compile(r'^(-?)\d+$') +R_FRAME_RANGE = re.compile(r'^(?P(-?)\d+)-(?P(-?)\d+)$') +R_FRAME_NUMBER = re.compile(r'.+\.(?P[0-9]+)\..+') +R_LAYER_TOKEN = re.compile( + r'.*%l.*|.*.*|.*.*', re.IGNORECASE) +R_AOV_TOKEN = re.compile(r'.*%a.*|.*.*|.*.*', re.IGNORECASE) +R_SUBSTITUTE_AOV_TOKEN = re.compile(r'%a||', re.IGNORECASE) +R_REMOVE_AOV_TOKEN = re.compile(r'_%a|_|_', re.IGNORECASE) +# to remove unused renderman tokens +R_CLEAN_FRAME_TOKEN = re.compile(r'\.?\.?', re.IGNORECASE) +R_CLEAN_EXT_TOKEN = re.compile(r'\.?\.?', re.IGNORECASE) + +R_SUBSTITUTE_LAYER_TOKEN = re.compile( + r'%l||', re.IGNORECASE) +R_SUBSTITUTE_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) +R_SUBSTITUTE_SCENE_TOKEN = re.compile(r'%s|', re.IGNORECASE) + +RENDERER_NAMES = { + 'mentalray': 'MentalRay', + 'vray': 'V-Ray', + 'arnold': 'Arnold', + 'renderman': 'Renderman', + 'redshift': 'Redshift' +} + +# not sure about the renderman image prefix +ImagePrefixes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'rmanGlobals.imageFileFormat', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' +} + + +class CollectMayaRender(pyblish.api.ContextPlugin): + """Gather all publishable render layers from renderSetup""" + + order = pyblish.api.CollectorOrder + 0.01 + hosts = ["maya"] + label = "Collect Render Layers" + + def process(self, context): + render_instance = None + for instance in context: + if 'rendering' in instance.data['families']: + render_instance = instance + render_instance.data["remove"] = True + + # make sure workfile instance publishing is enabled + if 'workfile' in instance.data['families']: + instance.data["publish"] = True + + if not render_instance: + self.log.info("No render instance found, skipping render " + "layer collection.") + return + + render_globals = render_instance + collected_render_layers = 
render_instance.data['setMembers'] + filepath = context.data["currentFile"].replace("\\", "/") + asset = api.Session["AVALON_ASSET"] + workspace = context.data["workspaceDir"] + + self._rs = renderSetup.instance() + maya_render_layers = {l.name(): l for l in self._rs.getRenderLayers()} + + self.maya_layers = maya_render_layers + + for layer in collected_render_layers: + # every layer in set should start with `LAYER_` prefix + try: + expected_layer_name = re.search(r"^LAYER_(.*)", layer).group(1) + except AttributeError: + msg = ("Invalid layer name in set [ {} ]".format(layer)) + self.log.warning(msg) + continue + + self.log.info("processing %s" % layer) + # check if layer is part of renderSetup + if expected_layer_name not in maya_render_layers: + msg = ("Render layer [ {} ] is not in " + "Render Setup").format(expected_layer_name) + self.log.warning(msg) + continue + + # check if layer is renderable + if not maya_render_layers[expected_layer_name].isRenderable(): + msg = ("Render layer [ {} ] is not " + "renderable").format(expected_layer_name) + self.log.warning(msg) + continue + + # test if there are sets (subsets) to attach render to + sets = cmds.sets(layer, query=True) or [] + attachTo = [] + if sets: + for s in sets: + attachTo.append({ + "version": None, # we need integrator to get version + "subset": s, + "family": cmds.getAttr("{}.family".format(s)) + }) + self.log.info(" -> attach render to: {}".format(s)) + + layer_name = "rs_{}".format(expected_layer_name) + + # collect all frames we are expecting to be rendered + renderer = cmds.getAttr( + 'defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + + # return all expected files for all cameras and aovs in given + # frame range + exp_files = ExpectedFiles().get(renderer, layer_name) + assert exp_files, ("no file names were generated, this is a bug") + + # if we want to attach render to subset, check if we have AOVs + # in expectedFiles. If so, raise error as we cannot attach AOV + # (considered to be subset on its own) to another subset + if attachTo: + assert len(exp_files[0].keys()) == 1, ( + "attaching multiple AOVs or renderable cameras to " + "subset is not supported") + + # append full path + full_exp_files = [] + aov_dict = {} + + # we either get AOVs or just list of files. List of files can + # mean two things - there are no AOVs enabled or multipass EXR + # is produced. In either case we treat those as `beauty`.
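As an illustration only (file names and paths below are invented), these are the two `exp_files` shapes the comment above refers to, and the per-AOV path dictionary both are normalized into by the code that follows:

    # AOVs enabled: a single dict mapping AOV name to its file sequence
    exp_files = [{
        "beauty": ["sh010_beauty.1001.exr", "sh010_beauty.1002.exr"],
        "Z": ["sh010_Z.1001.exr", "sh010_Z.1002.exr"]
    }]

    # no AOVs (or a multichannel EXR): a flat list, treated as "beauty"
    exp_files = ["sh010.1001.exr", "sh010.1002.exr"]

    # either way the collector ends up with absolute paths grouped per AOV,
    # rooted under the workspace "renders" directory:
    aov_dict = {"beauty": ["/proj/shot/renders/sh010.1001.exr",
                           "/proj/shot/renders/sh010.1002.exr"]}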
+ if isinstance(exp_files[0], dict): + for aov, files in exp_files[0].items(): + full_paths = [] + for ef in files: + full_path = os.path.join(workspace, "renders", ef) + full_path = full_path.replace("\\", "/") + full_paths.append(full_path) + aov_dict[aov] = full_paths + else: + full_paths = [] + for ef in exp_files: + full_path = os.path.join(workspace, "renders", ef) + full_path = full_path.replace("\\", "/") + full_paths.append(full_path) + aov_dict["beauty"] = full_paths + + full_exp_files.append(aov_dict) + self.log.info(full_exp_files) + self.log.info("collecting layer: {}".format(layer_name)) + # Get layer specific settings, might be overrides + data = { + "subset": expected_layer_name, + "attachTo": attachTo, + "setMembers": layer_name, + "publish": True, + "frameStart": int(self.get_render_attribute("startFrame", + layer=layer_name)), + "frameEnd": int(self.get_render_attribute("endFrame", + layer=layer_name)), + "byFrameStep": int( + self.get_render_attribute("byFrameStep", + layer=layer_name)), + "renderer": self.get_render_attribute("currentRenderer", + layer=layer_name), + "handleStart": context.data["assetEntity"]['data']['handleStart'], + "handleEnd": context.data["assetEntity"]['data']['handleEnd'], + + # instance subset + "family": "renderlayer", + "families": ["renderlayer"], + "asset": asset, + "time": api.time(), + "author": context.data["user"], + + # Add source to allow tracing back to the scene from + # which was submitted originally + "source": filepath, + "expectedFiles": full_exp_files, + "resolutionWidth": cmds.getAttr("defaultResolution.width"), + "resolutionHeight": cmds.getAttr("defaultResolution.height"), + "pixelAspect": cmds.getAttr("defaultResolution.height") + } + + # Apply each user defined attribute as data + for attr in cmds.listAttr(layer, userDefined=True) or list(): + try: + value = cmds.getAttr("{}.{}".format(layer, attr)) + except Exception: + # Some attributes cannot be read directly, + # such as mesh and color attributes. These + # are considered non-essential to this + # particular publishing pipeline. + value = None + + data[attr] = value + + # Include (optional) global settings + # Get global overrides and translate to Deadline values + overrides = self.parse_options(str(render_globals)) + data.update(**overrides) + + # Define nice label + label = "{0} ({1})".format(expected_layer_name, data["asset"]) + label += " [{0}-{1}]".format(int(data["frameStart"]), + int(data["frameEnd"])) + + instance = context.create_instance(expected_layer_name) + instance.data["label"] = label + instance.data.update(data) + pass + + def parse_options(self, render_globals): + """Get all overrides with a value, skip those without + + Here's the kicker. These globals override defaults in the submission + integrator, but an empty value means no overriding is made. + Otherwise, Frames would override the default frames set under globals. 
+ + Args: + render_globals (str): collection of render globals + + Returns: + dict: only overrides with values + """ + + attributes = maya.read(render_globals) + + options = {"renderGlobals": {}} + options["renderGlobals"]["Priority"] = attributes["priority"] + + # Check for specific pools + pool_a, pool_b = self._discover_pools(attributes) + options["renderGlobals"].update({"Pool": pool_a}) + if pool_b: + options["renderGlobals"].update({"SecondaryPool": pool_b}) + + # Machine list + machine_list = attributes["machineList"] + if machine_list: + key = "Whitelist" if attributes["whitelist"] else "Blacklist" + options['renderGlobals'][key] = machine_list + + # Suspend publish job + state = "Suspended" if attributes["suspendPublishJob"] else "Active" + options["publishJobState"] = state + + chunksize = attributes.get("framesPerTask", 1) + options["renderGlobals"]["ChunkSize"] = chunksize + + # Override frames should be False if extendFrames is False. This is + # to ensure it doesn't go off doing crazy unpredictable things + override_frames = False + extend_frames = attributes.get("extendFrames", False) + if extend_frames: + override_frames = attributes.get("overrideExistingFrame", False) + + options["extendFrames"] = extend_frames + options["overrideExistingFrame"] = override_frames + + maya_render_plugin = "MayaBatch" + if not attributes.get("useMayaBatch", True): + maya_render_plugin = "MayaCmd" + + options["mayaRenderPlugin"] = maya_render_plugin + + return options + + def _discover_pools(self, attributes): + + pool_a = None + pool_b = None + + # Check for specific pools + pool_b = [] + if "primaryPool" in attributes: + pool_a = attributes["primaryPool"] + if "secondaryPool" in attributes: + pool_b = attributes["secondaryPool"] + + else: + # Backwards compatibility + pool_str = attributes.get("pools", None) + if pool_str: + pool_a, pool_b = pool_str.split(";") + + # Ensure empty entry token is caught + if pool_b == "-": + pool_b = None + + return pool_a, pool_b + + def _get_overrides(self, layer): + rset = self.maya_layers[layer].renderSettingsCollectionInstance() + return rset.getOverrides() + + def get_render_attribute(self, attr, layer): + return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), + layer=layer) + + +class ExpectedFiles: + + def get(self, renderer, layer): + if renderer.lower() == 'arnold': + return ExpectedFilesArnold(layer).get_files() + elif renderer.lower() == 'vray': + return ExpectedFilesVray(layer).get_files() + elif renderer.lower() == 'redshift': + return ExpectedFilesRedshift(layer).get_files() + elif renderer.lower() == 'mentalray': + return ExpectedFilesMentalray(layer).get_files() + elif renderer.lower() == 'renderman': + return ExpectedFilesRenderman(layer).get_files() + else: + raise UnsupportedRendererException( + "unsupported {}".format(renderer)) + + +@six.add_metaclass(ABCMeta) +class AExpectedFiles: + renderer = None + layer = None + + def __init__(self, layer): + self.layer = layer + + @abstractmethod + def get_aovs(self): + pass + + def get_renderer_prefix(self): + try: + file_prefix = cmds.getAttr(ImagePrefixes[self.renderer]) + except KeyError: + raise UnsupportedRendererException( + "Unsupported renderer {}".format(self.renderer)) + return file_prefix + + def _get_layer_data(self): + # ______________________________________________ + # ____________________/ ____________________________________________/ + # 1 - get scene name /__________________/ + # ____________________/ + scene_dir, scene_basename = os.path.split(cmds.file(q=True, 
loc=True)) + scene_name, _ = os.path.splitext(scene_basename) + + # ______________________________________________ + # ____________________/ ____________________________________________/ + # 2 - detect renderer /__________________/ + # ____________________/ + renderer = self.renderer + + # ________________________________________________ + # __________________/ ______________________________________________/ + # 3 - image prefix /__________________/ + # __________________/ + file_prefix = self.get_renderer_prefix() + + if not file_prefix: + raise RuntimeError("Image prefix not set") + + default_ext = cmds.getAttr('defaultRenderGlobals.imfPluginKey') + + # ________________________________________________ + # __________________/ ______________________________________________/ + # 4 - get renderable cameras_____________/ + # __________________/ + + # if we have token in prefix path we'll expect output for + # every renderable camera in layer. + + renderable_cameras = self.get_renderable_cameras() + # ________________________________________________ + # __________________/ ______________________________________________/ + # 5 - get AOVs /____________________/ + # __________________/ + + enabled_aovs = self.get_aovs() + + layer_name = self.layer + if self.layer.startswith("rs_"): + layer_name = self.layer[3:] + start_frame = int(self.get_render_attribute('startFrame')) + end_frame = int(self.get_render_attribute('endFrame')) + frame_step = int(self.get_render_attribute('byFrameStep')) + padding = int(self.get_render_attribute('extensionPadding')) + + scene_data = { + "frameStart": start_frame, + "frameEnd": end_frame, + "frameStep": frame_step, + "padding": padding, + "cameras": renderable_cameras, + "sceneName": scene_name, + "layerName": layer_name, + "renderer": renderer, + "defaultExt": default_ext, + "filePrefix": file_prefix, + "enabledAOVs": enabled_aovs + } + return scene_data + + def _generate_single_file_sequence(self, layer_data): + expected_files = [] + file_prefix = layer_data["filePrefix"] + for cam in layer_data["cameras"]: + mappings = ( + (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]), + (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]), + (R_SUBSTITUTE_CAMERA_TOKEN, cam), + # this is required to remove unfilled aov token, for example + # in Redshift + (R_REMOVE_AOV_TOKEN, ""), + (R_CLEAN_FRAME_TOKEN, ""), + (R_CLEAN_EXT_TOKEN, "") + ) + + for regex, value in mappings: + file_prefix = re.sub(regex, value, file_prefix) + + for frame in range( + int(layer_data["frameStart"]), + int(layer_data["frameEnd"]) + 1, + int(layer_data["frameStep"])): + expected_files.append( + '{}.{}.{}'.format(file_prefix, + str(frame).rjust( + layer_data["padding"], "0"), + layer_data["defaultExt"])) + return expected_files + + def _generate_aov_file_sequences(self, layer_data): + expected_files = [] + aov_file_list = {} + file_prefix = layer_data["filePrefix"] + for aov in layer_data["enabledAOVs"]: + for cam in layer_data["cameras"]: + + mappings = ( + (R_SUBSTITUTE_SCENE_TOKEN, layer_data["sceneName"]), + (R_SUBSTITUTE_LAYER_TOKEN, layer_data["layerName"]), + (R_SUBSTITUTE_CAMERA_TOKEN, cam), + (R_SUBSTITUTE_AOV_TOKEN, aov[0]), + (R_CLEAN_FRAME_TOKEN, ""), + (R_CLEAN_EXT_TOKEN, "") + ) + + for regex, value in mappings: + file_prefix = re.sub(regex, value, file_prefix) + + aov_files = [] + for frame in range( + int(layer_data["frameStart"]), + int(layer_data["frameEnd"]) + 1, + int(layer_data["frameStep"])): + aov_files.append( + '{}.{}.{}'.format( + file_prefix, + 
str(frame).rjust(layer_data["padding"], "0"), + aov[1])) + + # if we have more then one renderable camera, append + # camera name to AOV to allow per camera AOVs. + aov_name = aov[0] + if len(layer_data["cameras"]) > 1: + aov_name = "{}_{}".format(aov[0], cam) + + aov_file_list[aov_name] = aov_files + file_prefix = layer_data["filePrefix"] + + expected_files.append(aov_file_list) + return expected_files + + def get_files(self): + """ + This method will return list of expected files. + + It will translate render token strings ('', etc.) to + their values. This task is tricky as every renderer deals with this + differently. It depends on `get_aovs()` abstract method implemented + for every supported renderer. + """ + layer_data = self._get_layer_data() + + expected_files = [] + if layer_data.get("enabledAOVs"): + expected_files = self._generate_aov_file_sequences(layer_data) + else: + expected_files = self._generate_single_file_sequence(layer_data) + + return expected_files + + def get_renderable_cameras(self): + cam_parents = [cmds.listRelatives(x, ap=True)[-1] + for x in cmds.ls(cameras=True)] + + renderable_cameras = [] + for cam in cam_parents: + renderable = False + if self.maya_is_true(cmds.getAttr('{}.renderable'.format(cam))): + renderable = True + + for override in self.get_layer_overrides( + '{}.renderable'.format(cam), self.layer): + renderable = self.maya_is_true(override) + + if renderable: + renderable_cameras.append(cam) + return renderable_cameras + + def maya_is_true(self, attr_val): + """ + Whether a Maya attr evaluates to True. + When querying an attribute value from an ambiguous object the + Maya API will return a list of values, which need to be properly + handled to evaluate properly. + """ + if isinstance(attr_val, types.BooleanType): + return attr_val + elif isinstance(attr_val, (types.ListType, types.GeneratorType)): + return any(attr_val) + else: + return bool(attr_val) + + def get_layer_overrides(self, attr, layer): + connections = cmds.listConnections(attr, plugs=True) + if connections: + for connection in connections: + if connection: + node_name = connection.split('.')[0] + if cmds.nodeType(node_name) == 'renderLayer': + attr_name = '%s.value' % '.'.join( + connection.split('.')[:-1]) + if node_name == layer: + yield cmds.getAttr(attr_name) + + def get_render_attribute(self, attr): + return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), + layer=self.layer) + + +class ExpectedFilesArnold(AExpectedFiles): + + # Arnold AOV driver extension mapping + # Is there a better way? + aiDriverExtension = { + 'jpeg': 'jpg', + 'exr': 'exr', + 'deepexr': 'exr', + 'png': 'png', + 'tiff': 'tif', + 'mtoa_shaders': 'ass', # TODO: research what those last two should be + 'maya': '' + } + + def __init__(self, layer): + super(ExpectedFilesArnold, self).__init__(layer) + self.renderer = 'arnold' + + def get_aovs(self): + enabled_aovs = [] + try: + if not (cmds.getAttr('defaultArnoldRenderOptions.aovMode') + and not cmds.getAttr('defaultArnoldDriver.mergeAOVs')): + # AOVs are merged in mutli-channel file + return enabled_aovs + except ValueError: + # this occurs when Render Setting windows was not opened yet. In + # such case there are no Arnold options created so query for AOVs + # will fail. We terminate here as there are no AOVs specified then. + # This state will most probably fail later on some Validator + # anyway. + return enabled_aovs + + # AOVs are set to be rendered separately. We should expect + # token in path. 
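# --- Maya-free sketch of what an ExpectedFiles implementation's get_aovs()
# --- returns: a list of (aov_name, extension) tuples. The AOV records below
# --- are invented; the real code reads `aiAOV` nodes and their layer overrides
# --- through maya.cmds.
ai_driver_extension = {"jpeg": "jpg", "exr": "exr", "deepexr": "exr",
                       "png": "png", "tiff": "tif"}  # trimmed copy of the mapping above

fake_aovs = [
    {"name": "RGBA", "enabled": True, "translator": "exr"},
    {"name": "diffuse", "enabled": True, "translator": "exr"},
    {"name": "crypto_material", "enabled": False, "translator": "exr"},
]

enabled_aovs = []
for aov in fake_aovs:
    if not aov["enabled"]:
        continue
    # Arnold writes the RGBA AOV as the beauty pass, hence the rename
    name = "beauty" if aov["name"] == "RGBA" else aov["name"]
    enabled_aovs.append((name, ai_driver_extension[aov["translator"]]))

# enabled_aovs -> [('beauty', 'exr'), ('diffuse', 'exr')]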
+ + ai_aovs = [n for n in cmds.ls(type='aiAOV')] + + for aov in ai_aovs: + enabled = self.maya_is_true( + cmds.getAttr('{}.enabled'.format(aov))) + ai_driver = cmds.listConnections( + '{}.outputs'.format(aov))[0] + ai_translator = cmds.getAttr( + '{}.aiTranslator'.format(ai_driver)) + try: + aov_ext = self.aiDriverExtension[ai_translator] + except KeyError: + msg = ('Unrecognized arnold ' + 'driver format for AOV - {}').format( + cmds.getAttr('{}.name'.format(aov)) + ) + raise AOVError(msg) + + for override in self.get_layer_overrides( + '{}.enabled'.format(aov), self.layer): + enabled = self.maya_is_true(override) + if enabled: + # If aov RGBA is selected, arnold will translate it to `beauty` + aov_name = cmds.getAttr('%s.name' % aov) + if aov_name == 'RGBA': + aov_name = 'beauty' + enabled_aovs.append( + ( + aov_name, + aov_ext + ) + ) + # Append 'beauty' as this is arnolds + # default. If token is specified and no AOVs are + # defined, this will be used. + enabled_aovs.append( + ( + u'beauty', + cmds.getAttr('defaultRenderGlobals.imfPluginKey') + ) + ) + return enabled_aovs + + +class ExpectedFilesVray(AExpectedFiles): + + # V-ray file extension mapping + # 5 - exr + # 6 - multichannel exr + # 13 - deep exr + + def __init__(self, layer): + super(ExpectedFilesVray, self).__init__(layer) + self.renderer = 'vray' + + def get_renderer_prefix(self): + prefix = super(ExpectedFilesVray, self).get_renderer_prefix() + prefix = "{}_".format(prefix) + return prefix + + def get_files(self): + expected_files = super(ExpectedFilesVray, self).get_files() + + # we need to add one sequence for plain beauty if AOVs are enabled. + # as vray output beauty without 'beauty' in filename. + + layer_data = self._get_layer_data() + if layer_data.get("enabledAOVs"): + expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501 + + return expected_files + + def get_aovs(self): + enabled_aovs = [] + + try: + # really? do we set it in vray just by selecting multichannel exr? + if cmds.getAttr( + "vraySettings.imageFormatStr") == "exr (multichannel)": + # AOVs are merged in mutli-channel file + return enabled_aovs + except ValueError: + # this occurs when Render Setting windows was not opened yet. In + # such case there are no Arnold options created so query for AOVs + # will fail. We terminate here as there are no AOVs specified then. + # This state will most probably fail later on some Validator + # anyway. 
+ return enabled_aovs + + default_ext = cmds.getAttr('vraySettings.imageFormatStr') + if default_ext == "exr (multichannel)" or default_ext == "exr (deep)": + default_ext = "exr" + + vr_aovs = [n for n in cmds.ls( + type=["VRayRenderElement", "VRayRenderElementSet"])] + + # todo: find out how to detect multichannel exr for vray + for aov in vr_aovs: + enabled = self.maya_is_true( + cmds.getAttr('{}.enabled'.format(aov))) + for override in self.get_layer_overrides( + '{}.enabled'.format(aov), 'rs_{}'.format(self.layer)): + enabled = self.maya_is_true(override) + + if enabled: + # todo: find how vray set format for AOVs + enabled_aovs.append( + ( + self._get_vray_aov_name(aov), + default_ext) + ) + return enabled_aovs + + def _get_vray_aov_name(self, node): + + # Get render element pass type + vray_node_attr = next(attr for attr in cmds.listAttr(node) + if attr.startswith("vray_name")) + pass_type = vray_node_attr.rsplit("_", 1)[-1] + + # Support V-Ray extratex explicit name (if set by user) + if pass_type == "extratex": + explicit_attr = "{}.vray_explicit_name_extratex".format(node) + explicit_name = cmds.getAttr(explicit_attr) + if explicit_name: + return explicit_name + + # Node type is in the attribute name but we need to check if value + # of the attribute as it can be changed + return cmds.getAttr("{}.{}".format(node, vray_node_attr)) + + +class ExpectedFilesRedshift(AExpectedFiles): + + # mapping redshift extension dropdown values to strings + ext_mapping = ['iff', 'exr', 'tif', 'png', 'tga', 'jpg'] + + def __init__(self, layer): + super(ExpectedFilesRedshift, self).__init__(layer) + self.renderer = 'redshift' + + def get_renderer_prefix(self): + prefix = super(ExpectedFilesRedshift, self).get_renderer_prefix() + prefix = "{}_".format(prefix) + return prefix + + def get_files(self): + expected_files = super(ExpectedFilesRedshift, self).get_files() + + # we need to add one sequence for plain beauty if AOVs are enabled. + # as redshift output beauty without 'beauty' in filename. + + layer_data = self._get_layer_data() + if layer_data.get("enabledAOVs"): + expected_files[0][u"beauty"] = self._generate_single_file_sequence(layer_data) # noqa: E501 + + return expected_files + + def get_aovs(self): + enabled_aovs = [] + + try: + if self.maya_is_true( + cmds.getAttr("redshiftOptions.exrForceMultilayer")): + # AOVs are merged in mutli-channel file + return enabled_aovs + except ValueError: + # this occurs when Render Setting windows was not opened yet. In + # such case there are no Arnold options created so query for AOVs + # will fail. We terminate here as there are no AOVs specified then. + # This state will most probably fail later on some Validator + # anyway. 
+ return enabled_aovs + + default_ext = self.ext_mapping[ + cmds.getAttr('redshiftOptions.imageFormat') + ] + rs_aovs = [n for n in cmds.ls(type='RedshiftAOV')] + + # todo: find out how to detect multichannel exr for redshift + for aov in rs_aovs: + enabled = self.maya_is_true( + cmds.getAttr('{}.enabled'.format(aov))) + for override in self.get_layer_overrides( + '{}.enabled'.format(aov), self.layer): + enabled = self.maya_is_true(override) + + if enabled: + enabled_aovs.append( + ( + cmds.getAttr('%s.name' % aov), + default_ext + ) + ) + + return enabled_aovs + + +class ExpectedFilesRenderman(AExpectedFiles): + + def __init__(self, layer): + super(ExpectedFilesRenderman, self).__init__(layer) + self.renderer = 'renderman' + + def get_aovs(self): + enabled_aovs = [] + + default_ext = "exr" + displays = cmds.listConnections("rmanGlobals.displays") + for aov in displays: + aov_name = str(aov) + if aov_name == "rmanDefaultDisplay": + aov_name = "beauty" + + enabled = self.maya_is_true( + cmds.getAttr("{}.enable".format(aov))) + for override in self.get_layer_overrides( + '{}.enable'.format(aov), self.layer): + enabled = self.maya_is_true(override) + + if enabled: + enabled_aovs.append( + ( + aov_name, + default_ext + ) + ) + + return enabled_aovs + + def get_files(self): + """ + In renderman we hack it with prepending path. This path would + normally be translated from `rmanGlobals.imageOutputDir`. We skip + this and harcode prepend path we expect. There is no place for user + to mess around with this settings anyway and it is enforced in + render settings validator. + """ + layer_data = self._get_layer_data() + new_aovs = {} + + expected_files = super(ExpectedFilesRenderman, self).get_files() + # we always get beauty + for aov, files in expected_files[0].items(): + new_files = [] + for file in files: + new_file = "{}/{}/{}".format(layer_data["sceneName"], + layer_data["layerName"], + file) + new_files.append(new_file) + new_aovs[aov] = new_files + + return [new_aovs] + + +class ExpectedFilesMentalray(AExpectedFiles): + + def __init__(self, layer): + raise UnimplementedRendererException('Mentalray not implemented') + + def get_aovs(self): + return [] + + +class AOVError(Exception): + pass + + +class UnsupportedRendererException(Exception): + pass + + +class UnimplementedRendererException(Exception): + pass diff --git a/pype/plugins/maya/publish/collect_renderable_camera.py b/pype/plugins/maya/publish/collect_renderable_camera.py index 6b1732c3cb..13b847cee4 100644 --- a/pype/plugins/maya/publish/collect_renderable_camera.py +++ b/pype/plugins/maya/publish/collect_renderable_camera.py @@ -17,7 +17,7 @@ class CollectRenderableCamera(pyblish.api.InstancePlugin): def process(self, instance): layer = instance.data["setMembers"] - + self.log.info("layer: {}".format(layer)) cameras = cmds.ls(type="camera", long=True) renderable = [c for c in cameras if lib.get_attr_in_layer("%s.renderable" % c, layer=layer)] diff --git a/pype/plugins/maya/publish/collect_renderlayers.py b/pype/plugins/maya/publish/collect_renderlayers.py deleted file mode 100644 index 73a4d237ab..0000000000 --- a/pype/plugins/maya/publish/collect_renderlayers.py +++ /dev/null @@ -1,201 +0,0 @@ -from maya import cmds - -import pyblish.api - -from avalon import maya, api -import pype.maya.lib as lib - - -class CollectMayaRenderlayers(pyblish.api.ContextPlugin): - """Gather instances by active render layers""" - - order = pyblish.api.CollectorOrder + 0.01 - hosts = ["maya"] - label = "Render Layers" - - def process(self, context): - - 
asset = api.Session["AVALON_ASSET"] - filepath = context.data["currentFile"].replace("\\", "/") - - # Get render globals node - try: - render_globals = cmds.ls("renderglobalsMain")[0] - for instance in context: - self.log.debug(instance.name) - if instance.data['family'] == 'workfile': - instance.data['publish'] = True - except IndexError: - self.log.info("Skipping renderlayer collection, no " - "renderGlobalsDefault found..") - return - # Get all valid renderlayers - # This is how Maya populates the renderlayer display - rlm_attribute = "renderLayerManager.renderLayerId" - connected_layers = cmds.listConnections(rlm_attribute) or [] - valid_layers = set(connected_layers) - - # Get all renderlayers and check their state - renderlayers = [i for i in cmds.ls(type="renderLayer") if - cmds.getAttr("{}.renderable".format(i)) and not - cmds.referenceQuery(i, isNodeReferenced=True)] - - # Sort by displayOrder - def sort_by_display_order(layer): - return cmds.getAttr("%s.displayOrder" % layer) - - renderlayers = sorted(renderlayers, key=sort_by_display_order) - - for layer in renderlayers: - - # Check if layer is in valid (linked) layers - if layer not in valid_layers: - self.log.warning("%s is invalid, skipping" % layer) - continue - - if layer.endswith("defaultRenderLayer"): - continue - else: - # Remove Maya render setup prefix `rs_` - layername = layer.split("rs_", 1)[-1] - - # Get layer specific settings, might be overrides - data = { - "subset": layername, - "setMembers": layer, - "publish": True, - "frameStart": self.get_render_attribute("startFrame", - layer=layer), - "frameEnd": self.get_render_attribute("endFrame", - layer=layer), - "byFrameStep": self.get_render_attribute("byFrameStep", - layer=layer), - "renderer": self.get_render_attribute("currentRenderer", - layer=layer), - - # instance subset - "family": "Render Layers", - "families": ["renderlayer"], - "asset": asset, - "time": api.time(), - "author": context.data["user"], - - # Add source to allow tracing back to the scene from - # which was submitted originally - "source": filepath - } - - # Apply each user defined attribute as data - for attr in cmds.listAttr(layer, userDefined=True) or list(): - try: - value = cmds.getAttr("{}.{}".format(layer, attr)) - except Exception: - # Some attributes cannot be read directly, - # such as mesh and color attributes. These - # are considered non-essential to this - # particular publishing pipeline. - value = None - - data[attr] = value - - # Include (optional) global settings - # TODO(marcus): Take into account layer overrides - # Get global overrides and translate to Deadline values - overrides = self.parse_options(render_globals) - data.update(**overrides) - - # Define nice label - label = "{0} ({1})".format(layername, data["asset"]) - label += " [{0}-{1}]".format(int(data["frameStart"]), - int(data["frameEnd"])) - - instance = context.create_instance(layername) - instance.data["label"] = label - instance.data.update(data) - - def get_render_attribute(self, attr, layer): - return lib.get_attr_in_layer("defaultRenderGlobals.{}".format(attr), - layer=layer) - - def parse_options(self, render_globals): - """Get all overrides with a value, skip those without - - Here's the kicker. These globals override defaults in the submission - integrator, but an empty value means no overriding is made. - Otherwise, Frames would override the default frames set under globals. 
- - Args: - render_globals (str): collection of render globals - - Returns: - dict: only overrides with values - """ - - attributes = maya.read(render_globals) - - options = {"renderGlobals": {}} - options["renderGlobals"]["Priority"] = attributes["priority"] - - # Check for specific pools - pool_a, pool_b = self._discover_pools(attributes) - options["renderGlobals"].update({"Pool": pool_a}) - if pool_b: - options["renderGlobals"].update({"SecondaryPool": pool_b}) - - legacy = attributes["useLegacyRenderLayers"] - options["renderGlobals"]["UseLegacyRenderLayers"] = legacy - - # Machine list - machine_list = attributes["machineList"] - if machine_list: - key = "Whitelist" if attributes["whitelist"] else "Blacklist" - options['renderGlobals'][key] = machine_list - - # Suspend publish job - state = "Suspended" if attributes["suspendPublishJob"] else "Active" - options["publishJobState"] = state - - chunksize = attributes.get("framesPerTask", 1) - options["renderGlobals"]["ChunkSize"] = chunksize - - # Override frames should be False if extendFrames is False. This is - # to ensure it doesn't go off doing crazy unpredictable things - override_frames = False - extend_frames = attributes.get("extendFrames", False) - if extend_frames: - override_frames = attributes.get("overrideExistingFrame", False) - - options["extendFrames"] = extend_frames - options["overrideExistingFrame"] = override_frames - - maya_render_plugin = "MayaBatch" - if not attributes.get("useMayaBatch", True): - maya_render_plugin = "MayaCmd" - - options["mayaRenderPlugin"] = maya_render_plugin - - return options - - def _discover_pools(self, attributes): - - pool_a = None - pool_b = None - - # Check for specific pools - pool_b = [] - if "primaryPool" in attributes: - pool_a = attributes["primaryPool"] - if "secondaryPool" in attributes: - pool_b = attributes["secondaryPool"] - - else: - # Backwards compatibility - pool_str = attributes.get("pools", None) - if pool_str: - pool_a, pool_b = pool_str.split(";") - - # Ensure empty entry token is caught - if pool_b == "-": - pool_b = None - - return pool_a, pool_b diff --git a/pype/plugins/maya/publish/determine_future_version.py b/pype/plugins/maya/publish/determine_future_version.py new file mode 100644 index 0000000000..afa249aca2 --- /dev/null +++ b/pype/plugins/maya/publish/determine_future_version.py @@ -0,0 +1,28 @@ +import pyblish + +class DetermineFutureVersion(pyblish.api.InstancePlugin): + """ + This will determine version of subset if we want render to be attached to. 
+ """ + label = "Determine Subset Version" + order = pyblish.api.IntegratorOrder + hosts = ["maya"] + families = ["renderlayer"] + + def process(self, instance): + context = instance.context + attach_to_subsets = [s["subset"] for s in instance.data['attachTo']] + + if not attach_to_subsets: + return + + for i in context: + if i.data["subset"] in attach_to_subsets: + # # this will get corresponding subset in attachTo list + # # so we can set version there + sub = next(item for item in instance.data['attachTo'] if item["subset"] == i.data["subset"]) # noqa: E501 + + sub["version"] = i.data.get("version", 1) + self.log.info("render will be attached to {} v{}".format( + sub["subset"], sub["version"] + )) diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py index e3fa79b1c8..bd8497152e 100644 --- a/pype/plugins/maya/publish/submit_maya_deadline.py +++ b/pype/plugins/maya/publish/submit_maya_deadline.py @@ -1,6 +1,7 @@ import os import json import getpass +import clique from maya import cmds @@ -117,6 +118,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): else: optional = True + use_published = True + def process(self, instance): DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL", @@ -125,21 +128,66 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): context = instance.context workspace = context.data["workspaceDir"] + anatomy = context.data['anatomy'] filepath = None + if self.use_published: + for i in context: + if "workfile" in i.data["families"]: + assert i.data["publish"] is True, ( + "Workfile (scene) must be published along") + template_data = i.data.get("anatomyData") + rep = i.data.get("representations")[0].get("name") + template_data["representation"] = rep + template_data["ext"] = rep + template_data["comment"] = None + anatomy_filled = anatomy.format(template_data) + template_filled = anatomy_filled["publish"]["path"] + filepath = os.path.normpath(template_filled) + self.log.info("Using published scene for render {}".format( + filepath)) + + # now we need to switch scene in expected files + # because token will now point to published + # scene file and that might differ from current one + new_scene = os.path.splitext( + os.path.basename(filepath))[0] + orig_scene = os.path.splitext( + os.path.basename(context.data["currentFile"]))[0] + exp = instance.data.get("expectedFiles") + + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + new_exp = {} + for aov, files in exp[0].items(): + replaced_files = [] + for f in files: + replaced_files.append( + f.replace(orig_scene, new_scene) + ) + new_exp[aov] = replaced_files + instance.data["expectedFiles"] = [new_exp] + else: + new_exp = [] + for f in exp: + new_exp.append( + f.replace(orig_scene, new_scene) + ) + instance.data["expectedFiles"] = [new_exp] + self.log.info("Scene name was switched {} -> {}".format( + orig_scene, new_scene + )) + allInstances = [] for result in context.data["results"]: if (result["instance"] is not None and result["instance"] not in allInstances): allInstances.append(result["instance"]) - for inst in allInstances: - print(inst) - if inst.data['family'] == 'scene': - filepath = inst.data['destination_list'][0] - + # fallback if nothing was set if not filepath: + self.log.warning("Falling back to workfile") filepath = context.data["currentFile"] self.log.debug(filepath) @@ -150,8 +198,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): dirname = os.path.join(workspace, "renders") renderlayer = 
instance.data['setMembers'] # rs_beauty renderlayer_name = instance.data['subset'] # beauty - renderlayer_globals = instance.data["renderGlobals"] - legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] + # renderlayer_globals = instance.data["renderGlobals"] + # legacy_layers = renderlayer_globals["UseLegacyRenderLayers"] deadline_user = context.data.get("deadlineUser", getpass.getuser()) jobname = "%s - %s" % (filename, instance.name) @@ -195,7 +243,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Optional, enable double-click to preview rendered # frames from Deadline Monitor - "OutputFilename0": output_filename_0.replace("\\", "/"), + "OutputDirectory0": os.path.dirname(output_filename_0), + "OutputFilename0": output_filename_0.replace("\\", "/") }, "PluginInfo": { # Input @@ -211,9 +260,6 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): # Only render layers are considered renderable in this pipeline "UsingRenderLayers": True, - # Use legacy Render Layer system - "UseLegacyRenderLayers": legacy_layers, - # Render only this layer "RenderLayer": renderlayer, @@ -228,6 +274,26 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin): "AuxFiles": [] } + exp = instance.data.get("expectedFiles") + + OutputFilenames = {} + expIndex = 0 + + if isinstance(exp[0], dict): + # we have aovs and we need to iterate over them + for aov, files in exp[0].items(): + col = clique.assemble(files)[0][0] + outputFile = col.format('{head}{padding}{tail}') + payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile + OutputFilenames[expIndex] = outputFile + expIndex += 1 + else: + col = clique.assemble(files)[0][0] + outputFile = col.format('{head}{padding}{tail}') + payload['JobInfo']['OutputFilename' + str(expIndex)] = outputFile + # OutputFilenames[expIndex] = outputFile + + # We need those to pass them to pype for it to set correct context keys = [ "FTRACK_API_KEY", diff --git a/pype/plugins/maya/publish/validate_render_single_camera.py b/pype/plugins/maya/publish/validate_render_single_camera.py index b8561a69c9..51c5f64c86 100644 --- a/pype/plugins/maya/publish/validate_render_single_camera.py +++ b/pype/plugins/maya/publish/validate_render_single_camera.py @@ -1,17 +1,26 @@ +import re + import pyblish.api import pype.api import pype.maya.action +from maya import cmds + + +ImagePrefixes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'defaultRenderGlobals.imageFilePrefix', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' +} + class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): - """Only one camera may be renderable in a layer. - - Currently the pipeline supports only a single camera per layer. - This is because when multiple cameras are rendered the output files - automatically get different names because the render token - is not in the output path. As such the output files conflict with how - our pipeline expects the output. + """Validate renderable camera count for layer and token. + Pipeline is supporting multiple renderable cameras per layer, but image + prefix must contain token. 
""" order = pype.api.ValidateContentsOrder @@ -21,6 +30,8 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): "vrayscene"] actions = [pype.maya.action.SelectInvalidAction] + R_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) + def process(self, instance): """Process all the cameras in the instance""" invalid = self.get_invalid(instance) @@ -31,8 +42,17 @@ class ValidateRenderSingleCamera(pyblish.api.InstancePlugin): def get_invalid(cls, instance): cameras = instance.data.get("cameras", []) + renderer = cmds.getAttr('defaultRenderGlobals.currentRenderer').lower() + # handle various renderman names + if renderer.startswith('renderman'): + renderer = 'renderman' + file_prefix = cmds.getAttr(ImagePrefixes[renderer]) if len(cameras) > 1: + if re.search(cls.R_CAMERA_TOKEN, file_prefix): + # if there is token in prefix and we have more then + # 1 camera, all is ok. + return cls.log.error("Multiple renderable cameras found for %s: %s " % (instance.data["setMembers"], cameras)) return [instance.data["setMembers"]] + cameras diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py index 7bf44710e2..c98f0f8cdc 100644 --- a/pype/plugins/maya/publish/validate_rendersettings.py +++ b/pype/plugins/maya/publish/validate_rendersettings.py @@ -1,4 +1,5 @@ import os +import re from maya import cmds, mel import pymel.core as pm @@ -11,9 +12,13 @@ import pype.maya.lib as lib class ValidateRenderSettings(pyblish.api.InstancePlugin): """Validates the global render settings - * File Name Prefix must be as followed: - * vray: maya/// - * default: maya///_ + * File Name Prefix must start with: `maya/` + all other token are customizable but sane values are: + + `maya///_` + + token is supported also, usefull for multiple renderable + cameras per render layer. * Frame Padding must be: * default: 4 @@ -35,16 +40,47 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): families = ["renderlayer"] actions = [pype.api.RepairAction] + ImagePrefixes = { + 'mentalray': 'defaultRenderGlobals.imageFilePrefix', + 'vray': 'vraySettings.fileNamePrefix', + 'arnold': 'defaultRenderGlobals.imageFilePrefix', + 'renderman': 'rmanGlobals.imageFileFormat', + 'redshift': 'defaultRenderGlobals.imageFilePrefix' + } + + ImagePrefixTokens = { + + 'arnold': 'maya///_', + 'redshift': 'maya///', + 'vray': 'maya///', + 'renderman': '_..' + } + + # WARNING: There is bug? in renderman, translating token + # to something left behind mayas default image prefix. So instead + # `SceneName_v01` it translates to: + # `SceneName_v01//` that means + # for example: + # `SceneName_v01/Main/Main_`. Possible solution is to define + # custom token like to point to determined scene name. + RendermanDirPrefix = "/renders/maya//" + + R_AOV_TOKEN = re.compile( + r'%a||', re.IGNORECASE) + R_LAYER_TOKEN = re.compile( + r'%l||', re.IGNORECASE) + R_CAMERA_TOKEN = re.compile(r'%c|', re.IGNORECASE) + R_SCENE_TOKEN = re.compile(r'%s|', re.IGNORECASE) + DEFAULT_PADDING = 4 - RENDERER_PREFIX = {"vray": "maya///"} + VRAY_PREFIX = "maya///" DEFAULT_PREFIX = "maya///_" def process(self, instance): invalid = self.get_invalid(instance) - if invalid: - raise ValueError("Invalid render settings found for '%s'!" 
- % instance.name) + assert invalid is False, ("Invalid render settings " + "found for '{}'!".format(instance.name)) @classmethod def get_invalid(cls, instance): @@ -53,10 +89,11 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): renderer = instance.data['renderer'] layer = instance.data['setMembers'] + cameras = instance.data.get("cameras", []) # Get the node attributes for current renderer attrs = lib.RENDER_ATTRS.get(renderer, lib.RENDER_ATTRS['default']) - prefix = lib.get_attr_in_layer("{node}.{prefix}".format(**attrs), + prefix = lib.get_attr_in_layer(cls.ImagePrefixes[renderer], layer=layer) padding = lib.get_attr_in_layer("{node}.{padding}".format(**attrs), layer=layer) @@ -68,12 +105,63 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): cls.log.error("Animation needs to be enabled. Use the same " "frame for start and end to render single frame") - fname_prefix = cls.get_prefix(renderer) - - if prefix != fname_prefix: + if not prefix.lower().startswith("maya/"): invalid = True - cls.log.error("Wrong file name prefix: %s (expected: %s)" - % (prefix, fname_prefix)) + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't start with: 'maya/'".format(prefix)) + + if not re.search(cls.R_LAYER_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' or " + "'' token".format(prefix)) + + if len(cameras) > 1: + if not re.search(cls.R_CAMERA_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' token".format(prefix)) + + # renderer specific checks + if renderer == "vray": + # no vray checks implemented yet + pass + elif renderer == "redshift": + # no redshift check implemented yet + pass + elif renderer == "renderman": + file_prefix = cmds.getAttr("rmanGlobals.imageFileFormat") + dir_prefix = cmds.getAttr("rmanGlobals.imageOutputDir") + + if file_prefix.lower() != cls.ImagePrefixTokens[renderer].lower(): + invalid = True + cls.log.error("Wrong image prefix [ {} ]".format(file_prefix)) + + if dir_prefix.lower() != cls.RendermanDirPrefix.lower(): + invalid = True + cls.log.error("Wrong directory prefix [ {} ]".format( + dir_prefix)) + + else: + multichannel = cmds.getAttr("defaultArnoldDriver.mergeAOVs") + if multichannel: + if re.search(cls.R_AOV_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "You can't use '' token " + "with merge AOVs turned on".format(prefix)) + else: + if not re.search(cls.R_AOV_TOKEN, prefix): + invalid = True + cls.log.error("Wrong image prefix [ {} ] - " + "doesn't have: '' or " + "token".format(prefix)) + + # prefix check + if prefix.lower() != cls.ImagePrefixTokens[renderer].lower(): + cls.log.warning("warning: prefix differs from " + "recommended {}".format( + cls.ImagePrefixTokens[renderer])) if padding != cls.DEFAULT_PADDING: invalid = True @@ -82,21 +170,6 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): return invalid - @classmethod - def get_prefix(cls, renderer): - prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX) - # maya.cmds and pymel.core return only default project directory and - # not the current one but only default. - output_path = os.path.join( - mel.eval("workspace -q -rd;"), pm.workspace.fileRules["images"] - ) - # Workfile paths can be configured to have host name in file path. - # In this case we want to avoid duplicate folder names. 
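# --- Standalone sketch of the image prefix checks this validator performs. The
# --- angle-bracket token spellings used here (<layer>, <camera>, <aov>, ...)
# --- are assumptions for illustration; only the shape of the checks mirrors
# --- the validator above.
import re

R_LAYER_TOKEN = re.compile(r"%l|<layer>|<renderlayer>", re.IGNORECASE)   # assumed spellings
R_CAMERA_TOKEN = re.compile(r"%c|<camera>", re.IGNORECASE)               # assumed spelling
R_AOV_TOKEN = re.compile(r"%a|<aov>|<renderpass>", re.IGNORECASE)        # assumed spellings

def check_image_prefix(prefix, camera_count=1, merge_aovs=False):
    """Return a list of readable problems found in the image file prefix."""
    problems = []
    if not prefix.lower().startswith("maya/"):
        problems.append("prefix must start with 'maya/'")
    if not R_LAYER_TOKEN.search(prefix):
        problems.append("prefix is missing a render layer token")
    if camera_count > 1 and not R_CAMERA_TOKEN.search(prefix):
        problems.append("multiple renderable cameras need a camera token")
    if merge_aovs and R_AOV_TOKEN.search(prefix):
        problems.append("AOV token cannot be combined with merged (multichannel) AOVs")
    if not merge_aovs and not R_AOV_TOKEN.search(prefix):
        problems.append("separate AOVs need an AOV token in the prefix")
    return problems

# check_image_prefix("maya/<scene>/<layer>/<layer>_<aov>", camera_count=1)
# -> []  (passes the basic checks)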
- if "maya" in output_path.lower(): - prefix = prefix.replace("maya/", "") - - return prefix - @classmethod def repair(cls, instance): @@ -108,14 +181,23 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin): render_attrs = lib.RENDER_ATTRS.get(renderer, default) # Repair prefix - node = render_attrs["node"] - prefix_attr = render_attrs["prefix"] + if renderer != "renderman": + node = render_attrs["node"] + prefix_attr = render_attrs["prefix"] - fname_prefix = cls.get_prefix(renderer) - cmds.setAttr("{}.{}".format(node, prefix_attr), - fname_prefix, type="string") + fname_prefix = cls.ImagePrefixTokens[renderer] + cmds.setAttr("{}.{}".format(node, prefix_attr), + fname_prefix, type="string") - # Repair padding - padding_attr = render_attrs["padding"] - cmds.setAttr("{}.{}".format(node, padding_attr), - cls.DEFAULT_PADDING) + # Repair padding + padding_attr = render_attrs["padding"] + cmds.setAttr("{}.{}".format(node, padding_attr), + cls.DEFAULT_PADDING) + else: + # renderman handles stuff differently + cmds.setAttr("rmanGlobals.imageFileFormat", + cls.ImagePrefixTokens[renderer], + type="string") + cmds.setAttr("rmanGlobals.imageOutputDir", + cls.RendermanDirPrefix, + type="string") diff --git a/pype/plugins/nuke/_load_unused/load_alembic b/pype/plugins/nuke/_load_unused/load_alembic deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_load_unused/load_camera_abc b/pype/plugins/nuke/_load_unused/load_camera_abc deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/pype/plugins/nuke/_load_unused/load_camera_nk b/pype/plugins/nuke/_load_unused/load_camera_nk deleted file mode 100644 index 8b13789179..0000000000 --- a/pype/plugins/nuke/_load_unused/load_camera_nk +++ /dev/null @@ -1 +0,0 @@ - diff --git a/pype/plugins/nuke/_load_unused/load_still b/pype/plugins/nuke/_load_unused/load_still deleted file mode 100644 index c2aa061c5a..0000000000 --- a/pype/plugins/nuke/_load_unused/load_still +++ /dev/null @@ -1 +0,0 @@ -# usually used for mattepainting diff --git a/pype/plugins/nuke/_publish_unused/collect_render_target.py b/pype/plugins/nuke/_publish_unused/collect_render_target.py deleted file mode 100644 index 6c04414f69..0000000000 --- a/pype/plugins/nuke/_publish_unused/collect_render_target.py +++ /dev/null @@ -1,46 +0,0 @@ -import pyblish.api - - -@pyblish.api.log -class CollectRenderTarget(pyblish.api.InstancePlugin): - """Collect families for all instances""" - - order = pyblish.api.CollectorOrder + 0.2 - label = "Collect Render Target" - hosts = ["nuke", "nukeassist"] - families = ['write'] - - def process(self, instance): - - node = instance[0] - - self.log.info('processing {}'.format(node)) - - families = [] - if instance.data.get('families'): - families += instance.data['families'] - - # set for ftrack to accept - # instance.data["families"] = ["ftrack"] - - if node["render"].value(): - # dealing with local/farm rendering - if node["render_farm"].value(): - families.append("render.farm") - else: - families.append("render.local") - else: - families.append("render.frames") - # to ignore staging dir op in integrate - instance.data['transfer'] = False - - families.append('ftrack') - - instance.data["families"] = families - - # Sort/grouped by family (preserving local index) - instance.context[:] = sorted(instance.context, key=self.sort_by_family) - - def sort_by_family(self, instance): - """Sort by family""" - return instance.data.get("families", instance.data.get("family")) diff --git 
a/pype/plugins/nuke/_publish_unused/submit_deadline.py b/pype/plugins/nuke/_publish_unused/submit_deadline.py deleted file mode 100644 index 8b86189425..0000000000 --- a/pype/plugins/nuke/_publish_unused/submit_deadline.py +++ /dev/null @@ -1,147 +0,0 @@ -import os -import json -import getpass - -from avalon import api -from avalon.vendor import requests - -import pyblish.api - - -class NukeSubmitDeadline(pyblish.api.InstancePlugin): - # TODO: rewrite docstring to nuke - """Submit current Comp to Deadline - - Renders are submitted to a Deadline Web Service as - supplied via the environment variable DEADLINE_REST_URL - - """ - - label = "Submit to Deadline" - order = pyblish.api.IntegratorOrder - hosts = ["nuke"] - families = ["write", "render.deadline"] - - def process(self, instance): - - context = instance.context - - key = "__hasRun{}".format(self.__class__.__name__) - if context.data.get(key, False): - return - else: - context.data[key] = True - - DEADLINE_REST_URL = api.Session.get("DEADLINE_REST_URL", - "http://localhost:8082") - assert DEADLINE_REST_URL, "Requires DEADLINE_REST_URL" - - # Collect all saver instances in context that are to be rendered - write_instances = [] - for instance in context[:]: - if not self.families[0] in instance.data.get("families"): - # Allow only saver family instances - continue - - if not instance.data.get("publish", True): - # Skip inactive instances - continue - self.log.debug(instance.data["name"]) - write_instances.append(instance) - - if not write_instances: - raise RuntimeError("No instances found for Deadline submittion") - - hostVersion = int(context.data["hostVersion"]) - filepath = context.data["currentFile"] - filename = os.path.basename(filepath) - comment = context.data.get("comment", "") - deadline_user = context.data.get("deadlineUser", getpass.getuser()) - - # Documentation for keys available at: - # https://docs.thinkboxsoftware.com - # /products/deadline/8.0/1_User%20Manual/manual - # /manual-submission.html#job-info-file-options - payload = { - "JobInfo": { - # Top-level group name - "BatchName": filename, - - # Job name, as seen in Monitor - "Name": filename, - - # User, as seen in Monitor - "UserName": deadline_user, - - # Use a default submission pool for Nuke - "Pool": "nuke", - - "Plugin": "Nuke", - "Frames": "{start}-{end}".format( - start=int(instance.data["frameStart"]), - end=int(instance.data["frameEnd"]) - ), - - "Comment": comment, - }, - "PluginInfo": { - # Input - "FlowFile": filepath, - - # Mandatory for Deadline - "Version": str(hostVersion), - - # Render in high quality - "HighQuality": True, - - # Whether saver output should be checked after rendering - # is complete - "CheckOutput": True, - - # Proxy: higher numbers smaller images for faster test renders - # 1 = no proxy quality - "Proxy": 1, - }, - - # Mandatory for Deadline, may be empty - "AuxFiles": [] - } - - # Enable going to rendered frames from Deadline Monitor - for index, instance in enumerate(write_instances): - path = instance.data["path"] - folder, filename = os.path.split(path) - payload["JobInfo"]["OutputDirectory%d" % index] = folder - payload["JobInfo"]["OutputFilename%d" % index] = filename - - # Include critical variables with submission - keys = [ - # TODO: This won't work if the slaves don't have accesss to - # these paths, such as if slaves are running Linux and the - # submitter is on Windows. 
- "PYTHONPATH", - "NUKE_PATH" - # "OFX_PLUGIN_PATH", - ] - environment = dict({key: os.environ[key] for key in keys - if key in os.environ}, **api.Session) - - payload["JobInfo"].update({ - "EnvironmentKeyValue%d" % index: "{key}={value}".format( - key=key, - value=environment[key] - ) for index, key in enumerate(environment) - }) - - self.log.info("Submitting..") - self.log.info(json.dumps(payload, indent=4, sort_keys=True)) - - # E.g. http://192.168.0.1:8082/api/jobs - url = "{}/api/jobs".format(DEADLINE_REST_URL) - response = requests.post(url, json=payload) - if not response.ok: - raise Exception(response.text) - - # Store the response for dependent job submission plug-ins - for instance in write_instances: - instance.data["deadlineSubmissionJob"] = response.json() diff --git a/pype/plugins/nuke/_publish_unused/test_instances.py b/pype/plugins/nuke/_publish_unused/test_instances.py deleted file mode 100644 index e3fcc4b8f1..0000000000 --- a/pype/plugins/nuke/_publish_unused/test_instances.py +++ /dev/null @@ -1,24 +0,0 @@ -import pyblish.api - - -class IncrementTestPlugin(pyblish.api.ContextPlugin): - """Increment current script version.""" - - order = pyblish.api.CollectorOrder + 0.5 - label = "Test Plugin" - hosts = ['nuke'] - - def process(self, context): - instances = context[:] - - prerender_check = list() - families_check = list() - for instance in instances: - if ("prerender" in str(instance)): - prerender_check.append(instance) - if instance.data.get("families", None): - families_check.append(True) - - if len(prerender_check) != len(families_check): - self.log.info(prerender_check) - self.log.info(families_check) diff --git a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py deleted file mode 100644 index 441658297d..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py +++ /dev/null @@ -1,68 +0,0 @@ -import nuke -import os -import pyblish.api -from avalon import io -# TODO: add repair function - - -@pyblish.api.log -class ValidateSettingsNuke(pyblish.api.Validator): - """ Validates settings """ - - families = ['scene'] - hosts = ['nuke'] - optional = True - label = 'Settings' - - def process(self, instance): - - asset = io.find_one({"name": os.environ['AVALON_ASSET']}) - try: - avalon_resolution = asset["data"].get("resolution", '') - avalon_pixel_aspect = asset["data"].get("pixelAspect", '') - avalon_fps = asset["data"].get("fps", '') - avalon_first = asset["data"].get("frameStart", '') - avalon_last = asset["data"].get("frameEnd", '') - avalon_crop = asset["data"].get("crop", '') - except KeyError: - print( - "No resolution information found for \"{0}\".".format( - asset["name"] - ) - ) - return - - # validating first frame - local_first = nuke.root()['first_frame'].value() - msg = 'First frame is incorrect.' - msg += '\n\nLocal first: %s' % local_first - msg += '\n\nOnline first: %s' % avalon_first - assert local_first == avalon_first, msg - - # validating last frame - local_last = nuke.root()['last_frame'].value() - msg = 'Last frame is incorrect.' - msg += '\n\nLocal last: %s' % local_last - msg += '\n\nOnline last: %s' % avalon_last - assert local_last == avalon_last, msg - - # validating fps - local_fps = nuke.root()['fps'].value() - msg = 'FPS is incorrect.' 
- msg += '\n\nLocal fps: %s' % local_fps - msg += '\n\nOnline fps: %s' % avalon_fps - assert local_fps == avalon_fps, msg - - # validating resolution width - local_width = nuke.root().format().width() - msg = 'Width is incorrect.' - msg += '\n\nLocal width: %s' % local_width - msg += '\n\nOnline width: %s' % avalon_resolution[0] - assert local_width == avalon_resolution[0], msg - - # validating resolution width - local_height = nuke.root().format().height() - msg = 'Height is incorrect.' - msg += '\n\nLocal height: %s' % local_height - msg += '\n\nOnline height: %s' % avalon_resolution[1] - assert local_height == avalon_resolution[1], msg diff --git a/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py b/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py deleted file mode 100644 index a82fb16f31..0000000000 --- a/pype/plugins/nuke/_publish_unused/validate_proxy_mode.py +++ /dev/null @@ -1,33 +0,0 @@ -import nuke - -import pyblish.api - - -class RepairNukeProxyModeAction(pyblish.api.Action): - - label = "Repair" - icon = "wrench" - on = "failed" - - def process(self, context, plugin): - - nuke.root()["proxy"].setValue(0) - - -class ValidateNukeProxyMode(pyblish.api.ContextPlugin): - """Validates against having proxy mode on.""" - - order = pyblish.api.ValidatorOrder - optional = True - label = "Proxy Mode" - actions = [RepairNukeProxyModeAction] - hosts = ["nuke", "nukeassist"] - # targets = ["default", "process"] - - def process(self, context): - - msg = ( - "Proxy mode is not supported. Please disable Proxy Mode in the " - "Project settings." - ) - assert not nuke.root()["proxy"].getValue(), msg diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py index 77346a82a4..88e65156cb 100644 --- a/pype/plugins/nuke/load/load_mov.py +++ b/pype/plugins/nuke/load/load_mov.py @@ -112,6 +112,7 @@ class LoadMov(api.Loader): ) version = context['version'] version_data = version.get("data", {}) + repr_id = context["representation"]["_id"] orig_first = version_data.get("frameStart") orig_last = version_data.get("frameEnd") @@ -120,12 +121,16 @@ class LoadMov(api.Loader): first = orig_first - diff last = orig_last - diff - handle_start = version_data.get("handleStart") - handle_end = version_data.get("handleEnd") + handle_start = version_data.get("handleStart", 0) + handle_end = version_data.get("handleEnd", 0) colorspace = version_data.get("colorspace") repr_cont = context["representation"]["context"] + self.log.debug( + "Representation id `{}` ".format(repr_id)) + + context["representation"]["_id"] # create handles offset (only to last, because of mov) last += handle_start + handle_end # offset should be with handles so it match orig frame range @@ -138,7 +143,6 @@ class LoadMov(api.Loader): file = self.fname if not file: - repr_id = context["representation"]["_id"] self.log.warning( "Representation id `{}` is failing to load".format(repr_id)) return diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py index db77c53aff..690f074c3f 100644 --- a/pype/plugins/nuke/load/load_sequence.py +++ b/pype/plugins/nuke/load/load_sequence.py @@ -86,8 +86,11 @@ class LoadSequence(api.Loader): version = context['version'] version_data = version.get("data", {}) - + repr_id = context["representation"]["_id"] + self.log.info("version_data: {}\n".format(version_data)) + self.log.debug( + "Representation id `{}` ".format(repr_id)) self.first_frame = int(nuke.root()["first_frame"].getValue()) self.handle_start = version_data.get("handleStart", 
0) diff --git a/pype/plugins/nuke/publish/collect_script_version.py b/pype/plugins/nuke/publish/collect_script_version.py deleted file mode 100644 index 9a6b5bf572..0000000000 --- a/pype/plugins/nuke/publish/collect_script_version.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import pype.api as pype -import pyblish.api - - -class CollectScriptVersion(pyblish. api.ContextPlugin): - """Collect Script Version.""" - - order = pyblish.api.CollectorOrder - label = "Collect Script Version" - hosts = [ - "nuke", - "nukeassist" - ] - - def process(self, context): - file_path = context.data["currentFile"] - base_name = os.path.basename(file_path) - # get version string - version = pype.get_version_from_path(base_name) - - context.data['version'] = version diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py index c29f676ef7..993b8574f5 100644 --- a/pype/plugins/nuke/publish/collect_writes.py +++ b/pype/plugins/nuke/publish/collect_writes.py @@ -41,6 +41,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): handle_end = instance.context.data["handleEnd"] first_frame = int(nuke.root()["first_frame"].getValue()) last_frame = int(nuke.root()["last_frame"].getValue()) + frame_length = int( + last_frame - first_frame + 1 + ) if node["use_limit"].getValue(): handles = 0 @@ -52,9 +55,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): output_dir = os.path.dirname(path) self.log.debug('output dir: {}'.format(output_dir)) - # # get version to instance for integration - # instance.data['version'] = instance.context.data.get( - # "version", pype.get_version_from_path(nuke.root().name())) + # get version to instance for integration + instance.data['version'] = instance.context.data["version"] self.log.debug('Write Version: %s' % instance.data('version')) @@ -82,8 +84,26 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): collected_frames = [f for f in os.listdir(output_dir) if ext in f] if collected_frames: - representation['frameStart'] = "%0{}d".format( + collected_frames_len = len(collected_frames) + frame_start_str = "%0{}d".format( len(str(last_frame))) % first_frame + representation['frameStart'] = frame_start_str + + # in case slate is expected and not yet rendered + self.log.debug("_ frame_length: {}".format(frame_length)) + self.log.debug( + "_ collected_frames_len: {}".format( + collected_frames_len)) + # this will only run if slate frame is not already + # rendered from previews publishes + if "slate" in instance.data["families"] \ + and (frame_length == collected_frames_len): + frame_slate_str = "%0{}d".format( + len(str(last_frame))) % (first_frame - 1) + slate_frame = collected_frames[0].replace( + frame_start_str, frame_slate_str) + collected_frames.insert(0, slate_frame) + representation['files'] = collected_frames instance.data["representations"].append(representation) except Exception: @@ -112,7 +132,8 @@ class CollectNukeWrites(pyblish.api.InstancePlugin): "outputDir": output_dir, "ext": ext, "label": label, - "handles": handles, + "handleStart": handle_start, + "handleEnd": handle_end, "frameStart": first_frame, "frameEnd": last_frame, "outputType": output_type, diff --git a/pype/plugins/nuke/publish/extract_slate_frame.py b/pype/plugins/nuke/publish/extract_slate_frame.py index 4d43f38859..488f9bd31d 100644 --- a/pype/plugins/nuke/publish/extract_slate_frame.py +++ b/pype/plugins/nuke/publish/extract_slate_frame.py @@ -33,6 +33,7 @@ class ExtractSlateFrame(pype.api.Extractor): self.render_slate(instance) def render_slate(self, 
instance): + node_subset_name = instance.data.get("name", None) node = instance[0] # group node self.log.info("Creating staging dir...") @@ -47,6 +48,10 @@ class ExtractSlateFrame(pype.api.Extractor): self.log.info( "StagingDir `{0}`...".format(instance.data["stagingDir"])) + frame_length = int( + instance.data["frameEnd"] - instance.data["frameStart"] + 1 + ) + temporary_nodes = [] collection = instance.data.get("collection", None) @@ -56,10 +61,16 @@ class ExtractSlateFrame(pype.api.Extractor): "{head}{padding}{tail}")) fhead = collection.format("{head}") + collected_frames_len = int(len(collection.indexes)) + # get first and last frame first_frame = min(collection.indexes) - 1 - - if "slate" in instance.data["families"]: + self.log.info('frame_length: {}'.format(frame_length)) + self.log.info( + 'len(collection.indexes): {}'.format(collected_frames_len) + ) + if ("slate" in instance.data["families"]) \ + and (frame_length != collected_frames_len): first_frame += 1 last_frame = first_frame @@ -103,6 +114,8 @@ class ExtractSlateFrame(pype.api.Extractor): # Render frames nuke.execute(write_node.name(), int(first_frame), int(last_frame)) + # also render slate as sequence frame + nuke.execute(node_subset_name, int(first_frame), int(last_frame)) self.log.debug( "slate frame path: {}".format(instance.data["slateFrame"])) diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py index 55ba34a0d4..88ea78e623 100644 --- a/pype/plugins/nuke/publish/extract_thumbnail.py +++ b/pype/plugins/nuke/publish/extract_thumbnail.py @@ -116,7 +116,7 @@ class ExtractThumbnail(pype.api.Extractor): write_node["raw"].setValue(1) write_node.setInput(0, previous_node) temporary_nodes.append(write_node) - tags = ["thumbnail"] + tags = ["thumbnail", "publish_on_farm"] # retime for first_frame = int(last_frame) / 2 diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py index 71108189c0..ee7432e241 100644 --- a/pype/plugins/nuke/publish/submit_nuke_deadline.py +++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py @@ -5,7 +5,6 @@ import getpass from avalon import api from avalon.vendor import requests import re - import pyblish.api @@ -23,6 +22,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): families = ["render.farm"] optional = True + deadline_priority = 50 + deadline_pool = "" + deadline_pool_secondary = "" + deadline_chunk_size = 1 + def process(self, instance): node = instance[0] @@ -55,7 +59,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): ) # Store output dir for unified publisher (filesequence) instance.data["deadlineSubmissionJob"] = response.json() - instance.data["publishJobState"] = "Active" + instance.data["outputDir"] = os.path.dirname( + render_path).replace("\\", "/") + instance.data["publishJobState"] = "Suspended" if instance.data.get("bakeScriptPath"): render_path = instance.data.get("bakeRenderPath") @@ -87,6 +93,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): script_name = os.path.basename(script_path) jobname = "%s - %s" % (script_name, instance.name) + output_filename_0 = self.preview_fname(render_path) + if not responce_data: responce_data = {} @@ -96,6 +104,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin): except OSError: pass + # define chunk and priority + chunk_size = instance.data.get("deadlineChunkSize") + if chunk_size == 0: + chunk_size = self.deadline_chunk_size + + priority = instance.data.get("deadlinePriority") + if priority != 50: 
diff --git a/pype/plugins/nuke/publish/extract_thumbnail.py b/pype/plugins/nuke/publish/extract_thumbnail.py
index 55ba34a0d4..88ea78e623 100644
--- a/pype/plugins/nuke/publish/extract_thumbnail.py
+++ b/pype/plugins/nuke/publish/extract_thumbnail.py
@@ -116,7 +116,7 @@ class ExtractThumbnail(pype.api.Extractor):
         write_node["raw"].setValue(1)
         write_node.setInput(0, previous_node)
         temporary_nodes.append(write_node)
-        tags = ["thumbnail"]
+        tags = ["thumbnail", "publish_on_farm"]

         # retime
         first_frame = int(last_frame) / 2
diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index 71108189c0..ee7432e241 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -5,7 +5,6 @@ import getpass
 from avalon import api
 from avalon.vendor import requests
 import re
-
 import pyblish.api
@@ -23,6 +22,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
     families = ["render.farm"]
     optional = True

+    deadline_priority = 50
+    deadline_pool = ""
+    deadline_pool_secondary = ""
+    deadline_chunk_size = 1
+
     def process(self, instance):
         node = instance[0]
@@ -55,7 +59,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
         )
         # Store output dir for unified publisher (filesequence)
         instance.data["deadlineSubmissionJob"] = response.json()
-        instance.data["publishJobState"] = "Active"
+        instance.data["outputDir"] = os.path.dirname(
+            render_path).replace("\\", "/")
+        instance.data["publishJobState"] = "Suspended"

         if instance.data.get("bakeScriptPath"):
             render_path = instance.data.get("bakeRenderPath")
@@ -87,6 +93,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
         script_name = os.path.basename(script_path)
         jobname = "%s - %s" % (script_name, instance.name)

+        output_filename_0 = self.preview_fname(render_path)
+
         if not responce_data:
             responce_data = {}
@@ -96,6 +104,15 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
         except OSError:
             pass

+        # define chunk and priority
+        chunk_size = instance.data.get("deadlineChunkSize")
+        if chunk_size == 0:
+            chunk_size = self.deadline_chunk_size
+
+        priority = instance.data.get("deadlinePriority")
+        if priority != 50:
+            priority = self.deadline_priority
+
         payload = {
             "JobInfo": {
                 # Top-level group name
@@ -107,10 +124,11 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
                 # Arbitrary username, for visualisation in Monitor
                 "UserName": self._deadline_user,

-                "Priority": instance.data["deadlinePriority"],
+                "Priority": priority,
+                "ChunkSize": chunk_size,

-                "Pool": "2d",
-                "SecondaryPool": "2d",
+                "Pool": self.deadline_pool,
+                "SecondaryPool": self.deadline_pool_secondary,

                 "Plugin": "Nuke",
                 "Frames": "{start}-{end}".format(
@@ -119,6 +137,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
                 ),
                 "Comment": self._comment,

+                # Optional, enable double-click to preview rendered
+                # frames from Deadline Monitor
+                "OutputFilename0": output_filename_0.replace("\\", "/")
+
             },
             "PluginInfo": {
                 # Input
@@ -220,6 +242,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
         self.log.info("Submitting..")
         self.log.info(json.dumps(payload, indent=4, sort_keys=True))

+        # adding expected files to instance.data
+        self.expected_files(instance, render_path)
+        self.log.debug("__ expectedFiles: `{}`".format(
+            instance.data["expectedFiles"]))
         response = requests.post(self.deadline_url, json=payload)

         if not response.ok:
@@ -240,3 +266,51 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
             "%f=%d was rounded off to nearest integer" % (value, int(value))
         )
+
+    def preview_fname(self, path):
+        """Return output file path with #### for padding.
+
+        Deadline requires the path to be formatted with # in place of numbers.
+        For example `/path/to/render.####.png`
+
+        Args:
+            path (str): path to rendered images
+
+        Returns:
+            str
+
+        """
+        self.log.debug("_ path: `{}`".format(path))
+        if "%" in path:
+            search_results = re.search(r"(%0)(\d)(d.)", path).groups()
+            self.log.debug("_ search_results: `{}`".format(search_results))
+            return re.sub(r"%0\dd", "#" * int(search_results[1]), path)
+        if "#" in path:
+            self.log.debug("_ path: `{}`".format(path))
+            return path
+        else:
+            return path
+
+    def expected_files(self,
+                       instance,
+                       path):
+        """ Create expected files in instance data
+        """
+        if not instance.data.get("expectedFiles"):
+            instance.data["expectedFiles"] = list()
+
+        dir = os.path.dirname(path)
+        file = os.path.basename(path)
+
+        if "#" in file:
+            pparts = file.split("#")
+            padding = "%0{}d".format(len(pparts) - 1)
+            file = pparts[0] + padding + pparts[-1]
+
+        if "%" not in file:
+            instance.data["expectedFiles"].append(path)
+            return
+
+        for i in range(self._frame_start, (self._frame_end + 1)):
+            instance.data["expectedFiles"].append(
+                os.path.join(dir, (file % i)).replace("\\", "/"))
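
# --- Illustrative sketch (not part of the patch) of what expected_files()
# appends to instance.data["expectedFiles"] for a padded render path; the path
# and frame range below are hypothetical.
import os

render_path = "/renders/shot010/shot010_comp.%04d.exr"
frame_start, frame_end = 1001, 1003

expected = [
    os.path.join(
        os.path.dirname(render_path),
        os.path.basename(render_path) % frame
    ).replace("\\", "/")
    for frame in range(frame_start, frame_end + 1)
]
# expected == ["/renders/shot010/shot010_comp.1001.exr",
#              "/renders/shot010/shot010_comp.1002.exr",
#              "/renders/shot010/shot010_comp.1003.exr"]
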
diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py
index 169ea1ecb5..8a8bf3cc5e 100644
--- a/pype/plugins/nuke/publish/validate_rendered_frames.py
+++ b/pype/plugins/nuke/publish/validate_rendered_frames.py
@@ -76,7 +76,8 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
             'len(collection.indexes): {}'.format(collected_frames_len)
         )

-        if "slate" in instance.data["families"]:
+        if ("slate" in instance.data["families"]) \
+                and (frame_length != collected_frames_len):
            collected_frames_len -= 1

         assert (collected_frames_len == frame_length), (
diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py
index 28f502d846..c16f1a5803 100644
--- a/pype/plugins/nukestudio/publish/collect_handles.py
+++ b/pype/plugins/nukestudio/publish/collect_handles.py
@@ -55,8 +55,6 @@ class CollectClipHandles(api.ContextPlugin):
                 # debug printing
                 self.log.debug("_ s_asset_data: `{}`".format(
                     s_asset_data))
-                self.log.debug("_ instance.data[handles]: `{}`".format(
-                    instance.data["handles"]))
                 self.log.debug("_ instance.data[handleStart]: `{}`".format(
                     instance.data["handleStart"]))
                 self.log.debug("_ instance.data[handleEnd]: `{}`".format(
diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
index 5085b9719e..5bc9bea7dd 100644
--- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
+++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
@@ -42,6 +42,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
         width = int(sequence.format().width())
         height = int(sequence.format().height())
         pixel_aspect = sequence.format().pixelAspect()
+        fps = context.data["fps"]

         # build data for inner nukestudio project property
         data = {
@@ -161,9 +162,10 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
             "asset": asset,
             "hierarchy": hierarchy,
             "parents": parents,
-            "width": width,
-            "height": height,
+            "resolutionWidth": width,
+            "resolutionHeight": height,
             "pixelAspect": pixel_aspect,
+            "fps": fps,
             "tasks": instance.data["tasks"]
         })
@@ -223,9 +225,12 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
             instance.data["parents"] = s_asset_data["parents"]
             instance.data["hierarchy"] = s_asset_data["hierarchy"]
             instance.data["tasks"] = s_asset_data["tasks"]
-            instance.data["width"] = s_asset_data["width"]
-            instance.data["height"] = s_asset_data["height"]
+            instance.data["resolutionWidth"] = s_asset_data[
+                "resolutionWidth"]
+            instance.data["resolutionHeight"] = s_asset_data[
+                "resolutionHeight"]
             instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
+            instance.data["fps"] = s_asset_data["fps"]

             # adding frame start if any on instance
             start_frame = s_asset_data.get("startingFrame")
@@ -275,8 +280,8 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
             # adding SourceResolution if Tag was present
             if instance.data.get("main"):
                 in_info['custom_attributes'].update({
-                    "resolutionWidth": instance.data["width"],
-                    "resolutionHeight": instance.data["height"],
+                    "resolutionWidth": instance.data["resolutionWidth"],
+                    "resolutionHeight": instance.data["resolutionHeight"],
                     "pixelAspect": instance.data["pixelAspect"]
                 })
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index acdc5193ae..d08f69d4bb 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -83,7 +83,7 @@ class CollectPlates(api.InstancePlugin):
 class CollectPlatesData(api.InstancePlugin):
     """Collect plates"""

-    order = api.CollectorOrder + 0.495
+    order = api.CollectorOrder + 0.48
     label = "Collect Plates Data"
     hosts = ["nukestudio"]
     families = ["plate"]
@@ -126,7 +126,7 @@ class CollectPlatesData(api.InstancePlugin):
         transfer_data = [
             "handleStart", "handleEnd", "sourceIn", "sourceOut",
             "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
-            "clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
+            "clipInH", "clipOutH", "asset", "track", "version", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps"
         ]

         # pass data to version
diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py
index a8db5826b8..5c9ee97f2b 100644
--- a/pype/plugins/nukestudio/publish/extract_effects.py
+++ b/pype/plugins/nukestudio/publish/extract_effects.py
@@ -196,7 +196,8 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
             "asset": asset_name,
             "family": instance.data["family"],
             "subset": subset_name,
-            "version": version_number
+            "version": version_number,
+            "hierarchy": instance.data["hierarchy"]
         })

         resolution_width = instance.data.get("resolutionWidth")
@@ -207,9 +208,13 @@ class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
         if resolution_height:
             anatomy_data["resolution_height"] = resolution_height

+        pixel_aspect = instance.data.get("pixelAspect")
+        if pixel_aspect:
+            anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect))
+
         fps = instance.data.get("fps")
         if resolution_height:
-            anatomy_data["fps"] = fps
+            anatomy_data["fps"] = float("{:0.2f}".format(fps))

         instance.data["projectEntity"] = project_entity
         instance.data["assetEntity"] = asset_entity
diff --git a/pype/plugins/standalonepublisher/publish/extract_review.py b/pype/plugins/standalonepublisher/publish/extract_review.py
index f06d9bcde0..66cdcdf4df 100644
--- a/pype/plugins/standalonepublisher/publish/extract_review.py
+++ b/pype/plugins/standalonepublisher/publish/extract_review.py
@@ -4,6 +4,7 @@ import tempfile
 import pyblish.api
 import clique
 import pype.api
+import pype.lib


 class ExtractReviewSP(pyblish.api.InstancePlugin):
@@ -148,12 +149,7 @@ class ExtractReviewSP(pyblish.api.InstancePlugin):
             # output filename
             output_args.append(full_output_path)

-            ffmpeg_path = os.getenv("FFMPEG_PATH", "")
-            if ffmpeg_path:
-                ffmpeg_path += "/ffmpeg"
-            else:
-                ffmpeg_path = "ffmpeg"
-
+            ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
             mov_args = [
                 ffmpeg_path,
                 " ".join(input_args),
diff --git a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py
index 69a2e0fdad..daa3936359 100644
--- a/pype/plugins/standalonepublisher/publish/extract_thumbnail.py
+++ b/pype/plugins/standalonepublisher/publish/extract_thumbnail.py
@@ -3,6 +3,7 @@ import tempfile
 import subprocess
 import pyblish.api
 import pype.api
+import pype.lib


 class ExtractThumbnailSP(pyblish.api.InstancePlugin):
@@ -73,11 +74,7 @@ class ExtractThumbnailSP(pyblish.api.InstancePlugin):
             config_data.get("__default__", {})
         )

-        ffmpeg_path = os.getenv("FFMPEG_PATH", "")
-        if ffmpeg_path:
-            ffmpeg_path += "/ffmpeg"
-        else:
-            ffmpeg_path = "ffmpeg"
+        ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")

         jpeg_items = []
         jpeg_items.append(ffmpeg_path)
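
# --- Rough sketch of what pype.lib.get_ffmpeg_tool_path() is expected to do,
# based only on the inlined FFMPEG_PATH logic being removed above; the real
# helper lives in pype.lib and may differ.
import os


def get_ffmpeg_tool_path(tool="ffmpeg"):
    """Resolve an ffmpeg-family executable from FFMPEG_PATH, else rely on PATH."""
    ffmpeg_dir = os.environ.get("FFMPEG_PATH", "")
    if ffmpeg_dir and os.path.isdir(ffmpeg_dir):
        return os.path.join(ffmpeg_dir, tool)
    return tool
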
diff --git a/pype/scripts/otio_burnin.py b/pype/scripts/otio_burnin.py
index 46b2d1421c..8d0b925089 100644
--- a/pype/scripts/otio_burnin.py
+++ b/pype/scripts/otio_burnin.py
@@ -6,25 +6,22 @@ import json
 import opentimelineio_contrib.adapters.ffmpeg_burnins as ffmpeg_burnins
 from pypeapp.lib import config
 from pypeapp import Logger
-
+import pype.lib

 log = Logger().get_logger("BurninWrapper", "burninwrap")

-ffmpeg_path = os.environ.get("FFMPEG_PATH")
-if ffmpeg_path and os.path.exists(ffmpeg_path):
-    # add separator "/" or "\" to be prepared for next part
-    ffmpeg_path += os.path.sep
-else:
-    ffmpeg_path = ""
+ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
+ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
+

 FFMPEG = (
     '{} -loglevel panic -i %(input)s %(filters)s %(args)s%(output)s'
-).format(os.path.normpath(ffmpeg_path + "ffmpeg"))
+).format(ffmpeg_path)

 FFPROBE = (
     '{} -v quiet -print_format json -show_format -show_streams %(source)s'
-).format(os.path.normpath(ffmpeg_path + "ffprobe"))
+).format(ffprobe_path)

 DRAWTEXT = (
     "drawtext=text=\\'%(text)s\\':x=%(x)s:y=%(y)s:fontcolor="
@@ -421,6 +418,13 @@ def burnins_from_data(
         if not value:
             continue

+        if isinstance(value, (dict, list, tuple)):
+            raise TypeError((
+                "Expected string or number type."
+                " Got: {} - \"{}\""
+                " (Make sure you have new burnin presets)."
+            ).format(str(type(value)), str(value)))
+
         has_timecode = TIME_CODE_KEY in value

         align = None
diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py
index 620ee3d851..fe795564a5 100644
--- a/pype/scripts/publish_filesequence.py
+++ b/pype/scripts/publish_filesequence.py
@@ -6,6 +6,7 @@ import argparse
 import logging
 import subprocess
 import platform
+import json

 try:
     from shutil import which
@@ -24,6 +25,18 @@ log.setLevel(logging.DEBUG)
 error_format = "Failed {plugin.__name__}: {error} -- {error.traceback}"

+def _load_json(path):
+    assert os.path.isfile(path), ("path to json file doesn't exist")
+    data = None
+    with open(path, "r") as json_file:
+        try:
+            data = json.load(json_file)
+        except Exception as exc:
+            log.error(
+                "Error loading json: "
+                "{} - Exception: {}".format(path, exc)
+            )
+    return data

 def __main__():
     parser = argparse.ArgumentParser()
@@ -77,6 +90,12 @@ def __main__():

     paths = kwargs.paths or [os.environ.get("PYPE_METADATA_FILE")] or [os.getcwd()]  # noqa

+    for path in paths:
+        data = _load_json(path)
+        log.info("Setting session using data from file")
+        os.environ["AVALON_PROJECT"] = data["session"]["AVALON_PROJECT"]
+        break
+
     args = [
         os.path.join(pype_root, pype_command),
         "publish",
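
# --- Illustrative only: the metadata file consumed by _load_json() above is
# expected to carry at least the Avalon session block; the file name and any
# extra keys are assumptions, not part of the patch.
example_metadata = {
    "session": {
        "AVALON_PROJECT": "my_project"
    }
}
# publish_filesequence.py exports data["session"]["AVALON_PROJECT"] into
# os.environ before spawning the publish subprocess.
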
diff --git a/pype/standalonepublish/__init__.py b/pype/standalonepublish/__init__.py
index c7be80f189..8e615afbea 100644
--- a/pype/standalonepublish/__init__.py
+++ b/pype/standalonepublish/__init__.py
@@ -1,3 +1,5 @@
+PUBLISH_PATHS = []
+
 from .standalonepublish_module import StandAlonePublishModule
 from .app import (
     show,
diff --git a/pype/standalonepublish/publish.py b/pype/standalonepublish/publish.py
index fcbb6e137d..045b3d590e 100644
--- a/pype/standalonepublish/publish.py
+++ b/pype/standalonepublish/publish.py
@@ -5,14 +5,14 @@ import tempfile
 import random
 import string

-from avalon import io
-from avalon import api as avalon
+from avalon import io, api
 from avalon.tools import publish as av_publish

 import pype
 from pypeapp import execute

 import pyblish.api
+from . import PUBLISH_PATHS


 def set_context(project, asset, task, app):
@@ -31,7 +31,6 @@ def set_context(project, asset, task, app):
     os.environ["AVALON_TASK"] = task
     io.Session["AVALON_TASK"] = task

-    io.install()

     av_project = io.find_one({'type': 'project'})
@@ -76,7 +75,7 @@ def avalon_api_publish(data, gui=True):
     io.install()

     # Create hash name folder in temp
-    chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] )
+    chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
     staging_dir = tempfile.mkdtemp(chars)

     # create also json and fill with data
@@ -105,8 +104,27 @@ def cli_publish(data, gui=True):
     io.install()

+    pyblish.api.deregister_all_plugins()
+    # Registers Global pyblish plugins
+    pype.install()
+    # Registers Standalone pyblish plugins
+    for path in PUBLISH_PATHS:
+        pyblish.api.register_plugin_path(path)
+
+    project_plugins_paths = os.environ.get("PYPE_PROJECT_PLUGINS")
+    project_name = os.environ["AVALON_PROJECT"]
+    if project_plugins_paths and project_name:
+        for path in project_plugins_paths.split(os.pathsep):
+            if not path:
+                continue
+            plugin_path = os.path.join(path, project_name, "plugins")
+            if os.path.exists(plugin_path):
+                pyblish.api.register_plugin_path(plugin_path)
+                api.register_plugin_path(api.Loader, plugin_path)
+                api.register_plugin_path(api.Creator, plugin_path)
+
     # Create hash name folder in temp
-    chars = "".join( [random.choice(string.ascii_letters) for i in range(15)] )
+    chars = "".join([random.choice(string.ascii_letters) for i in range(15)])
     staging_dir = tempfile.mkdtemp(chars)

     # create json for return data
diff --git a/pype/standalonepublish/standalonepublish_module.py b/pype/standalonepublish/standalonepublish_module.py
index 75c033e16b..64195bc271 100644
--- a/pype/standalonepublish/standalonepublish_module.py
+++ b/pype/standalonepublish/standalonepublish_module.py
@@ -2,16 +2,16 @@ import os
 from .app import show
 from .widgets import QtWidgets
 import pype
-import pyblish.api
+from . import PUBLISH_PATHS


 class StandAlonePublishModule:
-    PUBLISH_PATHS = []

     def __init__(self, main_parent=None, parent=None):
         self.main_parent = main_parent
         self.parent_widget = parent
-        self.PUBLISH_PATHS.append(os.path.sep.join(
+        PUBLISH_PATHS.clear()
+        PUBLISH_PATHS.append(os.path.sep.join(
             [pype.PLUGINS_DIR, "standalonepublisher", "publish"]
         ))
@@ -24,16 +24,9 @@ class StandAlonePublishModule:

     def process_modules(self, modules):
         if "FtrackModule" in modules:
-            self.PUBLISH_PATHS.append(os.path.sep.join(
+            PUBLISH_PATHS.append(os.path.sep.join(
                 [pype.PLUGINS_DIR, "ftrack", "publish"]
             ))

-    def tray_start(self):
-        # Registers Global pyblish plugins
-        pype.install()
-        # Registers Standalone pyblish plugins
-        for path in self.PUBLISH_PATHS:
-            pyblish.api.register_plugin_path(path)
-
     def show(self):
         show(self.main_parent, False)
diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py
index 73b9f0e179..c85105a333 100644
--- a/pype/standalonepublish/widgets/widget_drop_frame.py
+++ b/pype/standalonepublish/widgets/widget_drop_frame.py
@@ -4,6 +4,7 @@ import json
 import clique
 import subprocess
 from pypeapp import config
+import pype.lib
 from . import QtWidgets, QtCore
 from . import DropEmpty, ComponentsList, ComponentItem
@@ -224,12 +225,7 @@ class DropDataFrame(QtWidgets.QFrame):
         self._process_data(data)

     def load_data_with_probe(self, filepath):
-        ffprobe_path = os.getenv("FFMPEG_PATH", "")
-        if ffprobe_path:
-            ffprobe_path += '/ffprobe'
-        else:
-            ffprobe_path = 'ffprobe'
-
+        ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
         args = [
             ffprobe_path,
             '-v', 'quiet',
diff --git a/pype/tools/assetcreator/model.py b/pype/tools/assetcreator/model.py
index b77ffa7a5d..3af1d77127 100644
--- a/pype/tools/assetcreator/model.py
+++ b/pype/tools/assetcreator/model.py
@@ -241,7 +241,7 @@ class TasksModel(TreeModel):
         self.endResetModel()

     def flags(self, index):
-        return QtCore.Qt.ItemIsEnabled
+        return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable

     def headerData(self, section, orientation, role):
diff --git a/pype/widgets/message_window.py b/pype/widgets/message_window.py
index 72e655cf5c..3532d2df44 100644
--- a/pype/widgets/message_window.py
+++ b/pype/widgets/message_window.py
@@ -7,7 +7,7 @@ log = logging.getLogger(__name__)

 class Window(QtWidgets.QWidget):
     def __init__(self, parent, title, message, level):
-        super().__init__()
+        super(Window, self).__init__()
         self.parent = parent
         self.title = title
         self.message = message
@@ -48,9 +48,10 @@ class Window(QtWidgets.QWidget):
         return


-def message(title=None, message=None, level="info"):
-    global app
-    app = QtWidgets.QApplication(sys.argv)
+def message(title=None, message=None, level="info", parent=None):
+    app = parent
+    if not app:
+        app = QtWidgets.QApplication(sys.argv)
     ex = Window(app, title, message, level)
     ex.show()
     # sys.exit(app.exec_())
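
# --- Hypothetical call site for the extended message() signature above,
# reusing an already-running QApplication instead of creating a second one.
# The Qt binding import is an assumption (pype commonly goes through Qt.py).
from Qt import QtWidgets
from pype.widgets.message_window import message

existing_app = QtWidgets.QApplication.instance()
message(
    title="Publish finished",
    message="All instances were published successfully.",
    level="info",
    parent=existing_app
)
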
diff --git a/setup/maya/userSetup.py b/setup/maya/userSetup.py
index b419e9d27e..4f4aed36b7 100644
--- a/setup/maya/userSetup.py
+++ b/setup/maya/userSetup.py
@@ -14,12 +14,15 @@ shelf_preset = presets['maya'].get('project_shelf')

 if shelf_preset:
     project = os.environ["AVALON_PROJECT"]

+    icon_path = os.path.join(os.environ['PYPE_PROJECT_SCRIPTS'], project, "icons")
+    icon_path = os.path.abspath(icon_path)
+
     for i in shelf_preset['imports']:
         import_string = "from {} import {}".format(project, i)
         print(import_string)
         exec(import_string)

-cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], preset=shelf_preset)")
+cmds.evalDeferred("mlib.shelf(name=shelf_preset['name'], iconPath=icon_path, preset=shelf_preset)")

 print("finished PYPE usersetup")
diff --git a/setup/nuke/nuke_path/atom_server.py b/setup/nuke/nuke_path/atom_server.py
deleted file mode 100644
index 1742c290c1..0000000000
--- a/setup/nuke/nuke_path/atom_server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-'''
-    Simple socket server using threads
-'''
-
-import socket
-import sys
-import threading
-import StringIO
-import contextlib
-
-import nuke
-
-HOST = ''
-PORT = 8888
-
-
-@contextlib.contextmanager
-def stdoutIO(stdout=None):
-    old = sys.stdout
-    if stdout is None:
-        stdout = StringIO.StringIO()
-    sys.stdout = stdout
-    yield stdout
-    sys.stdout = old
-
-
-def _exec(data):
-    with stdoutIO() as s:
-        exec(data)
-        return s.getvalue()
-
-
-def server_start():
-    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
-    s.bind((HOST, PORT))
-    s.listen(5)
-
-    while 1:
-        client, address = s.accept()
-        try:
-            data = client.recv(4096)
-            if data:
-                result = nuke.executeInMainThreadWithResult(_exec, args=(data))
-                client.send(str(result))
-        except SystemExit:
-            result = self.encode('SERVER: Shutting down...')
-            client.send(str(result))
-            raise
-        finally:
-            client.close()
-
-t = threading.Thread(None, server_start)
-t.setDaemon(True)
-t.start()
diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py
index 7f5de6013d..15702fa364 100644
--- a/setup/nuke/nuke_path/menu.py
+++ b/setup/nuke/nuke_path/menu.py
@@ -1,6 +1,5 @@
 import os
 import sys
-import atom_server
 import KnobScripter

 from pype.nuke.lib import (