diff --git a/changelog.md b/changelog.md
new file mode 100644
index 0000000000..159ff0baeb
--- /dev/null
+++ b/changelog.md
@@ -0,0 +1,54 @@
+# Pype changelog #
+Welcome to the Pype changelog.
+
+## 2.1 ##
+
+A large cleanup release. Most of the changes are under the hood.
+
+**new**:
+- _(pype)_ added a customisable workflow for creating quicktimes from renders or playblasts
+- _(pype)_ added a configurable option to add burnins to any generated quicktimes
+- _(ftrack)_ added an action that identifies which machines pype is running on
+- _(system)_ unified subprocess calls
+- _(maya)_ added audio to review quicktimes
+- _(nuke)_ added a crop before the write node to prevent overscan problems in ffmpeg
+- **Nuke Studio** publishing and workfiles support
+- **Muster** render manager support
+- _(nuke)_ frame range, FPS and resolution are set automatically at startup
+- _(maya)_ added the ability to load published sequences as image planes
+- _(system)_ added an ftrack event that sets asset folder permissions based on task assignees in ftrack
+- _(maya)_ added a pyblish plugin that allows validation of maya attributes
+- _(system)_ added better startup logging to tray debug, including basic connection information
+- _(avalon)_ added an option to group published subsets in the loader
+- _(avalon)_ loader family filters are working now
+
+**changed**:
+- changed multiple key attributes to unify their behaviour across the pipeline:
+ - `frameRate` to `fps`
+ - `startFrame` to `frameStart`
+ - `endFrame` to `frameEnd`
+ - `fstart` to `frameStart`
+ - `fend` to `frameEnd`
+ - `handle_start` to `handleStart`
+ - `handle_end` to `handleEnd`
+ - `resolution_width` to `resolutionWidth`
+ - `resolution_height` to `resolutionHeight`
+ - `pixel_aspect` to `pixelAspect`
+
+- _(nuke)_ write nodes are now created inside a group, with only some attributes editable by the artist
+- rendered frames are now deleted from their temporary location after publishing is finished
+- _(ftrack)_ the RV action can now be launched from any entity
+- after publishing, only the refresh button is available in the pyblish UI
+- added a context instance to pyblish-lite so that the artist knows if a context plugin fails
+- _(avalon)_ allow opening selected files using the enter key
+- _(avalon)_ core updated to v5.2.9 with our forked changes on top
+
+**fix**:
+- faster hierarchy retrieval from db
+- _(nuke)_ A lot of stability enhancements
+- _(nuke studio)_ A lot of stability enhancements
+- _(nuke)_ now renders only a single write node on the farm
+- _(ftrack)_ pype would crash when launching a project-level task
+- the work directory was sometimes not created correctly
+- major pype.lib cleanup: removed unused functions, merged duplicates and did general housekeeping
+- _(avalon)_ subsets in maya 2019 weren't behaving correctly in the outliner
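A note on the attribute renames listed above: documents created before 2.1 still carry the old keys, so any tooling that reads both generations needs the old-to-new mapping (the new "Attributes Remapper" ftrack action later in this patch applies exactly this against the avalon DB). A minimal standalone sketch of such a remap; the helper name is hypothetical and not part of this patch:

    # hypothetical compatibility helper mirroring the rename table above
    KEY_MAP = {
        "frameRate": "fps",
        "startFrame": "frameStart",
        "endFrame": "frameEnd",
        "fstart": "frameStart",
        "fend": "frameEnd",
        "handle_start": "handleStart",
        "handle_end": "handleEnd",
        "resolution_width": "resolutionWidth",
        "resolution_height": "resolutionHeight",
        "pixel_aspect": "pixelAspect",
    }

    def remap_keys(data):
        """Return a copy of `data` with pre-2.1 keys renamed.

        Values already stored under a new-style key win over remapped ones.
        """
        out = {key: value for key, value in data.items() if key not in KEY_MAP}
        for old, new in KEY_MAP.items():
            if old in data and new not in out:
                out[new] = data[old]
        return out

    assert remap_keys({"fstart": 1001, "endFrame": 1100})["frameStart"] == 1001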
diff --git a/pype/__init__.py b/pype/__init__.py
index 35511eb6c1..a5858f49e7 100644
--- a/pype/__init__.py
+++ b/pype/__init__.py
@@ -7,6 +7,8 @@ from .lib import filter_pyblish_plugins
import logging
log = logging.getLogger(__name__)
+__version__ = "2.1.0"
+
PACKAGE_DIR = os.path.dirname(__file__)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
diff --git a/pype/api.py b/pype/api.py
index 1ecbe3b36a..2c227b5b4b 100644
--- a/pype/api.py
+++ b/pype/api.py
@@ -18,31 +18,20 @@ from .action import (
from pypeapp import Logger
-
-from .templates import (
- get_project_name,
- get_project_code,
- get_hierarchy,
- get_asset,
- get_task,
- set_avalon_workdir,
- get_version_from_path,
- get_workdir_template,
- set_hierarchy,
- set_project_code
-)
-
from .lib import (
version_up,
- get_handle_irregular,
- get_project_data,
- get_asset_data,
+ get_asset,
+ get_project,
+ get_hierarchy,
+ get_subsets,
+ get_version_from_path,
modified_environ,
- add_tool_to_environment,
- get_data_hierarchical_attr,
- get_avalon_project_template
+ add_tool_to_environment
)
+# Special naming case for subprocess since it shadows the standard-library module.
+from .lib import _subprocess as subprocess
+
__all__ = [
# plugin classes
"Extractor",
@@ -54,28 +43,21 @@ __all__ = [
# action
"get_errored_instances_from_context",
"RepairAction",
+ "RepairContextAction",
"Logger",
"ValidationException",
# get contextual data
- "get_handle_irregular",
- "get_project_data",
- "get_asset_data",
- "get_project_name",
- "get_project_code",
+ "version_up",
+ "get_project",
"get_hierarchy",
"get_asset",
- "get_task",
- "set_avalon_workdir",
+ "get_subsets",
"get_version_from_path",
- "get_workdir_template",
"modified_environ",
"add_tool_to_environment",
- "set_hierarchy",
- "set_project_code",
- "get_data_hierarchical_attr",
- "get_avalon_project_template",
+ "subprocess"
]
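The trimmed pype.api surface is easiest to read in use. A usage sketch, assuming a configured pype environment; the exact signature of the `_subprocess` wrapper lives in pype/lib.py and is not shown in this diff:

    from pype import api as pype

    # unified subprocess wrapper, re-exported under a non-clashing name
    pype.subprocess(["ffmpeg", "-version"])

    # contextual helpers that replaced the removed templates module
    project = pype.get_project()            # project document from the db
    code = project["data"].get("code", "")  # project code, may be empty
    hierarchy = pype.get_hierarchy()        # e.g. "assets/characters"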
diff --git a/pype/aport/__init__.py b/pype/aport/__init__.py
index 00e14924f0..b4d9e68028 100644
--- a/pype/aport/__init__.py
+++ b/pype/aport/__init__.py
@@ -6,6 +6,7 @@ from pyblish import api as pyblish
from pypeapp import execute, Logger
from .. import api
+from .lib import set_avalon_workdir
log = Logger().get_logger(__name__, "aport")
@@ -33,7 +34,7 @@ INVENTORY_PATH = os.path.join(PLUGINS_DIR, "aport", "inventory")
def install():
- api.set_avalon_workdir()
+ set_avalon_workdir()
log.info("Registering Aport plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
diff --git a/pype/aport/api.py b/pype/aport/api.py
index bac3e235df..d5ac81bf63 100644
--- a/pype/aport/api.py
+++ b/pype/aport/api.py
@@ -80,17 +80,23 @@ def publish(json_data_path, gui):
@pico.expose()
-def context(project, asset, task, app):
+def context(project_name, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
- os.environ["AVALON_PROJECT"] = project
+ os.environ["AVALON_PROJECT"] = project_name
+ io.Session["AVALON_PROJECT"] = project_name
avalon.update_current_task(task, asset, app)
- project_code = pype.get_project_code()
- pype.set_project_code(project_code)
+ project_code = pype.get_project()["data"].get("code", '')
+
+ os.environ["AVALON_PROJECTCODE"] = project_code
+ io.Session["AVALON_PROJECTCODE"] = project_code
+
hierarchy = pype.get_hierarchy()
- pype.set_hierarchy(hierarchy)
+ os.environ["AVALON_HIERARCHY"] = hierarchy
+ io.Session["AVALON_HIERARCHY"] = hierarchy
+
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)
diff --git a/pype/aport/lib.py b/pype/aport/lib.py
new file mode 100644
index 0000000000..62b81db88a
--- /dev/null
+++ b/pype/aport/lib.py
@@ -0,0 +1,135 @@
+import os
+import re
+import sys
+from avalon import io, api as avalon, lib as avalonlib
+from pype import lib
+from pype import api as pype
+# from pypeapp.api import (Templates, Logger, format)
+from pypeapp import Logger, Anatomy
+log = Logger().get_logger(__name__, os.getenv("AVALON_APP", "pype-config"))
+
+
+def get_asset():
+ """
+ Obtain Asset string from session or environment variable
+
+ Returns:
+ string: asset name
+
+ Raises:
+ log: error
+ """
+ lib.set_io_database()
+ asset = io.Session.get("AVALON_ASSET", None) \
+ or os.getenv("AVALON_ASSET", None)
+ log.info("asset: {}".format(asset))
+    assert asset, log.error("missing `AVALON_ASSET` "
+                            "in avalon session "
+                            "or os.environ!")
+ return asset
+
+
+def get_context_data(
+ project_name=None, hierarchy=None, asset=None, task_name=None
+):
+ """
+ Collect all main contextual data
+
+ Args:
+        project_name (string, optional): project name
+        hierarchy (string, optional): hierarchy path
+        asset (string, optional): asset name
+        task_name (string, optional): task name
+
+ Returns:
+ dict: contextual data
+
+ """
+ if not task_name:
+ lib.set_io_database()
+ task_name = io.Session.get("AVALON_TASK", None) \
+ or os.getenv("AVALON_TASK", None)
+ assert task_name, log.error(
+ "missing `AVALON_TASK` in avalon session or os.environ!"
+ )
+
+ application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])
+
+    if project_name:
+        os.environ['AVALON_PROJECT'] = project_name
+        io.Session['AVALON_PROJECT'] = project_name
+
+ if not hierarchy:
+ hierarchy = pype.get_hierarchy()
+
+ project_doc = io.find_one({"type": "project"})
+
+ data = {
+ "task": task_name,
+ "asset": asset or get_asset(),
+ "project": {
+ "name": project_doc["name"],
+ "code": project_doc["data"].get("code", '')
+ },
+ "hierarchy": hierarchy,
+ "app": application["application_dir"]
+ }
+ return data
+
+
+def set_avalon_workdir(
+ project=None, hierarchy=None, asset=None, task=None
+):
+ """
+ Updates os.environ and session with filled workdir
+
+ Args:
+ project (string, optional): project name
+ hierarchy (string, optional): hierarchy path
+ asset (string, optional): asset name
+ task (string, optional): task name
+
+ Returns:
+ os.environ[AVALON_WORKDIR]: workdir path
+ avalon.session[AVALON_WORKDIR]: workdir path
+
+ """
+
+ lib.set_io_database()
+ awd = io.Session.get("AVALON_WORKDIR", None) or \
+ os.getenv("AVALON_WORKDIR", None)
+
+ data = get_context_data(project, hierarchy, asset, task)
+
+ if (not awd) or ("{" not in awd):
+ anatomy_filled = Anatomy(io.Session["AVALON_PROJECT"]).format(data)
+ awd = anatomy_filled["work"]["folder"]
+
+    awd_filled = os.path.normpath(awd.format(**data))
+
+ io.Session["AVALON_WORKDIR"] = awd_filled
+ os.environ["AVALON_WORKDIR"] = awd_filled
+ log.info("`AVALON_WORKDIR` fixed to: {}".format(awd_filled))
+
+
+def get_workdir_template(data=None):
+ """
+ Obtain workdir templated path from Anatomy()
+
+ Args:
+ data (dict, optional): basic contextual data
+
+ Returns:
+ string: template path
+ """
+
+ anatomy = Anatomy()
+ anatomy_filled = anatomy.format(data or get_context_data())
+
+    work = None
+    try:
+        work = anatomy_filled["work"]
+    except Exception as e:
+        log.error(
+            "{0} Error in get_workdir_template(): {1}".format(__name__, str(e))
+        )
+
+    return work
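A usage sketch for the helpers above, assuming a pype session where the AVALON_* variables were already populated by a launcher (project and task names here are placeholders):

    from pype.aport.lib import get_context_data, get_workdir_template

    # anything not passed in falls back to io.Session / os.environ
    data = get_context_data(project_name="MyProject", task_name="comp")
    work = get_workdir_template(data)  # filled "work" section of the Anatomy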
diff --git a/pype/aport/original/api.py b/pype/aport/original/api.py
index b1fffed1dc..7f8334d426 100644
--- a/pype/aport/original/api.py
+++ b/pype/aport/original/api.py
@@ -82,13 +82,19 @@ def context(project, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
+ io.Session["AVALON_PROJECT"] = project
avalon.update_current_task(task, asset, app)
- project_code = pype.get_project_code()
- pype.set_project_code(project_code)
+ project_code = pype.get_project()["data"].get("code", '')
+
+ os.environ["AVALON_PROJECTCODE"] = project_code
+ io.Session["AVALON_PROJECTCODE"] = project_code
+
hierarchy = pype.get_hierarchy()
- pype.set_hierarchy(hierarchy)
+ os.environ["AVALON_HIERARCHY"] = hierarchy
+ io.Session["AVALON_HIERARCHY"] = hierarchy
+
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)
diff --git a/pype/aport/original/pipeline.py b/pype/aport/original/pipeline.py
index 1bfd9a8d1e..1a4f268e7b 100644
--- a/pype/aport/original/pipeline.py
+++ b/pype/aport/original/pipeline.py
@@ -81,13 +81,19 @@ def context(project, asset, task, app):
# http://localhost:4242/pipeline/context?project=this&asset=shot01&task=comp
os.environ["AVALON_PROJECT"] = project
+ io.Session["AVALON_PROJECT"] = project
avalon.update_current_task(task, asset, app)
- project_code = pype.get_project_code()
- pype.set_project_code(project_code)
+ project_code = pype.get_project()["data"].get("code", '')
+
+ os.environ["AVALON_PROJECTCODE"] = project_code
+ io.Session["AVALON_PROJECTCODE"] = project_code
+
hierarchy = pype.get_hierarchy()
- pype.set_hierarchy(hierarchy)
+ os.environ["AVALON_HIERARCHY"] = hierarchy
+ io.Session["AVALON_HIERARCHY"] = hierarchy
+
fix_paths = {k: v.replace("\\", "/") for k, v in SESSION.items()
if isinstance(v, str)}
SESSION.update(fix_paths)
diff --git a/pype/clockify/__init__.py b/pype/clockify/__init__.py
index 5f61acd751..aab0d048de 100644
--- a/pype/clockify/__init__.py
+++ b/pype/clockify/__init__.py
@@ -1,9 +1,14 @@
from .clockify_api import ClockifyAPI
from .widget_settings import ClockifySettings
+from .widget_message import MessageWidget
from .clockify import ClockifyModule
__all__ = [
- 'ClockifyAPI',
- 'ClockifySettings',
- 'ClockifyModule'
+ "ClockifyAPI",
+ "ClockifySettings",
+ "ClockifyModule",
+ "MessageWidget"
]
+
+def tray_init(tray_widget, main_widget):
+ return ClockifyModule(main_widget, tray_widget)
diff --git a/pype/clockify/clockify.py b/pype/clockify/clockify.py
index 0b84bf3953..ed6d996e2e 100644
--- a/pype/clockify/clockify.py
+++ b/pype/clockify/clockify.py
@@ -1,15 +1,19 @@
+import os
import threading
-from pypeapp import style
+from pypeapp import style, Logger
from Qt import QtWidgets
-from pype.clockify import ClockifySettings, ClockifyAPI
+from . import ClockifySettings, ClockifyAPI, MessageWidget
class ClockifyModule:
def __init__(self, main_parent=None, parent=None):
+ self.log = Logger().get_logger(self.__class__.__name__, "PypeTray")
+
self.main_parent = main_parent
self.parent = parent
self.clockapi = ClockifyAPI()
+ self.message_widget = None
self.widget_settings = ClockifySettings(main_parent, self)
self.widget_settings_required = None
@@ -20,9 +24,10 @@ class ClockifyModule:
self.bool_workspace_set = False
self.bool_timer_run = False
- def start_up(self):
self.clockapi.set_master(self)
self.bool_api_key_set = self.clockapi.set_api()
+
+ def tray_start(self):
if self.bool_api_key_set is False:
self.show_settings()
return
@@ -41,7 +46,7 @@ class ClockifyModule:
os.path.dirname(__file__),
'ftrack_actions'
])
- current = os.environ('FTRACK_ACTIONS_PATH', '')
+ current = os.environ.get('FTRACK_ACTIONS_PATH', '')
if current:
current += os.pathsep
os.environ['FTRACK_ACTIONS_PATH'] = current + actions_path
@@ -57,6 +62,25 @@ class ClockifyModule:
current += os.pathsep
os.environ['AVALON_ACTIONS'] = current + actions_path
+ if 'TimersManager' in modules:
+ self.timer_manager = modules['TimersManager']
+ self.timer_manager.add_module(self)
+
+ def start_timer_manager(self, data):
+ self.start_timer(data)
+
+ def stop_timer_manager(self):
+ self.stop_timer()
+
+ def timer_started(self, data):
+ if hasattr(self, 'timer_manager'):
+ self.timer_manager.start_timers(data)
+
+ def timer_stopped(self):
+ self.bool_timer_run = False
+ if hasattr(self, 'timer_manager'):
+ self.timer_manager.stop_timers()
+
def start_timer_check(self):
self.bool_thread_check_running = True
if self.thread_timer_check is None:
@@ -75,21 +99,129 @@ class ClockifyModule:
def check_running(self):
import time
while self.bool_thread_check_running is True:
+ bool_timer_run = False
if self.clockapi.get_in_progress() is not None:
- self.bool_timer_run = True
- else:
- self.bool_timer_run = False
- self.set_menu_visibility()
+ bool_timer_run = True
+
+ if self.bool_timer_run != bool_timer_run:
+ if self.bool_timer_run is True:
+ self.timer_stopped()
+ elif self.bool_timer_run is False:
+ actual_timer = self.clockapi.get_in_progress()
+ if not actual_timer:
+ continue
+
+ actual_proj_id = actual_timer["projectId"]
+ if not actual_proj_id:
+ continue
+
+ project = self.clockapi.get_project_by_id(actual_proj_id)
+ if project and project.get("code") == 501:
+ continue
+
+ project_name = project["name"]
+
+ actual_timer_hierarchy = actual_timer["description"]
+ hierarchy_items = actual_timer_hierarchy.split("/")
+ # Each pype timer must have at least 2 items!
+ if len(hierarchy_items) < 2:
+ continue
+ task_name = hierarchy_items[-1]
+ hierarchy = hierarchy_items[:-1]
+
+ task_type = None
+ if len(actual_timer.get("tags", [])) > 0:
+ task_type = actual_timer["tags"][0].get("name")
+ data = {
+ "task_name": task_name,
+ "hierarchy": hierarchy,
+ "project_name": project_name,
+ "task_type": task_type
+ }
+
+ self.timer_started(data)
+
+ self.bool_timer_run = bool_timer_run
+ self.set_menu_visibility()
time.sleep(5)
def stop_timer(self):
self.clockapi.finish_time_entry()
- self.bool_timer_run = False
+ if self.bool_timer_run:
+ self.timer_stopped()
+
+ def signed_in(self):
+ if hasattr(self, 'timer_manager'):
+ if not self.timer_manager:
+ return
+
+ if not self.timer_manager.last_task:
+ return
+
+ if self.timer_manager.is_running:
+ self.start_timer_manager(self.timer_manager.last_task)
+
+ def start_timer(self, input_data):
+        # Skip if the API key is not entered
+ if not self.clockapi.get_api_key():
+ return
+
+ actual_timer = self.clockapi.get_in_progress()
+ actual_timer_hierarchy = None
+ actual_project_id = None
+ if actual_timer is not None:
+ actual_timer_hierarchy = actual_timer.get("description")
+ actual_project_id = actual_timer.get("projectId")
+
+ # Concatenate hierarchy and task to get description
+        desc_items = list(input_data.get("hierarchy", []))
+ desc_items.append(input_data["task_name"])
+ description = "/".join(desc_items)
+
+ # Check project existence
+ project_name = input_data["project_name"]
+ project_id = self.clockapi.get_project_id(project_name)
+ if not project_id:
+ self.log.warning((
+ "Project \"{}\" was not found in Clockify. Timer won't start."
+ ).format(project_name))
+
+ msg = (
+ "Project \"{}\" is not in Clockify Workspace \"{}\"."
+ "
Please inform your Project Manager."
+ ).format(project_name, str(self.clockapi.workspace))
+
+ self.message_widget = MessageWidget(
+ self.main_parent, msg, "Clockify - Info Message"
+ )
+            self.message_widget.closed.connect(self.on_message_widget_close)
+ self.message_widget.show()
+
+ return
+
+ if (
+ actual_timer is not None and
+ description == actual_timer_hierarchy and
+ project_id == actual_project_id
+ ):
+ return
+
+ tag_ids = []
+ task_tag_id = self.clockapi.get_tag_id(input_data["task_type"])
+ if task_tag_id is not None:
+ tag_ids.append(task_tag_id)
+
+ self.clockapi.start_time_entry(
+ description, project_id, tag_ids=tag_ids
+ )
+
+ def on_message_widget_close(self):
+ self.message_widget = None
# Definition of Tray menu
- def tray_menu(self, parent):
+ def tray_menu(self, parent_menu):
# Menu for Tray App
- self.menu = QtWidgets.QMenu('Clockify', parent)
+ self.menu = QtWidgets.QMenu('Clockify', parent_menu)
self.menu.setProperty('submenu', 'on')
self.menu.setStyleSheet(style.load_stylesheet())
@@ -109,7 +241,7 @@ class ClockifyModule:
self.set_menu_visibility()
- return self.menu
+ parent_menu.addMenu(self.menu)
def show_settings(self):
self.widget_settings.input_api_key.setText(self.clockapi.get_api_key())
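The timer round trip above encodes the pype context into the Clockify time-entry description as hierarchy items plus task name, joined with "/". A self-contained sketch of that encoding and the inverse split used by check_running():

    def encode_description(hierarchy, task_name):
        # mirrors start_timer(): hierarchy items then the task, "/"-joined
        return "/".join(list(hierarchy) + [task_name])

    def decode_description(description):
        # mirrors check_running(): last item is the task, the rest is hierarchy
        items = description.split("/")
        if len(items) < 2:  # each pype timer must have at least 2 items
            return None
        return {"hierarchy": items[:-1], "task_name": items[-1]}

    desc = encode_description(["shots", "sh010"], "compositing")
    assert decode_description(desc) == {
        "hierarchy": ["shots", "sh010"], "task_name": "compositing"
    }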
diff --git a/pype/clockify/clockify_api.py b/pype/clockify/clockify_api.py
index f5ebac0cef..f012efc002 100644
--- a/pype/clockify/clockify_api.py
+++ b/pype/clockify/clockify_api.py
@@ -1,4 +1,5 @@
import os
+import re
import requests
import json
import datetime
@@ -22,7 +23,9 @@ class ClockifyAPI(metaclass=Singleton):
app_dir = os.path.normpath(appdirs.user_data_dir('pype-app', 'pype'))
file_name = 'clockify.json'
fpath = os.path.join(app_dir, file_name)
+ admin_permission_names = ['WORKSPACE_OWN', 'WORKSPACE_ADMIN']
master_parent = None
+ workspace = None
workspace_id = None
def set_master(self, master_parent):
@@ -41,6 +44,8 @@ class ClockifyAPI(metaclass=Singleton):
if api_key is not None and self.validate_api_key(api_key) is True:
self.headers["X-Api-Key"] = api_key
self.set_workspace()
+ if self.master_parent:
+ self.master_parent.signed_in()
return True
return False
@@ -55,31 +60,41 @@ class ClockifyAPI(metaclass=Singleton):
return False
return True
- def validate_workspace_perm(self):
- test_project = '__test__'
- action_url = 'workspaces/{}/projects/'.format(self.workspace_id)
- body = {
- "name": test_project, "clientId": "", "isPublic": "false",
- "estimate": {"type": "AUTO"},
- "color": "#f44336", "billable": "true"
- }
- response = requests.post(
- self.endpoint + action_url,
- headers=self.headers, json=body
+ def validate_workspace_perm(self, workspace_id=None):
+ user_id = self.get_user_id()
+ if user_id is None:
+ return False
+ if workspace_id is None:
+ workspace_id = self.workspace_id
+ action_url = "/workspaces/{}/users/{}/permissions".format(
+ workspace_id, user_id
)
- if response.status_code == 201:
- self.delete_project(self.get_project_id(test_project))
- return True
- else:
- projects = self.get_projects()
- if test_project in projects:
- try:
- self.delete_project(self.get_project_id(test_project))
- return True
- except json.decoder.JSONDecodeError:
- return False
+ response = requests.get(
+ self.endpoint + action_url,
+ headers=self.headers
+ )
+ user_permissions = response.json()
+ for perm in user_permissions:
+ if perm['name'] in self.admin_permission_names:
+ return True
return False
+ def get_user_id(self):
+ action_url = 'v1/user/'
+ response = requests.get(
+ self.endpoint + action_url,
+ headers=self.headers
+ )
+        # this regex is necessary: UNICODE strings are crashing
+        # during json serialization
+        id_regex = r'"id":"\w+"'
+ result = re.findall(id_regex, str(response.content))
+ if len(result) != 1:
+ # replace with log and better message?
+ print('User ID was not found (this is a BUG!!!)')
+ return None
+ return json.loads('{'+result[0]+'}')['id']
+
def set_workspace(self, name=None):
if name is None:
name = os.environ.get('CLOCKIFY_WORKSPACE', None)
@@ -147,6 +162,19 @@ class ClockifyAPI(metaclass=Singleton):
project["name"]: project["id"] for project in response.json()
}
+ def get_project_by_id(self, project_id, workspace_id=None):
+ if workspace_id is None:
+ workspace_id = self.workspace_id
+ action_url = 'workspaces/{}/projects/{}/'.format(
+ workspace_id, project_id
+ )
+ response = requests.get(
+ self.endpoint + action_url,
+ headers=self.headers
+ )
+
+ return response.json()
+
def get_tags(self, workspace_id=None):
if workspace_id is None:
workspace_id = self.workspace_id
@@ -279,6 +307,9 @@ class ClockifyAPI(metaclass=Singleton):
if workspace_id is None:
workspace_id = self.workspace_id
current = self.get_in_progress(workspace_id)
+ if current is None:
+ return
+
current_id = current["id"]
action_url = 'workspaces/{}/timeEntries/{}'.format(
workspace_id, current_id
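The regex in get_user_id() works around unicode content that crashes json serialization by fishing the `"id":"..."` pair out of the raw bytes. A hedged alternative sketch that decodes explicitly instead, assuming the endpoint returns a JSON object with an "id" field (which is what the current code relies on):

    import json

    def extract_user_id(response_content):
        """Decode the response bytes defensively and pull out the user id."""
        try:
            payload = json.loads(
                response_content.decode("utf-8", errors="replace")
            )
        except (ValueError, AttributeError):
            return None
        return payload.get("id") if isinstance(payload, dict) else None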
diff --git a/pype/clockify/ftrack_actions/action_clockify_start.py b/pype/clockify/ftrack_actions/action_clockify_start.py
deleted file mode 100644
index e09d0b76e6..0000000000
--- a/pype/clockify/ftrack_actions/action_clockify_start.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import os
-import sys
-import argparse
-import logging
-
-from pype.vendor import ftrack_api
-from pype.ftrack import BaseAction
-from pype.clockify import ClockifyAPI
-
-
-class StartClockify(BaseAction):
- '''Starts timer on clockify.'''
-
- #: Action identifier.
- identifier = 'clockify.start.timer'
- #: Action label.
- label = 'Start timer'
- #: Action description.
- description = 'Starts timer on clockify'
- #: roles that are allowed to register this action
- icon = '{}/app_icons/clockify.png'.format(
- os.environ.get('PYPE_STATICS_SERVER', '')
- )
- #: Clockify api
- clockapi = ClockifyAPI()
-
- def discover(self, session, entities, event):
- if len(entities) != 1:
- return False
- if entities[0].entity_type.lower() != 'task':
- return False
- if self.clockapi.workspace_id is None:
- return False
- return True
-
- def launch(self, session, entities, event):
- task = entities[0]
- task_name = task['type']['name']
- project_name = task['project']['full_name']
-
- def get_parents(entity):
- output = []
- if entity.entity_type.lower() == 'project':
- return output
- output.extend(get_parents(entity['parent']))
- output.append(entity['name'])
-
- return output
-
- desc_items = get_parents(task['parent'])
- desc_items.append(task['name'])
- description = '/'.join(desc_items)
- project_id = self.clockapi.get_project_id(project_name)
- tag_ids = []
- tag_ids.append(self.clockapi.get_tag_id(task_name))
- self.clockapi.start_time_entry(
- description, project_id, tag_ids=tag_ids
- )
-
- return True
-
-
-def register(session, **kw):
- '''Register plugin. Called when used as an plugin.'''
-
- if not isinstance(session, ftrack_api.session.Session):
- return
-
- StartClockify(session).register()
-
-
-def main(arguments=None):
- '''Set up logging and register action.'''
- if arguments is None:
- arguments = []
-
- parser = argparse.ArgumentParser()
- # Allow setting of logging level from arguments.
- loggingLevels = {}
- for level in (
- logging.NOTSET, logging.DEBUG, logging.INFO, logging.WARNING,
- logging.ERROR, logging.CRITICAL
- ):
- loggingLevels[logging.getLevelName(level).lower()] = level
-
- parser.add_argument(
- '-v', '--verbosity',
- help='Set the logging output verbosity.',
- choices=loggingLevels.keys(),
- default='info'
- )
- namespace = parser.parse_args(arguments)
-
- # Set up basic logging
- logging.basicConfig(level=loggingLevels[namespace.verbosity])
-
- session = ftrack_api.Session()
- register(session)
-
- # Wait for events
- logging.info(
- 'Registered actions and listening for events. Use Ctrl-C to abort.'
- )
- session.event_hub.wait()
-
-
-if __name__ == '__main__':
- raise SystemExit(main(sys.argv[1:]))
diff --git a/pype/clockify/ftrack_actions/action_clockify_sync.py b/pype/clockify/ftrack_actions/action_clockify_sync.py
index 695f7581c0..e679894d0e 100644
--- a/pype/clockify/ftrack_actions/action_clockify_sync.py
+++ b/pype/clockify/ftrack_actions/action_clockify_sync.py
@@ -17,10 +17,8 @@ class SyncClocify(BaseAction):
label = 'Sync To Clockify'
#: Action description.
description = 'Synchronise data to Clockify workspace'
- #: priority
- priority = 100
#: roles that are allowed to register this action
- role_list = ['Pypeclub', 'Administrator']
+    role_list = ["Pypeclub", "Administrator", "Project Manager"]
#: icon
icon = '{}/app_icons/clockify-white.png'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
@@ -28,16 +26,22 @@ class SyncClocify(BaseAction):
#: CLockifyApi
clockapi = ClockifyAPI()
- def register(self):
+ def preregister(self):
if self.clockapi.workspace_id is None:
- raise ValueError('Clockify Workspace or API key are not set!')
+ return "Clockify Workspace or API key are not set!"
if self.clockapi.validate_workspace_perm() is False:
raise MissingPermision('Clockify')
- super().register()
+
+ return True
def discover(self, session, entities, event):
''' Validation '''
+ if len(entities) != 1:
+ return False
+
+ if entities[0].entity_type.lower() != "project":
+ return False
return True
def launch(self, session, entities, event):
diff --git a/pype/clockify/widget_message.py b/pype/clockify/widget_message.py
new file mode 100644
index 0000000000..349875b9e5
--- /dev/null
+++ b/pype/clockify/widget_message.py
@@ -0,0 +1,91 @@
+from Qt import QtCore, QtGui, QtWidgets
+from pypeapp import style
+
+
+class MessageWidget(QtWidgets.QWidget):
+
+ SIZE_W = 300
+ SIZE_H = 130
+
+ closed = QtCore.Signal()
+
+    def __init__(self, parent=None, messages=None, title="Message"):
+
+ super(MessageWidget, self).__init__()
+
+ self._parent = parent
+
+ # Icon
+ if parent and hasattr(parent, 'icon'):
+ self.setWindowIcon(parent.icon)
+ else:
+ from pypeapp.resources import get_resource
+ self.setWindowIcon(QtGui.QIcon(get_resource('icon.png')))
+
+ self.setWindowFlags(
+ QtCore.Qt.WindowCloseButtonHint |
+ QtCore.Qt.WindowMinimizeButtonHint
+ )
+
+ # Font
+ self.font = QtGui.QFont()
+ self.font.setFamily("DejaVu Sans Condensed")
+ self.font.setPointSize(9)
+ self.font.setBold(True)
+ self.font.setWeight(50)
+ self.font.setKerning(True)
+
+ # Size setting
+ self.resize(self.SIZE_W, self.SIZE_H)
+ self.setMinimumSize(QtCore.QSize(self.SIZE_W, self.SIZE_H))
+ self.setMaximumSize(QtCore.QSize(self.SIZE_W+100, self.SIZE_H+100))
+
+ # Style
+ self.setStyleSheet(style.load_stylesheet())
+
+ self.setLayout(self._ui_layout(messages))
+ self.setWindowTitle(title)
+
+ def _ui_layout(self, messages):
+ if not messages:
+ messages = ["*Misssing messages (This is a bug)*", ]
+
+ elif not isinstance(messages, (tuple, list)):
+ messages = [messages, ]
+
+ main_layout = QtWidgets.QVBoxLayout(self)
+
+ labels = []
+ for message in messages:
+ label = QtWidgets.QLabel(message)
+ label.setFont(self.font)
+ label.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
+ label.setTextFormat(QtCore.Qt.RichText)
+ label.setWordWrap(True)
+
+ labels.append(label)
+ main_layout.addWidget(label)
+
+ btn_close = QtWidgets.QPushButton("Close")
+ btn_close.setToolTip('Close this window')
+ btn_close.clicked.connect(self.on_close_clicked)
+
+ btn_group = QtWidgets.QHBoxLayout()
+ btn_group.addStretch(1)
+ btn_group.addWidget(btn_close)
+
+ main_layout.addLayout(btn_group)
+
+ self.labels = labels
+ self.btn_group = btn_group
+ self.btn_close = btn_close
+ self.main_layout = main_layout
+
+ return main_layout
+
+ def on_close_clicked(self):
+ self.close()
+
+ def close(self, *args, **kwargs):
+ self.closed.emit()
+ super(MessageWidget, self).close(*args, **kwargs)
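A usage sketch for the new widget, assuming a running Qt application (the tray normally provides one) and a placeholder message:

    from Qt import QtWidgets
    from pype.clockify.widget_message import MessageWidget

    app = QtWidgets.QApplication([])
    widget = MessageWidget(
        None, ["Project \"foo\" is not in the workspace."], "Clockify - Info"
    )
    widget.closed.connect(app.quit)  # `closed` is emitted from close()
    widget.show()
    app.exec_()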
diff --git a/pype/ftrack/actions/action_application_loader.py b/pype/ftrack/actions/action_application_loader.py
index 1b0f48f9be..813eee358a 100644
--- a/pype/ftrack/actions/action_application_loader.py
+++ b/pype/ftrack/actions/action_application_loader.py
@@ -4,21 +4,18 @@ import time
from pype.ftrack import AppAction
from avalon import lib
from pypeapp import Logger
-from pype import lib as pypelib
+from pype.lib import get_all_avalon_projects
log = Logger().get_logger(__name__)
-def registerApp(app, session):
+def registerApp(app, session, plugins_presets):
name = app['name']
variant = ""
try:
variant = app['name'].split("_")[1]
except Exception:
- log.warning((
- '"{0}" - App "name" and "variant" is not separated by "_"'
- ' (variant is not set)'
- ).format(app['name']))
+ pass
abspath = lib.which_app(app['name'])
if abspath is None:
@@ -44,29 +41,42 @@ def registerApp(app, session):
# register action
AppAction(
session, label, name, executable, variant,
- icon, description, preactions
+ icon, description, preactions, plugins_presets
).register()
+ if not variant:
+ log.info('- Variant is not set')
-def register(session):
- projects = pypelib.get_all_avalon_projects()
+
+def register(session, plugins_presets={}):
+    # WARNING: getting projects here only verifies the connection to mongo
+    # - without it, `discover` of ftrack app actions takes ages
+ result = get_all_avalon_projects()
apps = []
- appNames = []
- # Get all application from all projects
- for project in projects:
- for app in project['config']['apps']:
- if app['name'] not in appNames:
- appNames.append(app['name'])
- apps.append(app)
+
+ launchers_path = os.path.join(os.environ["PYPE_CONFIG"], "launchers")
+ for file in os.listdir(launchers_path):
+ filename, ext = os.path.splitext(file)
+ if ext.lower() != ".toml":
+ continue
+ loaded_data = toml.load(os.path.join(launchers_path, file))
+ app_data = {
+ "name": filename,
+ "label": loaded_data.get("label", filename)
+ }
+ apps.append(app_data)
apps = sorted(apps, key=lambda x: x['name'])
app_counter = 0
for app in apps:
try:
- registerApp(app, session)
+ registerApp(app, session, plugins_presets)
if app_counter%5 == 0:
time.sleep(0.1)
app_counter += 1
- except Exception as e:
- log.exception("'{0}' - not proper App ({1})".format(app['name'], e))
+ except Exception as exc:
+ log.exception(
+ "\"{}\" - not a proper App ({})".format(app['name'], str(exc)),
+ exc_info=True
+ )
diff --git a/pype/ftrack/actions/action_asset_delete.py b/pype/ftrack/actions/action_asset_delete.py
index 684b3862a8..654c78049b 100644
--- a/pype/ftrack/actions/action_asset_delete.py
+++ b/pype/ftrack/actions/action_asset_delete.py
@@ -78,7 +78,7 @@ class AssetDelete(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -87,7 +87,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- AssetDelete(session).register()
+ AssetDelete(session, plugins_presets).register()
def main(arguments=None):
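A reviewer-style note on the signature change repeated across these actions: `plugins_presets={}` is a mutable default argument, shared between calls. It stays harmless only as long as no handler mutates the dict; the conventional defensive spelling is sketched below (not what the patch does, just the usual idiom):

    def register(session, plugins_presets=None):
        """Defensive variant of the new register() signature."""
        if plugins_presets is None:
            plugins_presets = {}
        # ... hand plugins_presets to the action handler as the patch does
        return plugins_presets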
diff --git a/pype/ftrack/actions/action_attributes_remapper.py b/pype/ftrack/actions/action_attributes_remapper.py
new file mode 100644
index 0000000000..db33fd1365
--- /dev/null
+++ b/pype/ftrack/actions/action_attributes_remapper.py
@@ -0,0 +1,286 @@
+import os
+
+from pype.vendor import ftrack_api
+from pype.ftrack import BaseAction
+from avalon.tools.libraryloader.io_nonsingleton import DbConnector
+
+
+class AttributesRemapper(BaseAction):
+ '''Edit meta data action.'''
+
+ #: Action identifier.
+ identifier = 'attributes.remapper'
+ #: Action label.
+ label = "Pype Doctor"
+ variant = '- Attributes Remapper'
+ #: Action description.
+ description = 'Remaps attributes in avalon DB'
+
+ #: roles that are allowed to register this action
+ role_list = ["Pypeclub", "Administrator"]
+ icon = '{}/ftrack/action_icons/PypeDoctor.svg'.format(
+ os.environ.get('PYPE_STATICS_SERVER', '')
+ )
+
+ db_con = DbConnector()
+ keys_to_change = {
+ "fstart": "frameStart",
+ "startFrame": "frameStart",
+ "edit_in": "frameStart",
+
+ "fend": "frameEnd",
+ "endFrame": "frameEnd",
+ "edit_out": "frameEnd",
+
+ "handle_start": "handleStart",
+ "handle_end": "handleEnd",
+ "handles": ["handleEnd", "handleStart"],
+
+ "frameRate": "fps",
+ "framerate": "fps",
+ "resolution_width": "resolutionWidth",
+ "resolution_height": "resolutionHeight",
+ "pixel_aspect": "pixelAspect"
+ }
+
+ def discover(self, session, entities, event):
+ ''' Validation '''
+
+ return True
+
+ def interface(self, session, entities, event):
+ if event['data'].get('values', {}):
+ return
+
+ title = 'Select Projects where attributes should be remapped'
+
+ items = []
+
+ selection_enum = {
+ 'label': 'Process type',
+ 'type': 'enumerator',
+ 'name': 'process_type',
+ 'data': [
+ {
+ 'label': 'Selection',
+ 'value': 'selection'
+ }, {
+ 'label': 'Inverted selection',
+ 'value': 'except'
+ }
+ ],
+ 'value': 'selection'
+ }
+ selection_label = {
+ 'type': 'label',
+ 'value': (
+                'Selection based variants:<br>'
+ '- `Selection` - '
+                'NOTHING is processed when nothing is selected<br>'
+ '- `Inverted selection` - '
+ 'ALL Projects are processed when nothing is selected'
+ )
+ }
+
+ items.append(selection_enum)
+ items.append(selection_label)
+
+ item_splitter = {'type': 'label', 'value': '---'}
+
+ all_projects = session.query('Project').all()
+ for project in all_projects:
+ item_label = {
+ 'type': 'label',
+ 'value': '{} ({})'.format(
+ project['full_name'], project['name']
+ )
+ }
+ item = {
+ 'name': project['id'],
+ 'type': 'boolean',
+ 'value': False
+ }
+ if len(items) > 0:
+ items.append(item_splitter)
+ items.append(item_label)
+ items.append(item)
+
+        if len(all_projects) == 0:
+            return {
+                'success': False,
+                'message': 'Didn\'t find any projects'
+            }
+ else:
+ return {
+ 'items': items,
+ 'title': title
+ }
+
+ def launch(self, session, entities, event):
+ if 'values' not in event['data']:
+ return
+
+ values = event['data']['values']
+ process_type = values.pop('process_type')
+
+ selection = True
+ if process_type == 'except':
+ selection = False
+
+ interface_messages = {}
+
+ projects_to_update = []
+ for project_id, update_bool in values.items():
+ if not update_bool and selection:
+ continue
+
+ if update_bool and not selection:
+ continue
+
+ project = session.query(
+ 'Project where id is "{}"'.format(project_id)
+ ).one()
+ projects_to_update.append(project)
+
+ if not projects_to_update:
+ self.log.debug('Nothing to update')
+ return {
+ 'success': True,
+ 'message': 'Nothing to update'
+ }
+
+
+ self.db_con.install()
+
+ relevant_types = ["project", "asset", "version"]
+
+ for ft_project in projects_to_update:
+ self.log.debug(
+ "Processing project \"{}\"".format(ft_project["full_name"])
+ )
+
+ self.db_con.Session["AVALON_PROJECT"] = ft_project["full_name"]
+ project = self.db_con.find_one({'type': 'project'})
+ if not project:
+ key = "Projects not synchronized to db"
+ if key not in interface_messages:
+ interface_messages[key] = []
+ interface_messages[key].append(ft_project["full_name"])
+ continue
+
+ # Get all entities in project collection from MongoDB
+ _entities = self.db_con.find({})
+ for _entity in _entities:
+ ent_t = _entity.get("type", "*unknown type")
+ name = _entity.get("name", "*unknown name")
+
+ self.log.debug(
+ "- {} ({})".format(name, ent_t)
+ )
+
+ # Skip types that do not store keys to change
+ if ent_t.lower() not in relevant_types:
+ self.log.debug("-- skipping - type is not relevant")
+ continue
+
+ # Get data which will change
+ updating_data = {}
+ source_data = _entity["data"]
+
+ for key_from, key_to in self.keys_to_change.items():
+ # continue if final key already exists
+ if type(key_to) == list:
+ for key in key_to:
+ # continue if final key was set in update_data
+ if key in updating_data:
+ continue
+
+ # continue if source key not exist or value is None
+ value = source_data.get(key_from)
+ if value is None:
+ continue
+
+ self.log.debug(
+ "-- changing key {} to {}".format(
+ key_from,
+ key
+ )
+ )
+
+ updating_data[key] = value
+ else:
+ if key_to in source_data:
+ continue
+
+ # continue if final key was set in update_data
+ if key_to in updating_data:
+ continue
+
+ # continue if source key not exist or value is None
+ value = source_data.get(key_from)
+ if value is None:
+ continue
+
+ self.log.debug(
+ "-- changing key {} to {}".format(key_from, key_to)
+ )
+ updating_data[key_to] = value
+
+ # Pop out old keys from entity
+ is_obsolete = False
+ for key in self.keys_to_change:
+ if key not in source_data:
+ continue
+ is_obsolete = True
+ source_data.pop(key)
+
+ # continue if there is nothing to change
+ if not is_obsolete and not updating_data:
+ self.log.debug("-- nothing to change")
+ continue
+
+ source_data.update(updating_data)
+
+ self.db_con.update_many(
+ {"_id": _entity["_id"]},
+ {"$set": {"data": source_data}}
+ )
+
+ self.db_con.uninstall()
+
+ if interface_messages:
+ self.show_interface_from_dict(
+ messages=interface_messages,
+ title="Errors during remapping attributes",
+ event=event
+ )
+
+ return True
+
+ def show_interface_from_dict(self, event, messages, title=""):
+ items = []
+
+ for key, value in messages.items():
+ if not value:
+ continue
+ subtitle = {'type': 'label', 'value': '# {}'.format(key)}
+ items.append(subtitle)
+ if isinstance(value, list):
+ for item in value:
+ message = {
+                    'type': 'label', 'value': '<br>{}<br>'.format(item)
+ }
+ items.append(message)
+ else:
+            message = {'type': 'label', 'value': '{}<br>'.format(value)}
+ items.append(message)
+
+ self.show_interface(event, items, title)
+
+def register(session, plugins_presets={}):
+ '''Register plugin. Called when used as an plugin.'''
+
+ if not isinstance(session, ftrack_api.session.Session):
+ return
+
+ AttributesRemapper(session, plugins_presets).register()
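The update pass above is easiest to verify on a sample document. A condensed self-contained sketch of the same idea (note how "handles" fans out to both handle keys and how an existing new-style key is never overwritten):

    KEYS_TO_CHANGE = {
        "fstart": "frameStart",
        "handles": ["handleEnd", "handleStart"],
        "pixel_aspect": "pixelAspect",
    }

    def remap(source_data):
        updating = {}
        for key_from, key_to in KEYS_TO_CHANGE.items():
            value = source_data.get(key_from)
            if value is None:
                continue
            targets = key_to if isinstance(key_to, list) else [key_to]
            for key in targets:
                if key not in source_data and key not in updating:
                    updating[key] = value
        for key in KEYS_TO_CHANGE:
            source_data.pop(key, None)
        source_data.update(updating)
        return source_data

    doc = {"fstart": 1001, "handles": 10,
           "pixelAspect": 2.0, "pixel_aspect": 1.0}
    assert remap(doc) == {"frameStart": 1001, "handleEnd": 10,
                          "handleStart": 10, "pixelAspect": 2.0}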
diff --git a/pype/ftrack/actions/action_client_review_sort.py b/pype/ftrack/actions/action_client_review_sort.py
index b06a928007..6a659ce5e3 100644
--- a/pype/ftrack/actions/action_client_review_sort.py
+++ b/pype/ftrack/actions/action_client_review_sort.py
@@ -53,12 +53,12 @@ class ClientReviewSort(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- action_handler = ClientReviewSort(session)
+ action_handler = ClientReviewSort(session, plugins_presets)
action_handler.register()
diff --git a/pype/ftrack/actions/action_component_open.py b/pype/ftrack/actions/action_component_open.py
index d3213c555a..33f4d38890 100644
--- a/pype/ftrack/actions/action_component_open.py
+++ b/pype/ftrack/actions/action_component_open.py
@@ -65,7 +65,7 @@ class ComponentOpen(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -74,7 +74,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- ComponentOpen(session).register()
+ ComponentOpen(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_create_cust_attrs.py b/pype/ftrack/actions/action_create_cust_attrs.py
index d665e92d4a..47a6bb5d5f 100644
--- a/pype/ftrack/actions/action_create_cust_attrs.py
+++ b/pype/ftrack/actions/action_create_cust_attrs.py
@@ -7,6 +7,7 @@ import logging
from pype.vendor import ftrack_api
from pype.ftrack import BaseAction, get_ca_mongoid
from pypeapp import config
+from ftrack_api.exception import NoResultFoundError
"""
This action creates/updates custom attributes.
@@ -109,27 +110,21 @@ class CustomAttributes(BaseAction):
#: Action identifier.
identifier = 'create.update.attributes'
#: Action label.
- label = 'Create/Update Avalon Attributes'
+ label = "Pype Admin"
+ variant = '- Create/Update Avalon Attributes'
#: Action description.
description = 'Creates Avalon/Mongo ID for double check'
#: roles that are allowed to register this action
role_list = ['Pypeclub', 'Administrator']
- icon = '{}/ftrack/action_icons/CustomAttributes.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
- def __init__(self, session):
- super().__init__(session)
-
- self.types = {}
- self.object_type_ids = {}
- self.groups = {}
- self.security_roles = {}
- self.required_keys = ['key', 'label', 'type']
- self.type_posibilities = [
- 'text', 'boolean', 'date', 'enumerator',
- 'dynamic enumerator', 'number'
- ]
+ required_keys = ['key', 'label', 'type']
+ type_posibilities = [
+ 'text', 'boolean', 'date', 'enumerator',
+ 'dynamic enumerator', 'number'
+ ]
def discover(self, session, entities, event):
'''
@@ -139,8 +134,12 @@ class CustomAttributes(BaseAction):
return True
def launch(self, session, entities, event):
- # JOB SETTINGS
+ self.types = {}
+ self.object_type_ids = {}
+ self.groups = {}
+ self.security_roles = {}
+ # JOB SETTINGS
userId = event['source']['user']['id']
user = session.query('User where id is ' + userId).one()
@@ -159,11 +158,14 @@ class CustomAttributes(BaseAction):
job['status'] = 'done'
session.commit()
- except Exception as e:
+ except Exception as exc:
session.rollback()
job['status'] = 'failed'
session.commit()
- self.log.error('Creating custom attributes failed ({})'.format(e))
+ self.log.error(
+ 'Creating custom attributes failed ({})'.format(exc),
+ exc_info=True
+ )
return True
@@ -226,24 +228,30 @@ class CustomAttributes(BaseAction):
def custom_attributes_from_file(self, session, event):
presets = config.get_presets()['ftrack']['ftrack_custom_attributes']
- for cust_attr_name in presets:
+ for cust_attr_data in presets:
+ cust_attr_name = cust_attr_data.get(
+ 'label',
+ cust_attr_data.get('key')
+ )
try:
data = {}
- cust_attr = presets[cust_attr_name]
# Get key, label, type
- data.update(self.get_required(cust_attr))
+ data.update(self.get_required(cust_attr_data))
# Get hierachical/ entity_type/ object_id
- data.update(self.get_entity_type(cust_attr))
+ data.update(self.get_entity_type(cust_attr_data))
# Get group, default, security roles
- data.update(self.get_optional(cust_attr))
+ data.update(self.get_optional(cust_attr_data))
# Process data
self.process_attribute(data)
except CustAttrException as cae:
- msg = 'Custom attribute error "{}" - {}'.format(
- cust_attr_name, str(cae)
- )
- self.log.warning(msg)
+ if cust_attr_name:
+ msg = 'Custom attribute error "{}" - {}'.format(
+ cust_attr_name, str(cae)
+ )
+ else:
+ msg = 'Custom attribute error - {}'.format(str(cae))
+ self.log.warning(msg, exc_info=True)
self.show_message(event, msg)
return True
@@ -422,9 +430,10 @@ class CustomAttributes(BaseAction):
def get_security_role(self, security_roles):
roles = []
- if len(security_roles) == 0 or security_roles[0] == 'ALL':
+ security_roles_lowered = [role.lower() for role in security_roles]
+ if len(security_roles) == 0 or 'all' in security_roles_lowered:
roles = self.get_role_ALL()
- elif security_roles[0] == 'except':
+ elif security_roles_lowered[0] == 'except':
excepts = security_roles[1:]
all = self.get_role_ALL()
for role in all:
@@ -443,10 +452,10 @@ class CustomAttributes(BaseAction):
role = self.session.query(query).one()
self.security_roles[role_name] = role
roles.append(role)
- except Exception:
- raise CustAttrException(
- 'Securit role "{}" does not exist'.format(role_name)
- )
+ except NoResultFoundError:
+ raise CustAttrException((
+                'Security role "{}" does not exist'
+ ).format(role_name))
return roles
@@ -560,7 +569,7 @@ class CustomAttributes(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -569,7 +578,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- CustomAttributes(session).register()
+ CustomAttributes(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_create_folders.py b/pype/ftrack/actions/action_create_folders.py
index 2a777911b4..b9e10f7c30 100644
--- a/pype/ftrack/actions/action_create_folders.py
+++ b/pype/ftrack/actions/action_create_folders.py
@@ -30,11 +30,13 @@ class CreateFolders(BaseAction):
def discover(self, session, entities, event):
''' Validation '''
- not_allowed = ['assetversion']
if len(entities) != 1:
return False
+
+ not_allowed = ['assetversion', 'project']
if entities[0].entity_type.lower() in not_allowed:
return False
+
return True
def interface(self, session, entities, event):
@@ -322,13 +324,13 @@ class PartialDict(dict):
return '{'+key+'}'
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- CreateFolders(session).register()
+ CreateFolders(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_create_project_folders.py b/pype/ftrack/actions/action_create_project_structure.py
similarity index 95%
rename from pype/ftrack/actions/action_create_project_folders.py
rename to pype/ftrack/actions/action_create_project_structure.py
index 3ccdb08714..74d458b5f8 100644
--- a/pype/ftrack/actions/action_create_project_folders.py
+++ b/pype/ftrack/actions/action_create_project_structure.py
@@ -13,9 +13,9 @@ class CreateProjectFolders(BaseAction):
'''Edit meta data action.'''
#: Action identifier.
- identifier = 'create.project.folders'
+ identifier = 'create.project.structure'
#: Action label.
- label = 'Create Project Folders'
+ label = 'Create Project Structure'
#: Action description.
description = 'Creates folder structure'
#: roles that are allowed to register this action
@@ -31,6 +31,11 @@ class CreateProjectFolders(BaseAction):
def discover(self, session, entities, event):
''' Validation '''
+ if len(entities) != 1:
+ return False
+
+ if entities[0].entity_type.lower() != "project":
+ return False
return True
@@ -190,13 +195,13 @@ class CreateProjectFolders(BaseAction):
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- CreateProjectFolders(session).register()
+ CreateProjectFolders(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_cust_attr_doctor.py b/pype/ftrack/actions/action_cust_attr_doctor.py
index 9d0ce2071f..1b8f250e5b 100644
--- a/pype/ftrack/actions/action_cust_attr_doctor.py
+++ b/pype/ftrack/actions/action_cust_attr_doctor.py
@@ -12,14 +12,15 @@ class CustomAttributeDoctor(BaseAction):
#: Action identifier.
identifier = 'custom.attributes.doctor'
#: Action label.
- label = 'Custom Attributes Doctor'
+ label = "Pype Doctor"
+ variant = '- Custom Attributes Doctor'
#: Action description.
description = (
'Fix hierarchical custom attributes mainly handles, fstart'
' and fend'
)
- icon = '{}/ftrack/action_icons/TestAction.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeDoctor.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
hierarchical_ca = ['handle_start', 'handle_end', 'fstart', 'fend']
@@ -286,13 +287,13 @@ class CustomAttributeDoctor(BaseAction):
return all_roles
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- CustomAttributeDoctor(session).register()
+ CustomAttributeDoctor(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_delete_asset.py b/pype/ftrack/actions/action_delete_asset.py
index 96087f4c8e..a408de45b2 100644
--- a/pype/ftrack/actions/action_delete_asset.py
+++ b/pype/ftrack/actions/action_delete_asset.py
@@ -311,7 +311,7 @@ class DeleteAsset(BaseAction):
return assets
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -320,7 +320,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- DeleteAsset(session).register()
+ DeleteAsset(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_delete_asset_byname.py b/pype/ftrack/actions/action_delete_asset_byname.py
index fa966096a8..4f2a0e515c 100644
--- a/pype/ftrack/actions/action_delete_asset_byname.py
+++ b/pype/ftrack/actions/action_delete_asset_byname.py
@@ -13,12 +13,13 @@ class AssetsRemover(BaseAction):
#: Action identifier.
identifier = 'remove.assets'
#: Action label.
- label = 'Delete Assets by Name'
+ label = "Pype Admin"
+ variant = '- Delete Assets by Name'
#: Action description.
description = 'Removes assets from Ftrack and Avalon db with all childs'
#: roles that are allowed to register this action
role_list = ['Pypeclub', 'Administrator']
- icon = '{}/ftrack/action_icons/AssetsRemover.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
#: Db
@@ -131,7 +132,7 @@ class AssetsRemover(BaseAction):
return assets
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -140,7 +141,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- AssetsRemover(session).register()
+ AssetsRemover(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_delete_unpublished.py b/pype/ftrack/actions/action_delete_unpublished.py
index 377e118ffb..5e7f783ba7 100644
--- a/pype/ftrack/actions/action_delete_unpublished.py
+++ b/pype/ftrack/actions/action_delete_unpublished.py
@@ -42,7 +42,7 @@ class VersionsCleanup(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -51,7 +51,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- VersionsCleanup(session).register()
+ VersionsCleanup(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_djvview.py b/pype/ftrack/actions/action_djvview.py
index e0c0334e5f..58914fbc1e 100644
--- a/pype/ftrack/actions/action_djvview.py
+++ b/pype/ftrack/actions/action_djvview.py
@@ -21,9 +21,9 @@ class DJVViewAction(BaseAction):
)
type = 'Application'
- def __init__(self, session):
+ def __init__(self, session, plugins_presets):
'''Expects a ftrack_api.Session instance'''
- super().__init__(session)
+ super().__init__(session, plugins_presets)
self.djv_path = None
self.config_data = config.get_presets()['djv_view']['config']
@@ -218,12 +218,12 @@ class DJVViewAction(BaseAction):
return True
-def register(session):
+def register(session, plugins_presets={}):
"""Register hooks."""
if not isinstance(session, ftrack_api.session.Session):
return
- DJVViewAction(session).register()
+ DJVViewAction(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_job_killer.py b/pype/ftrack/actions/action_job_killer.py
index 44acb24d55..8584b26aa4 100644
--- a/pype/ftrack/actions/action_job_killer.py
+++ b/pype/ftrack/actions/action_job_killer.py
@@ -14,12 +14,13 @@ class JobKiller(BaseAction):
#: Action identifier.
identifier = 'job.killer'
#: Action label.
- label = 'Job Killer'
+ label = "Pype Admin"
+ variant = '- Job Killer'
#: Action description.
description = 'Killing selected running jobs'
#: roles that are allowed to register this action
role_list = ['Pypeclub', 'Administrator']
- icon = '{}/ftrack/action_icons/JobKiller.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
@@ -117,7 +118,7 @@ class JobKiller(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -126,7 +127,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- JobKiller(session).register()
+ JobKiller(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_multiple_notes.py b/pype/ftrack/actions/action_multiple_notes.py
index 338083fe47..6e28b7bed6 100644
--- a/pype/ftrack/actions/action_multiple_notes.py
+++ b/pype/ftrack/actions/action_multiple_notes.py
@@ -112,13 +112,13 @@ class MultipleNotes(BaseAction):
return True
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- MultipleNotes(session).register()
+ MultipleNotes(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_prepare_project.py b/pype/ftrack/actions/action_prepare_project.py
new file mode 100644
index 0000000000..e914fa74f0
--- /dev/null
+++ b/pype/ftrack/actions/action_prepare_project.py
@@ -0,0 +1,378 @@
+import os
+import json
+
+from ruamel import yaml
+from pype.vendor import ftrack_api
+from pype.ftrack import BaseAction
+from pypeapp import config
+from pype.ftrack.lib import get_avalon_attr
+
+from pype.vendor.ftrack_api import session as fa_session
+
+
+class PrepareProject(BaseAction):
+ '''Edit meta data action.'''
+
+ #: Action identifier.
+ identifier = 'prepare.project'
+ #: Action label.
+ label = 'Prepare Project'
+ #: Action description.
+ description = 'Set basic attributes on the project'
+ #: roles that are allowed to register this action
+ role_list = ["Pypeclub", "Administrator", "Project manager"]
+ icon = '{}/ftrack/action_icons/PrepareProject.svg'.format(
+ os.environ.get('PYPE_STATICS_SERVER', '')
+ )
+
+    # Key to store info about triggering create folder structure
+ create_project_structure_key = "create_folder_structure"
+
+ def discover(self, session, entities, event):
+ ''' Validation '''
+ if len(entities) != 1:
+ return False
+
+ if entities[0].entity_type.lower() != "project":
+ return False
+
+ return True
+
+ def interface(self, session, entities, event):
+ if event['data'].get('values', {}):
+ return
+
+ # Inform user that this may take a while
+ self.show_message(event, "Preparing data... Please wait", True)
+
+ self.log.debug("Loading custom attributes")
+ cust_attrs, hier_cust_attrs = get_avalon_attr(session, True)
+ project_defaults = config.get_presets(
+ entities[0]["full_name"]
+ ).get("ftrack", {}).get("project_defaults", {})
+
+ self.log.debug("Preparing data which will be shown")
+ attributes_to_set = {}
+ for attr in hier_cust_attrs:
+ key = attr["key"]
+ attributes_to_set[key] = {
+ "label": attr["label"],
+ "object": attr,
+ "default": project_defaults.get(key)
+ }
+
+ for attr in cust_attrs:
+ if attr["entity_type"].lower() != "show":
+ continue
+ key = attr["key"]
+ attributes_to_set[key] = {
+ "label": attr["label"],
+ "object": attr,
+ "default": project_defaults.get(key)
+ }
+
+ # Sort by label
+ attributes_to_set = dict(sorted(
+ attributes_to_set.items(),
+ key=lambda x: x[1]["label"]
+ ))
+ self.log.debug("Preparing interface for keys: \"{}\"".format(
+ str([key for key in attributes_to_set])
+ ))
+
+ item_splitter = {'type': 'label', 'value': '---'}
+ title = "Prepare Project"
+ items = []
+
+ # Ask if want to trigger Action Create Folder Structure
+ items.append({
+ "type": "label",
+ "value": "Want to create basic Folder Structure?
"
+ })
+
+ items.append({
+ "name": self.create_project_structure_key,
+ "type": "boolean",
+ "value": False,
+ "label": "Check if Yes"
+ })
+
+ items.append(item_splitter)
+ items.append({
+ "type": "label",
+ "value": "Set basic Attributes:
"
+ })
+
+ multiselect_enumerators = []
+
+ # This item will be last (before enumerators)
+ # - sets value of auto synchronization
+ auto_sync_name = "avalon_auto_sync"
+ auto_sync_item = {
+ "name": auto_sync_name,
+ "type": "boolean",
+ "value": project_defaults.get(auto_sync_name, False),
+ "label": "AutoSync to Avalon"
+ }
+
+ for key, in_data in attributes_to_set.items():
+ attr = in_data["object"]
+
+ # initial item definition
+ item = {
+ "name": key,
+ "label": in_data["label"]
+ }
+
+ # cust attr type - may have different visualization
+ type_name = attr["type"]["name"].lower()
+ easy_types = ["text", "boolean", "date", "number"]
+
+ easy_type = False
+ if type_name in easy_types:
+ easy_type = True
+
+ elif type_name == "enumerator":
+
+ attr_config = json.loads(attr["config"])
+ attr_config_data = json.loads(attr_config["data"])
+
+ if attr_config["multiSelect"] is True:
+ multiselect_enumerators.append(item_splitter)
+
+ multiselect_enumerators.append({
+ "type": "label",
+ "value": in_data["label"]
+ })
+
+ default = in_data["default"]
+ names = []
+ for option in sorted(
+ attr_config_data, key=lambda x: x["menu"]
+ ):
+ name = option["value"]
+ new_name = "__{}__{}".format(key, name)
+ names.append(new_name)
+ item = {
+ "name": new_name,
+ "type": "boolean",
+ "label": "- {}".format(option["menu"])
+ }
+ if default:
+ if (
+ isinstance(default, list) or
+ isinstance(default, tuple)
+ ):
+ if name in default:
+ item["value"] = True
+ else:
+ if name == default:
+ item["value"] = True
+
+ multiselect_enumerators.append(item)
+
+ multiselect_enumerators.append({
+ "type": "hidden",
+ "name": "__hidden__{}".format(key),
+ "value": json.dumps(names)
+ })
+ else:
+ easy_type = True
+ item["data"] = attr_config_data
+
+ else:
+ self.log.warning((
+ "Custom attribute \"{}\" has type \"{}\"."
+ " I don't know how to handle"
+ ).format(key, type_name))
+ items.append({
+ "type": "label",
+ "value": (
+ "!!! Can't handle Custom attritubte type \"{}\""
+ " (key: \"{}\")"
+ ).format(type_name, key)
+ })
+
+ if easy_type:
+ item["type"] = type_name
+
+ # default value in interface
+ default = in_data["default"]
+ if default is not None:
+ item["value"] = default
+
+ items.append(item)
+
+ # Add autosync attribute
+ items.append(auto_sync_item)
+
+ # Add enumerator items at the end
+ for item in multiselect_enumerators:
+ items.append(item)
+
+ return {
+ 'items': items,
+ 'title': title
+ }
+
+ def launch(self, session, entities, event):
+ if not event['data'].get('values', {}):
+ return
+
+ in_data = event['data']['values']
+
+ # pop out info about creating project structure
+ create_proj_struct = in_data.pop(self.create_project_structure_key)
+
+ # Find hidden items for multiselect enumerators
+ keys_to_process = []
+ for key in in_data:
+ if key.startswith("__hidden__"):
+ keys_to_process.append(key)
+
+ self.log.debug("Preparing data for Multiselect Enumerators")
+ enumerators = {}
+ for key in keys_to_process:
+ new_key = key.replace("__hidden__", "")
+ enumerator_items = in_data.pop(key)
+ enumerators[new_key] = json.loads(enumerator_items)
+
+ # find values set for multiselect enumerator
+ for key, enumerator_items in enumerators.items():
+ in_data[key] = []
+
+ name = "__{}__".format(key)
+
+ for item in enumerator_items:
+ value = in_data.pop(item)
+ if value is True:
+ new_key = item.replace(name, "")
+ in_data[key].append(new_key)
+
+ self.log.debug("Setting Custom Attribute values:")
+ entity = entities[0]
+ for key, value in in_data.items():
+ entity["custom_attributes"][key] = value
+ self.log.debug("- Key \"{}\" set to \"{}\"".format(key, value))
+
+ session.commit()
+
+        # Create project specific config
+ self.create_project_specific_config(entities[0]["full_name"], in_data)
+
+ # Trigger Create Project Structure action
+ if create_proj_struct is True:
+ self.trigger_action("create.project.structure", event)
+
+ return True
+
+ def create_project_specific_config(self, project_name, json_data):
+ self.log.debug("*** Creating project specifig configs ***")
+
+ path_proj_configs = os.environ.get('PYPE_PROJECT_CONFIGS', "")
+
+ # Skip if PYPE_PROJECT_CONFIGS is not set
+ # TODO show user OS message
+ if not path_proj_configs:
+ self.log.warning((
+ "Environment variable \"PYPE_PROJECT_CONFIGS\" is not set."
+ " Project specific config can't be set."
+ ))
+ return
+
+ path_proj_configs = os.path.normpath(path_proj_configs)
+ # Skip if path does not exist
+ # TODO create if not exist?!!!
+ if not os.path.exists(path_proj_configs):
+ self.log.warning((
+ "Path set in Environment variable \"PYPE_PROJECT_CONFIGS\""
+ " Does not exist."
+ ))
+ return
+
+ project_specific_path = os.path.normpath(
+ os.path.join(path_proj_configs, project_name)
+ )
+ if not os.path.exists(project_specific_path):
+ os.makedirs(project_specific_path)
+ self.log.debug((
+ "Project specific config folder for project \"{}\" created."
+ ).format(project_name))
+
+ # Anatomy ####################################
+ self.log.debug("--- Processing Anatomy Begins: ---")
+
+ anatomy_dir = os.path.normpath(os.path.join(
+ project_specific_path, "anatomy"
+ ))
+ anatomy_path = os.path.normpath(os.path.join(
+ anatomy_dir, "default.yaml"
+ ))
+
+ anatomy = None
+ if os.path.exists(anatomy_path):
+ self.log.debug(
+ "Anatomy file already exist. Trying to read: \"{}\"".format(
+ anatomy_path
+ )
+ )
+ # Try to load data
+ with open(anatomy_path, 'r') as file_stream:
+ try:
+ anatomy = yaml.load(file_stream, Loader=yaml.loader.Loader)
+ self.log.debug("Reading Anatomy file was successful")
+ except yaml.YAMLError as exc:
+ self.log.warning(
+ "Reading Yaml file failed: \"{}\"".format(anatomy_path),
+ exc_info=True
+ )
+
+ if not anatomy:
+ self.log.debug("Anatomy is not set. Duplicating default.")
+ # Create Anatomy folder
+ if not os.path.exists(anatomy_dir):
+ self.log.debug(
+ "Creating Anatomy folder: \"{}\"".format(anatomy_dir)
+ )
+ os.makedirs(anatomy_dir)
+
+ source_items = [
+ os.environ["PYPE_CONFIG"], "anatomy", "default.yaml"
+ ]
+
+ source_path = os.path.normpath(os.path.join(*source_items))
+ with open(source_path, 'r') as file_stream:
+ source_data = file_stream.read()
+
+ with open(anatomy_path, 'w') as file_stream:
+ file_stream.write(source_data)
+
+ # Presets ####################################
+ self.log.debug("--- Processing Presets Begins: ---")
+
+ project_defaults_dir = os.path.normpath(os.path.join(*[
+ project_specific_path, "presets", "ftrack"
+ ]))
+ project_defaults_path = os.path.normpath(os.path.join(*[
+ project_defaults_dir, "project_defaults.json"
+ ]))
+ # Create folder if not exist
+ if not os.path.exists(project_defaults_dir):
+ self.log.debug("Creating Ftrack Presets folder: \"{}\"".format(
+ project_defaults_dir
+ ))
+ os.makedirs(project_defaults_dir)
+
+ with open(project_defaults_path, 'w') as file_stream:
+ json.dump(json_data, file_stream, indent=4)
+
+ self.log.debug("*** Creating project specifig configs Finished ***")
+
+
+def register(session, plugins_presets={}):
+    '''Register plugin. Called when used as a plugin.'''
+
+ if not isinstance(session, ftrack_api.session.Session):
+ return
+
+ PrepareProject(session, plugins_presets).register()
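ftrack form items apparently offer no multiselect widget, so `PrepareProject.interface()` expands each multiselect enumerator into one boolean item per option, named `__{key}__{value}`, plus a hidden `__hidden__{key}` item carrying the JSON-encoded list of those names; `launch()` then reverses the encoding. A standalone sketch of the decode step, using hypothetical form values:

```python
import json

# Hypothetical submitted values for a multiselect enumerator "applications"
# with options "maya" and "nuke", as encoded by PrepareProject.interface().
values = {
    "__hidden__applications": json.dumps(
        ["__applications__maya", "__applications__nuke"]
    ),
    "__applications__maya": True,
    "__applications__nuke": False,
}

# Decode, mirroring PrepareProject.launch().
hidden_keys = [key for key in values if key.startswith("__hidden__")]
for hidden in hidden_keys:
    key = hidden.replace("__hidden__", "")
    names = json.loads(values.pop(hidden))
    prefix = "__{}__".format(key)
    values[key] = [
        name.replace(prefix, "") for name in names if values.pop(name) is True
    ]

print(values)  # {'applications': ['maya']}
```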
diff --git a/pype/ftrack/actions/action_rv.py b/pype/ftrack/actions/action_rv.py
index c41938ada1..6b6591355f 100644
--- a/pype/ftrack/actions/action_rv.py
+++ b/pype/ftrack/actions/action_rv.py
@@ -1,13 +1,14 @@
-from pype.ftrack import BaseAction
import os
import sys
-import json
import subprocess
-from pype.vendor import ftrack_api
import logging
-import operator
-import re
+import traceback
+import json
+
from pypeapp import Logger, config
+from pype.ftrack import BaseAction
+from pype.vendor import ftrack_api
+from avalon import io, api
log = Logger().get_logger(__name__)
@@ -22,13 +23,13 @@ class RVAction(BaseAction):
)
type = 'Application'
- def __init__(self, session):
+ def __init__(self, session, plugins_presets):
""" Constructor
:param session: ftrack Session
:type session: :class:`ftrack_api.Session`
"""
- super().__init__(session)
+ super().__init__(session, plugins_presets)
self.rv_path = None
self.config_data = None
@@ -53,14 +54,7 @@ class RVAction(BaseAction):
def discover(self, session, entities, event):
"""Return available actions based on *event*. """
- selection = event["data"].get("selection", [])
- if len(selection) != 1:
- return False
-
- entityType = selection[0].get("entityType", None)
- if entityType in ["assetversion", "task"]:
- return True
- return False
+ return True
def set_rv_path(self):
self.rv_path = self.config_data.get("rv_path")
@@ -72,151 +66,272 @@ class RVAction(BaseAction):
)
super().register()
+ def get_components_from_entity(self, session, entity, components):
+ """Get components from various entity types.
+
+        The components dictionary is modified in place, so nothing is returned.
+
+ Args:
+ entity (Ftrack entity)
+ components (dict)
+ """
+
+ if entity.entity_type.lower() == "assetversion":
+ for component in entity["components"]:
+ if component["file_type"][1:] not in self.allowed_types:
+ continue
+
+ try:
+ components[entity["asset"]["parent"]["name"]].append(
+ component
+ )
+ except KeyError:
+ components[entity["asset"]["parent"]["name"]] = [component]
+
+ return
+
+ if entity.entity_type.lower() == "task":
+ query = "AssetVersion where task_id is '{0}'".format(entity["id"])
+ for assetversion in session.query(query):
+ self.get_components_from_entity(
+ session, assetversion, components
+ )
+
+ return
+
+ if entity.entity_type.lower() == "shot":
+ query = "AssetVersion where asset.parent.id is '{0}'".format(
+ entity["id"]
+ )
+ for assetversion in session.query(query):
+ self.get_components_from_entity(
+ session, assetversion, components
+ )
+
+ return
+
+ raise NotImplementedError(
+ "\"{}\" entity type is not implemented yet.".format(
+ entity.entity_type
+ )
+ )
+
def interface(self, session, entities, event):
if event['data'].get('values', {}):
return
- entity = entities[0]
- versions = []
-
- entity_type = entity.entity_type.lower()
- if entity_type == "assetversion":
- if (
- entity[
- 'components'
- ][0]['file_type'][1:] in self.allowed_types
- ):
- versions.append(entity)
- else:
- master_entity = entity
- if entity_type == "task":
- master_entity = entity['parent']
-
- for asset in master_entity['assets']:
- for version in asset['versions']:
- # Get only AssetVersion of selected task
- if (
- entity_type == "task" and
- version['task']['id'] != entity['id']
- ):
- continue
- # Get only components with allowed type
- filetype = version['components'][0]['file_type']
- if filetype[1:] in self.allowed_types:
- versions.append(version)
-
- if len(versions) < 1:
- return {
- 'success': False,
- 'message': 'There are no Asset Versions to open.'
+ user = session.query(
+ "User where username is '{0}'".format(
+ os.environ["FTRACK_API_USER"]
+ )
+ ).one()
+ job = session.create(
+ "Job",
+ {
+ "user": user,
+ "status": "running",
+ "data": json.dumps({
+ "description": "RV: Collecting components."
+ })
}
+ )
+        # Commit to give feedback to the user.
+ session.commit()
items = []
- base_label = "v{0} - {1} - {2}"
- default_component = self.config_data.get(
- 'default_component', None
- )
- last_available = None
- select_value = None
- for version in versions:
- for component in version['components']:
- label = base_label.format(
- str(version['version']).zfill(3),
- version['asset']['type']['name'],
- component['name']
- )
-
- try:
- location = component[
- 'component_locations'
- ][0]['location']
- file_path = location.get_filesystem_path(component)
- except Exception:
- file_path = component[
- 'component_locations'
- ][0]['resource_identifier']
-
- if os.path.isdir(os.path.dirname(file_path)):
- last_available = file_path
- if component['name'] == default_component:
- select_value = file_path
- items.append(
- {'label': label, 'value': file_path}
- )
-
- if len(items) == 0:
- return {
- 'success': False,
- 'message': (
- 'There are no Asset Versions with accessible path.'
- )
- }
-
- item = {
- 'label': 'Items to view',
- 'type': 'enumerator',
- 'name': 'path',
- 'data': sorted(
- items,
- key=operator.itemgetter('label'),
- reverse=True
- )
- }
- if select_value is not None:
- item['value'] = select_value
+ try:
+ items = self.get_interface_items(session, entities)
+ except Exception:
+ log.error(traceback.format_exc())
+ job["status"] = "failed"
else:
- item['value'] = last_available
+ job["status"] = "done"
- return {'items': [item]}
+ # Commit to end job.
+ session.commit()
+
+ return {"items": items}
+
+ def get_interface_items(self, session, entities):
+
+ components = {}
+ for entity in entities:
+ self.get_components_from_entity(session, entity, components)
+
+ # Sort by version
+ for parent_name, entities in components.items():
+ version_mapping = {}
+ for entity in entities:
+ try:
+ version_mapping[entity["version"]["version"]].append(
+ entity
+ )
+ except KeyError:
+ version_mapping[entity["version"]["version"]] = [entity]
+
+ # Sort same versions by date.
+ for version, entities in version_mapping.items():
+ version_mapping[version] = sorted(
+ entities, key=lambda x: x["version"]["date"], reverse=True
+ )
+
+ components[parent_name] = []
+ for version in reversed(sorted(version_mapping.keys())):
+ components[parent_name].extend(version_mapping[version])
+
+ # Items to present to user.
+ items = []
+ label = "{} - v{} - {}"
+ for parent_name, entities in components.items():
+ data = []
+ for entity in entities:
+ data.append(
+ {
+ "label": label.format(
+ entity["version"]["asset"]["name"],
+ str(entity["version"]["version"]).zfill(3),
+ entity["file_type"][1:]
+ ),
+ "value": entity["id"]
+ }
+ )
+
+ items.append(
+ {
+ "label": parent_name,
+ "type": "enumerator",
+ "name": parent_name,
+ "data": data,
+ "value": data[0]["value"]
+ }
+ )
+
+ return items
def launch(self, session, entities, event):
"""Callback method for RV action."""
# Launching application
if "values" not in event["data"]:
return
- filename = event['data']['values']['path']
- fps = entities[0].get('custom_attributes', {}).get('fps', None)
-
- cmd = []
- # change frame number to padding string for RV to play sequence
- try:
- frame = re.findall(r'(\d+).', filename)[-1]
- except KeyError:
- # we didn't detected frame number
- pass
- else:
- padding = '#' * len(frame)
- pos = filename.rfind(frame)
- filename = filename[:pos] + padding + filename[
-                filename.rfind('.'):]
-
- # RV path
- cmd.append(os.path.normpath(self.rv_path))
- if fps is not None:
- cmd.append("-fps {}".format(int(fps)))
- cmd.append(os.path.normpath(filename))
- log.info('Running rv: {}'.format(' '.join(cmd)))
- try:
- # Run RV with these commands
- subprocess.Popen(' '.join(cmd), shell=True)
- except Exception as e:
- return {
- 'success': False,
- 'message': 'File "{}" was not found.'.format(
- e
- )
+ user = session.query(
+ "User where username is '{0}'".format(
+ os.environ["FTRACK_API_USER"]
+ )
+ ).one()
+ job = session.create(
+ "Job",
+ {
+ "user": user,
+ "status": "running",
+ "data": json.dumps({
+ "description": "RV: Collecting file paths."
+ })
}
+ )
+        # Commit to give feedback to the user.
+ session.commit()
+
+ paths = []
+ try:
+ paths = self.get_file_paths(session, event)
+ except Exception:
+ log.error(traceback.format_exc())
+ job["status"] = "failed"
+ else:
+ job["status"] = "done"
+
+ # Commit to end job.
+ session.commit()
+
+ args = [os.path.normpath(self.rv_path)]
+
+ fps = entities[0].get("custom_attributes", {}).get("fps", None)
+ if fps is not None:
+ args.extend(["-fps", str(fps)])
+
+ args.extend(paths)
+
+ log.info("Running rv: {}".format(args))
+
+ subprocess.Popen(args)
return True
+ def get_file_paths(self, session, event):
+ """Get file paths from selected components."""
-def register(session):
+ link = session.get(
+ "Component", list(event["data"]["values"].values())[0]
+ )["version"]["asset"]["parent"]["link"][0]
+ project = session.get(link["type"], link["id"])
+ os.environ["AVALON_PROJECT"] = project["name"]
+ api.Session["AVALON_PROJECT"] = project["name"]
+ io.install()
+
+ location = ftrack_api.Session().pick_location()
+
+ paths = []
+ for parent_name in sorted(event["data"]["values"].keys()):
+ component = session.get(
+ "Component", event["data"]["values"][parent_name]
+ )
+
+ # Newer publishes have the source referenced in Ftrack.
+ online_source = False
+ for neighbour_component in component["version"]["components"]:
+ if neighbour_component["name"] != "ftrackreview-mp4_src":
+ continue
+
+ paths.append(
+ location.get_filesystem_path(neighbour_component)
+ )
+ online_source = True
+
+ if online_source:
+ continue
+
+ asset = io.find_one({"type": "asset", "name": parent_name})
+ subset = io.find_one(
+ {
+ "type": "subset",
+ "name": component["version"]["asset"]["name"],
+ "parent": asset["_id"]
+ }
+ )
+ version = io.find_one(
+ {
+ "type": "version",
+ "name": component["version"]["version"],
+ "parent": subset["_id"]
+ }
+ )
+ representation = io.find_one(
+ {
+ "type": "representation",
+ "parent": version["_id"],
+ "name": component["file_type"][1:]
+ }
+ )
+ if representation is None:
+ representation = io.find_one(
+ {
+ "type": "representation",
+ "parent": version["_id"],
+ "name": "preview"
+ }
+ )
+ paths.append(api.get_representation_path(representation))
+
+ return paths
+
+
+def register(session, plugins_presets={}):
"""Register hooks."""
if not isinstance(session, ftrack_api.session.Session):
return
- RVAction(session).register()
+ RVAction(session, plugins_presets).register()
def main(arguments=None):
@@ -257,249 +372,3 @@ def main(arguments=None):
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
-
-"""
-Usage: RV movie and image sequence viewer
-
- One File: rv foo.jpg
- This Directory: rv .
- Other Directory: rv /path/to/dir
- Image Sequence w/Audio: rv [ in.#.tif in.wav ]
- Stereo w/Audio: rv [ left.#.tif right.#.tif in.wav ]
- Stereo Movies: rv [ left.mov right.mov ]
- Stereo Movie (from rvio): rv stereo.mov
- Cuts Sequenced: rv cut1.mov cut2.#.exr cut3.mov
- Stereo Cuts Sequenced: rv [ l1.mov r1.mov ] [ l2.mov r2.mov ]
- Forced Anamorphic: rv [ -pa 2.0 fullaperture.#.dpx ]
- Compare: rv -wipe a.exr b.exr
- Difference: rv -diff a.exr b.exr
- Slap Comp Over: rv -over a.exr b.exr
- Tile Images: rv -tile *.jpg
- Cache + Play Movie: rv -l -play foo.mov
- Cache Images to Examine: rv -c big.#.exr
- Fullscreen on 2nd monitor: rv -fullscreen -screen 1
- Select Source View: rv [ in.exr -select view right ]
- Select Source Layer: rv [ in.exr -select layer light1.diffuse ]
- (single-view source)
- Select Source Layer: rv [ in.exr -select layer left,light1.diffuse ]
- (multi-view source)
- Select Source Channel: rv [ in.exr -select channel R ]
- (single-view, single-layer source)
- Select Source Channel: rv [ in.exr -select channel left,Diffuse,R ]
- (multi-view, multi-layer source)
-
-Image Sequence Numbering
-
- Frames 1 to 100 no padding: image.1-100@.jpg
- Frames 1 to 100 padding 4: image.1-100#.jpg -or- image.1-100@@@@.jpg
- Frames 1 to 100 padding 5: image.1-100@@@@@.jpg
- Frames -100 to -200 padding 4: image.-100--200#jpg
- printf style padding 4: image.%04d.jpg
- printf style w/range: image.%04d.jpg 1-100
- printf no padding w/range: image.%d.jpg 1-100
- Complicated no pad 1 to 100: image_887f1-100@_982.tif
- Stereo pair (left,right): image.#.%V.tif
- Stereo pair (L,R): image.#.%v.tif
- All Frames, padding 4: image.#.jpg
- All Frames in Sequence: image.*.jpg
- All Frames in Directory: /path/to/directory
- All Frames in current dir: .
-
-Per-source arguments (inside [ and ] restricts to that source only)
-
--pa %f Per-source pixel aspect ratio
--ro %d Per-source range offset
--rs %d Per-source range start
--fps %f Per-source or global fps
--ao %f Per-source audio offset in seconds
--so %f Per-source stereo relative eye offset
--rso %f Per-source stereo right eye offset
--volume %f Per-source or global audio volume (default=1)
--fcdl %S Per-source file CDL
--lcdl %S Per-source look CDL
--flut %S Per-source file LUT
--llut %S Per-source look LUT
--pclut %S Per-source pre-cache software LUT
--cmap %S Per-source channel mapping
- (channel names, separated by ',')
--select %S %S Per-source view/layer/channel selection
--crop %d %d %d %d Per-source crop (xmin, ymin, xmax, ymax)
--uncrop %d %d %d %d Per-source uncrop (width, height, xoffset, yoffset)
--in %d Per-source cut-in frame
--out %d Per-source cut-out frame
--noMovieAudio Disable source movie's baked-in audio
--inparams ... Source specific input parameters
-
- ... Input sequence patterns, images, movies, or directories
--c Use region frame cache
--l Use look-ahead cache
--nc Use no caching
--s %f Image scale reduction
--ns Nuke style sequence notation
- (deprecated and ignored -- no longer needed)
--noRanges No separate frame ranges
- (i.e. 1-10 will be considered a file)
--sessionType %S Session type (sequence, stack) (deprecated, use -view)
--stereo %S Stereo mode
- (hardware, checker, scanline, anaglyph, lumanaglyph,
- left, right, pair, mirror, hsqueezed, vsqueezed)
--stereoSwap %d Swap left and right eyes stereo display
- (0 == no, 1 == yes, default=0)
--vsync %d Video Sync (1 = on, 0 = off, default = 1)
--comp %S Composite mode
- (over, add, difference, replace, topmost)
--layout %S Layout mode (packed, row, column, manual)
--over Same as -comp over -view defaultStack
--diff Same as -comp difference -view defaultStack
--replace Same as -comp replace -view defaultStack
--topmost Same as -comp topmost -view defaultStack
--layer Same as -comp topmost -view defaultStack, with strict
- frame ranges
--tile Same as -layout packed -view defaultLayout
--wipe Same as -over with wipes enabled
--view %S Start with a particular view
--noSequence Don't contract files into sequences
--inferSequence Infer sequences from one file
--autoRetime %d Automatically retime conflicting media fps in
- sequences and stacks (1 = on, 0 = off, default = 1)
--rthreads %d Number of reader threads (default=1)
--fullscreen Start in fullscreen mode
--present Start in presentation mode (using presentation device)
--presentAudio %d Use presentation audio device in presentation mode
- (1 = on, 0 = off)
--presentDevice %S Presentation mode device
--presentVideoFormat %S Presentation mode override video format
- (device specific)
--presentDataFormat %S Presentation mode override data format
- (device specific)
--screen %d Start on screen (0, 1, 2, ...)
--noBorders No window manager decorations
--geometry %d %d [%d %d] Start geometry X, Y, W, H
--fitMedia Fit the window to the first media shown
--init %S Override init script
--nofloat Turn off floating point by default
--maxbits %d Maximum default bit depth (default=32)
--gamma %f Set display gamma (default=1)
--sRGB Display using linear -> sRGB conversion
--rec709 Display using linear -> Rec 709 conversion
--dlut %S Apply display LUT
--brightness %f Set display relative brightness in stops (default=0)
--resampleMethod %S Resampling method
- (area, linear, cubic, nearest, default=area)
--eval %S Evaluate Mu expression at every session start
--pyeval %S Evaluate Python expression at every session start
--nomb Hide menu bar on start up
--play Play on startup
--playMode %d Playback mode (0=Context dependent, 1=Play all frames,
- 2=Realtime, default=0)
--loopMode %d Playback loop mode
- (0=Loop, 1=Play Once, 2=Ping-Pong, default=0)
--cli Mu command line interface
--vram %f VRAM usage limit in Mb, default = 64.000000
--cram %f Max region cache RAM usage in Gb,
- (6.4Gb available, default 1Gb)
--lram %f Max look-ahead cache RAM usage in Gb,
- (6.4Gb available, default 0.2Gb)
--noPBO Prevent use of GL PBOs for pixel transfer
--prefetch Prefetch images for rendering
--useAppleClientStorage Use APPLE_client_storage extension
--useThreadedUpload Use threading for texture uploading/downloading
- if possible
--bwait %f Max buffer wait time in cached seconds, default 5.0
--lookback %f Percentage of the lookahead cache reserved for
- frames behind the playhead, default 25
--yuv Assume YUV hardware conversion
--noaudio Turn off audio
--audiofs %d Use fixed audio frame size
- (results are hardware dependant ... try 512)
--audioCachePacket %d Audio cache packet size in samples (default=2048)
--audioMinCache %f Audio cache min size in seconds (default=0.300000)
--audioMaxCache %f Audio cache max size in seconds (default=0.600000)
--audioModule %S Use specific audio module
--audioDevice %S Use specific audio device
--audioRate %f Use specific output audio rate (default=ask hardware)
--audioPrecision %d Use specific output audio precision (default=16)
--audioNice %d Close audio device when not playing
- (may cause problems on some hardware) default=0
--audioNoLock %d Do not use hardware audio/video syncronization
- (use software instead, default=0)
--audioPreRoll %d Preroll audio on device open (Linux only; default=0)
--audioGlobalOffset %f Global audio offset in seconds
--audioDeviceLatency %f Audio device latency compensation in milliseconds
--bg %S Background pattern (default=black, white, grey18,
- grey50, checker, crosshatch)
--formats Show all supported image and movie formats
--apple Use Quicktime and NSImage libraries (on OS X)
--cinalt Use alternate Cineon/DPX readers
--exrcpus %d EXR thread count (default=0)
--exrRGBA EXR Always read as RGBA (default=false)
--exrInherit EXR guess channel inheritance (default=false)
--exrNoOneChannel EXR never use one channel planar images (default=false)
--exrIOMethod %d [%d] EXR I/O Method (0=standard, 1=buffered, 2=unbuffered,
- 3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered,
- default=1) and optional chunk size (default=61440)
--exrReadWindowIsDisplayWindow
- EXR read window is display window (default=false)
--exrReadWindow %d EXR Read Window Method (0=Data, 1=Display,
- 2=Union, 3=Data inside Display, default=3)
--jpegRGBA Make JPEG four channel RGBA on read
- (default=no, use RGB or YUV)
--jpegIOMethod %d [%d] JPEG I/O Method (0=standard, 1=buffered,
- 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
- 5=AsyncUnbuffered, default=1) and optional
- chunk size (default=61440)
--cinpixel %S Cineon pixel storage (default=RGB8_PLANAR)
--cinchroma Use Cineon chromaticity values
- (for default reader only)
--cinIOMethod %d [%d] Cineon I/O Method (0=standard, 1=buffered,
- 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
- 5=AsyncUnbuffered, default=1) and optional
- chunk size (default=61440)
--dpxpixel %S DPX pixel storage (default=RGB8_PLANAR)
--dpxchroma Use DPX chromaticity values (for default reader only)
--dpxIOMethod %d [%d] DPX I/O Method (0=standard, 1=buffered, 2=unbuffered,
- 3=MemoryMap, 4=AsyncBuffered, 5=AsyncUnbuffered,
- default=1) and optional chunk size (default=61440)
--tgaIOMethod %d [%d] TARGA I/O Method (0=standard, 1=buffered,
- 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
- 5=AsyncUnbuffered, default=1)
- and optional chunk size (default=61440)
--tiffIOMethod %d [%d] TIFF I/O Method (0=standard, 1=buffered,
- 2=unbuffered, 3=MemoryMap, 4=AsyncBuffered,
- 5=AsyncUnbuffered, default=1) and optional
- chunk size (default=61440)
--lic %S Use specific license file
--noPrefs Ignore preferences
--resetPrefs Reset preferences to default values
--qtcss %S Use QT style sheet for UI
--qtstyle %S Use QT style
--qtdesktop %d QT desktop aware, default=1 (on)
--xl Aggressively absorb screen space for large media
--mouse %d Force tablet/stylus events to be treated as a
- mouse events, default=0 (off)
--network Start networking
--networkPort %d Port for networking
--networkHost %S Alternate host/address for incoming connections
--networkTag %S Tag to mark automatically saved port file
--networkConnect %S [%d] Start networking and connect to host at port
--networkPerm %d Default network connection permission
- (0=Ask, 1=Allow, 2=Deny, default=0)
--reuse %d Try to re-use the current session for
- incoming URLs (1 = reuse session,
- 0 = new session, default = 1)
--nopackages Don't load any packages at startup (for debugging)
--encodeURL Encode the command line as
- an rvlink URL, print, and exit
--bakeURL Fully bake the command line as an
- rvlink URL, print, and exit
--sendEvent ... Send external events e.g. -sendEvent 'name' 'content'
--flags ... Arbitrary flags (flag, or 'name=value')
- for use in Mu code
--debug ... Debug category
--version Show RV version number
--strictlicense Exit rather than consume an rv license if no rvsolo
- licenses are available
--prefsPath %S Alternate path to preferences directory
--sleep %d Sleep (in seconds) before starting to
- allow attaching debugger
-"""
diff --git a/pype/ftrack/actions/action_set_version.py b/pype/ftrack/actions/action_set_version.py
index f6e745b3ec..5bf965e3ef 100644
--- a/pype/ftrack/actions/action_set_version.py
+++ b/pype/ftrack/actions/action_set_version.py
@@ -71,7 +71,7 @@ class SetVersion(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -80,7 +80,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- SetVersion(session).register()
+ SetVersion(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_start_timer.py b/pype/ftrack/actions/action_start_timer.py
index d27908541e..36752a1edc 100644
--- a/pype/ftrack/actions/action_start_timer.py
+++ b/pype/ftrack/actions/action_start_timer.py
@@ -19,61 +19,25 @@ class StartTimer(BaseAction):
entity = entities[0]
if entity.entity_type.lower() != 'task':
return
- self.start_ftrack_timer(entity)
- try:
- self.start_clockify_timer(entity)
- except Exception:
- self.log.warning(
- 'Failed starting Clockify timer for task: ' + entity['name']
- )
+
+ user = self.session.query(
+ "User where username is \"{}\"".format(self.session.api_user)
+ ).one()
+
+ user.start_timer(entity, force=True)
+ self.session.commit()
+
+ self.log.info(
+ "Starting Ftrack timer for task: {}".format(entity['name'])
+ )
+
return
- def start_ftrack_timer(self, task):
- user_query = 'User where username is "{}"'.format(self.session.api_user)
- user = self.session.query(user_query).one()
- self.log.info('Starting Ftrack timer for task: ' + task['name'])
- user.start_timer(task, force=True)
- self.session.commit()
- def start_clockify_timer(self, task):
- # Validate Clockify settings if Clockify is required
- clockify_timer = os.environ.get('CLOCKIFY_WORKSPACE', None)
- if clockify_timer is None:
- return
-
- from pype.clockify import ClockifyAPI
- clockapi = ClockifyAPI()
- if clockapi.verify_api() is False:
- return
- task_type = task['type']['name']
- project_name = task['project']['full_name']
-
- def get_parents(entity):
- output = []
- if entity.entity_type.lower() == 'project':
- return output
- output.extend(get_parents(entity['parent']))
- output.append(entity['name'])
-
- return output
-
- desc_items = get_parents(task['parent'])
- desc_items.append(task['name'])
- description = '/'.join(desc_items)
-
- project_id = clockapi.get_project_id(project_name)
- tag_ids = []
- tag_ids.append(clockapi.get_tag_id(task_type))
- clockapi.start_time_entry(
- description, project_id, tag_ids=tag_ids
- )
- self.log.info('Starting Clockify timer for task: ' + task['name'])
-
-
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- StartTimer(session).register()
+ StartTimer(session, plugins_presets).register()
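With the Clockify branch gone, starting a timer is one call into the native ftrack API. A minimal usage sketch, assuming `session` is a connected `ftrack_api.Session` and `task` is a Task entity:

```python
user = session.query(
    'User where username is "{}"'.format(session.api_user)
).one()
# force=True stops any timer the user already has running first.
user.start_timer(task, force=True)
session.commit()
```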
diff --git a/pype/ftrack/actions/action_sync_hier_attrs_local.py b/pype/ftrack/actions/action_sync_hier_attrs_local.py
index c6b12028bc..01434470f3 100644
--- a/pype/ftrack/actions/action_sync_hier_attrs_local.py
+++ b/pype/ftrack/actions/action_sync_hier_attrs_local.py
@@ -19,16 +19,17 @@ class SyncHierarchicalAttrs(BaseAction):
#: Action identifier.
identifier = 'sync.hierarchical.attrs.local'
#: Action label.
- label = 'Sync HierAttrs - Local'
+ label = "Pype Admin"
+ variant = '- Sync Hier Attrs (Local)'
#: Action description.
description = 'Synchronize hierarchical attributes'
#: Icon
- icon = '{}/ftrack/action_icons/SyncHierarchicalAttrsLocal.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
#: roles that are allowed to register this action
- role_list = ['Administrator']
+ role_list = ['Pypeclub', 'Administrator', 'Project Manager']
def discover(self, session, entities, event):
''' Validation '''
@@ -41,6 +42,7 @@ class SyncHierarchicalAttrs(BaseAction):
return False
def launch(self, session, entities, event):
+ self.interface_messages = {}
user = session.query(
'User where id is "{}"'.format(event['source']['user']['id'])
).one()
@@ -53,13 +55,27 @@ class SyncHierarchicalAttrs(BaseAction):
})
})
session.commit()
+ self.log.debug('Job with id "{}" created'.format(job['id']))
+
+ process_session = ftrack_api.Session(
+ server_url=session.server_url,
+ api_key=session.api_key,
+ api_user=session.api_user,
+ auto_connect_event_hub=True
+ )
try:
# Collect hierarchical attrs
+ self.log.debug('Collecting Hierarchical custom attributes started')
custom_attributes = {}
- all_avalon_attr = session.query(
+ all_avalon_attr = process_session.query(
'CustomAttributeGroup where name is "avalon"'
).one()
+
+ error_key = (
+ 'Hierarchical attributes with set "default" value (not allowed)'
+ )
+
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
if 'avalon_' in cust_attr['key']:
continue
@@ -68,6 +84,12 @@ class SyncHierarchicalAttrs(BaseAction):
continue
if cust_attr['default']:
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+ self.interface_messages[error_key].append(
+ cust_attr['label']
+ )
+
self.log.warning((
'Custom attribute "{}" has set default value.'
' This attribute can\'t be synchronized'
@@ -76,6 +98,10 @@ class SyncHierarchicalAttrs(BaseAction):
custom_attributes[cust_attr['key']] = cust_attr
+ self.log.debug(
+ 'Collecting Hierarchical custom attributes has finished'
+ )
+
if not custom_attributes:
msg = 'No hierarchical attributes to sync.'
self.log.debug(msg)
@@ -93,28 +119,61 @@ class SyncHierarchicalAttrs(BaseAction):
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = project_name
- for entity in entities:
+ _entities = self._get_entities(event, process_session)
+
+ for entity in _entities:
+ self.log.debug(30*'-')
+ self.log.debug(
+ 'Processing entity "{}"'.format(entity.get('name', entity))
+ )
+
+ ent_name = entity.get('name', entity)
+ if entity.entity_type.lower() == 'project':
+ ent_name = entity['full_name']
+
for key in custom_attributes:
+ self.log.debug(30*'*')
+ self.log.debug(
+ 'Processing Custom attribute key "{}"'.format(key)
+ )
# check if entity has that attribute
if key not in entity['custom_attributes']:
- self.log.debug(
- 'Hierachical attribute "{}" not found on "{}"'.format(
- key, entity.get('name', entity)
- )
+ error_key = 'Missing key on entities'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ self.interface_messages[error_key].append(
+ '- key: "{}" - entity: "{}"'.format(key, ent_name)
)
+
+ self.log.error((
+ '- key "{}" not found on "{}"'
+ ).format(key, ent_name))
continue
value = self.get_hierarchical_value(key, entity)
if value is None:
- self.log.warning(
- 'Hierarchical attribute "{}" not set on "{}"'.format(
- key, entity.get('name', entity)
- )
+ error_key = (
+ 'Missing value for key on entity'
+ ' and its parents (synchronization was skipped)'
)
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ self.interface_messages[error_key].append(
+ '- key: "{}" - entity: "{}"'.format(key, ent_name)
+ )
+
+ self.log.warning((
+ '- key "{}" not set on "{}" or its parents'
+ ).format(key, ent_name))
continue
self.update_hierarchical_attribute(entity, key, value)
+ job['status'] = 'done'
+ session.commit()
+
except Exception:
self.log.error(
'Action "{}" failed'.format(self.label),
@@ -127,6 +186,11 @@ class SyncHierarchicalAttrs(BaseAction):
if job['status'] in ('queued', 'running'):
job['status'] = 'failed'
session.commit()
+ if self.interface_messages:
+ title = "Errors during SyncHierarchicalAttrs"
+ self.show_interface_from_dict(
+ messages=self.interface_messages, title=title, event=event
+ )
return True
@@ -146,6 +210,27 @@ class SyncHierarchicalAttrs(BaseAction):
entity.entity_type.lower() == 'task'
):
return
+
+ ent_name = entity.get('name', entity)
+ if entity.entity_type.lower() == 'project':
+ ent_name = entity['full_name']
+
+ hierarchy = '/'.join(
+ [a['name'] for a in entity.get('ancestors', [])]
+ )
+ if hierarchy:
+ hierarchy = '/'.join(
+ [entity['project']['full_name'], hierarchy, entity['name']]
+ )
+ elif entity.entity_type.lower() == 'project':
+ hierarchy = entity['full_name']
+ else:
+ hierarchy = '/'.join(
+ [entity['project']['full_name'], entity['name']]
+ )
+
+ self.log.debug('- updating entity "{}"'.format(hierarchy))
+
# collect entity's custom attributes
custom_attributes = entity.get('custom_attributes')
if not custom_attributes:
@@ -153,24 +238,49 @@ class SyncHierarchicalAttrs(BaseAction):
mongoid = custom_attributes.get(self.ca_mongoid)
if not mongoid:
- self.log.debug('Entity "{}" is not synchronized to avalon.'.format(
- entity.get('name', entity)
- ))
+ error_key = 'Missing MongoID on entities (try SyncToAvalon first)'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ if ent_name not in self.interface_messages[error_key]:
+ self.interface_messages[error_key].append(ent_name)
+
+ self.log.warning(
+ '-- entity "{}" is not synchronized to avalon. Skipping'.format(
+ ent_name
+ )
+ )
return
try:
mongoid = ObjectId(mongoid)
except Exception:
- self.log.warning('Entity "{}" has stored invalid MongoID.'.format(
- entity.get('name', entity)
- ))
+ error_key = 'Invalid MongoID on entities (try SyncToAvalon)'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ if ent_name not in self.interface_messages[error_key]:
+ self.interface_messages[error_key].append(ent_name)
+
+ self.log.warning(
+ '-- entity "{}" has stored invalid MongoID. Skipping'.format(
+ ent_name
+ )
+ )
return
# Find entity in Mongo DB
mongo_entity = self.db_con.find_one({'_id': mongoid})
if not mongo_entity:
+ error_key = 'Entities not found in Avalon DB (try SyncToAvalon)'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ if ent_name not in self.interface_messages[error_key]:
+ self.interface_messages[error_key].append(ent_name)
+
self.log.warning(
- 'Entity "{}" is not synchronized to avalon.'.format(
- entity.get('name', entity)
+ '-- entity "{}" was not found in DB by id "{}". Skipping'.format(
+ ent_name, str(mongoid)
)
)
return
@@ -188,17 +298,21 @@ class SyncHierarchicalAttrs(BaseAction):
{'$set': {'data': data}}
)
+ self.log.debug(
+ '-- stored value "{}"'.format(value)
+ )
+
for child in entity.get('children', []):
self.update_hierarchical_attribute(child, key, value)
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- SyncHierarchicalAttrs(session).register()
+ SyncHierarchicalAttrs(session, plugins_presets).register()
def main(arguments=None):
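The error bookkeeping in `SyncHierarchicalAttrs` repeats the same three lines for every error type (`if error_key not in self.interface_messages: ...`). The same accumulation can be expressed with `collections.defaultdict`; a sketch of the equivalent pattern with made-up sample data, not the code in the diff:

```python
from collections import defaultdict

interface_messages = defaultdict(list)


def report(error_key, detail):
    # Mirrors the repeated append-if-missing pattern in the action.
    if detail not in interface_messages[error_key]:
        interface_messages[error_key].append(detail)


report("Missing key on entities", '- key: "fps" - entity: "sh010"')
report("Missing key on entities", '- key: "fps" - entity: "sh020"')

for error_key, details in interface_messages.items():
    print(error_key)
    for detail in details:
        print("  {}".format(detail))
```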
diff --git a/pype/ftrack/actions/action_sync_to_avalon_local.py b/pype/ftrack/actions/action_sync_to_avalon_local.py
index 34070c7e1f..6a43688026 100644
--- a/pype/ftrack/actions/action_sync_to_avalon_local.py
+++ b/pype/ftrack/actions/action_sync_to_avalon_local.py
@@ -47,11 +47,12 @@ class SyncToAvalon(BaseAction):
#: Action identifier.
identifier = 'sync.to.avalon.local'
#: Action label.
- label = 'SyncToAvalon - Local'
+ label = "Pype Admin"
+ variant = '- Sync To Avalon (Local)'
#: Action description.
description = 'Send data from Ftrack to Avalon'
#: Action icon.
- icon = '{}/ftrack/action_icons/SyncToAvalon-local.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
#: roles that are allowed to register this action
@@ -59,7 +60,7 @@ class SyncToAvalon(BaseAction):
#: Action priority
priority = 200
- def __init__(self, session):
+ def __init__(self, session, plugins_presets):
-        super(SyncToAvalon, self).__init__(session)
+        super(SyncToAvalon, self).__init__(session, plugins_presets)
# reload utils on initialize (in case of server restart)
@@ -177,17 +178,7 @@ class SyncToAvalon(BaseAction):
job['status'] = 'failed'
session.commit()
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier='sync.hierarchical.attrs.local',
- selection=event['data']['selection']
- ),
- source=dict(
- user=event['source']['user']
- )
- )
- session.event_hub.publish(event, on_error='ignore')
+ self.trigger_action("sync.hierarchical.attrs.local", event)
if len(message) > 0:
message = "Unable to sync: {}".format(message)
@@ -212,7 +203,7 @@ class SyncToAvalon(BaseAction):
self.add_childs_to_importable(child)
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -221,7 +212,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- SyncToAvalon(session).register()
+ SyncToAvalon(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_test.py b/pype/ftrack/actions/action_test.py
index dcb9dd32d0..a2bc8bf892 100644
--- a/pype/ftrack/actions/action_test.py
+++ b/pype/ftrack/actions/action_test.py
@@ -11,12 +11,10 @@ from pype.ftrack import BaseAction
from avalon import io, inventory, schema
-ignore_me = True
-
-
class TestAction(BaseAction):
'''Edit meta data action.'''
+ ignore_me = True
#: Action identifier.
identifier = 'test.action'
#: Action label.
@@ -42,13 +40,13 @@ class TestAction(BaseAction):
return True
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- TestAction(session).register()
+ TestAction(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_thumbToChildern.py b/pype/ftrack/actions/action_thumbnail_to_childern.py
similarity index 92%
rename from pype/ftrack/actions/action_thumbToChildern.py
rename to pype/ftrack/actions/action_thumbnail_to_childern.py
index 4e7f1298f5..101b678512 100644
--- a/pype/ftrack/actions/action_thumbToChildern.py
+++ b/pype/ftrack/actions/action_thumbnail_to_childern.py
@@ -14,9 +14,11 @@ class ThumbToChildren(BaseAction):
# Action identifier
identifier = 'thumb.to.children'
# Action label
- label = 'Thumbnail to Children'
+ label = 'Thumbnail'
+ # Action variant
+ variant = " to Children"
# Action icon
- icon = '{}/ftrack/action_icons/thumbToChildren.svg'.format(
+ icon = '{}/ftrack/action_icons/Thumbnail.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
@@ -64,12 +66,12 @@ class ThumbToChildren(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- ThumbToChildren(session).register()
+ ThumbToChildren(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/actions/action_thumbToParent.py b/pype/ftrack/actions/action_thumbnail_to_parent.py
similarity index 94%
rename from pype/ftrack/actions/action_thumbToParent.py
rename to pype/ftrack/actions/action_thumbnail_to_parent.py
index 632d2a50b2..c382d9303c 100644
--- a/pype/ftrack/actions/action_thumbToParent.py
+++ b/pype/ftrack/actions/action_thumbnail_to_parent.py
@@ -13,9 +13,11 @@ class ThumbToParent(BaseAction):
# Action identifier
identifier = 'thumb.to.parent'
# Action label
- label = 'Thumbnail to Parent'
+ label = 'Thumbnail'
+ # Action variant
+ variant = " to Parent"
# Action icon
- icon = '{}/ftrack/action_icons/thumbToParent.svg'.format(
+ icon = '{}/ftrack/action_icons/Thumbnail.svg'.format(
os.environ.get('PYPE_STATICS_SERVER', '')
)
@@ -86,12 +88,12 @@ class ThumbToParent(BaseAction):
}
-def register(session, **kw):
+def register(session, plugins_presets={}):
'''Register action. Called when used as an event plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- ThumbToParent(session).register()
+ ThumbToParent(session, plugins_presets).register()
def main(arguments=None):
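Several actions in this release split their single `label` into a `label` plus `variant` pair; ftrack composes the two into the menu entry and groups actions that share a label, which is how the "Thumbnail" and "Pype Admin" families end up under one heading. A toy illustration of that grouping (the grouping rule itself is ftrack UI behaviour, inferred from how these diffs use it):

```python
actions = [
    {"label": "Thumbnail", "variant": " to Children"},
    {"label": "Thumbnail", "variant": " to Parent"},
    {"label": "Pype Admin", "variant": "- Sync To Avalon (Local)"},
    {"label": "Pype Admin", "variant": "- Sync Hier Attrs (Local)"},
]

groups = {}
for action in actions:
    groups.setdefault(action["label"], []).append(action["variant"])

for label, variants in groups.items():
    print(label)
    for variant in variants:
        print("  {}{}".format(label, variant))
```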
diff --git a/pype/ftrack/actions/action_where_run_ask.py b/pype/ftrack/actions/action_where_run_ask.py
new file mode 100644
index 0000000000..0351c09909
--- /dev/null
+++ b/pype/ftrack/actions/action_where_run_ask.py
@@ -0,0 +1,46 @@
+import os
+from pype.vendor import ftrack_api
+from pype.ftrack import BaseAction
+
+
+class ActionAskWhereIRun(BaseAction):
+ """ Sometimes user forget where pipeline with his credentials is running.
+ - this action triggers `ActionShowWhereIRun`
+ """
+ # Action is ignored by default
+ ignore_me = True
+ #: Action identifier.
+ identifier = 'ask.where.i.run'
+ #: Action label.
+ label = 'Ask where I run'
+ #: Action description.
+    description = 'Triggers info about PCs where the user has Pype running'
+ #: Action icon
+ icon = '{}/ftrack/action_icons/ActionAskWhereIRun.svg'.format(
+ os.environ.get('PYPE_STATICS_SERVER', '')
+ )
+
+ def discover(self, session, entities, event):
+ """ Hide by default - Should be enabled only if you want to run.
+ - best practise is to create another action that triggers this one
+ """
+
+ return True
+
+ def launch(self, session, entities, event):
+ more_data = {"event_hub_id": session.event_hub.id}
+ self.trigger_action(
+ "show.where.i.run", event, additional_event_data=more_data
+ )
+
+ return True
+
+
+def register(session, plugins_presets={}):
+    '''Register plugin. Called when used as a plugin.'''
+
+ if not isinstance(session, ftrack_api.session.Session):
+ return
+
+ ActionAskWhereIRun(session, plugins_presets).register()
diff --git a/pype/ftrack/actions/action_where_run_show.py b/pype/ftrack/actions/action_where_run_show.py
new file mode 100644
index 0000000000..7fea23e3b7
--- /dev/null
+++ b/pype/ftrack/actions/action_where_run_show.py
@@ -0,0 +1,86 @@
+import platform
+import socket
+import getpass
+from pype.vendor import ftrack_api
+from pype.ftrack import BaseAction
+
+
+class ActionShowWhereIRun(BaseAction):
+ """ Sometimes user forget where pipeline with his credentials is running.
+ - this action shows on which PC, Username and IP is running
+ - requirement action MUST be registered where we want to locate the PC:
+ - - can't be used retrospectively...
+ """
+ #: Action identifier.
+ identifier = 'show.where.i.run'
+ #: Action label.
+ label = 'Show where I run'
+ #: Action description.
+    description = 'Shows PC info where the user has Pype running'
+
+ def discover(self, session, entities, event):
+ """ Hide by default - Should be enabled only if you want to run.
+        - best practice is to create another action that triggers this one
+ """
+
+ return False
+
+ def launch(self, session, entities, event):
+ # Don't show info when was launch from this session
+ if session.event_hub.id == event.get("data", {}).get("event_hub_id"):
+ return True
+
+ title = "Where Do I Run?"
+ msgs = {}
+ all_keys = ["Hostname", "IP", "Username", "System name", "PC name"]
+ try:
+ host_name = socket.gethostname()
+ msgs["Hostname"] = host_name
+ host_ip = socket.gethostbyname(host_name)
+ msgs["IP"] = host_ip
+ except Exception:
+ pass
+
+ try:
+ system_name, pc_name, *_ = platform.uname()
+ msgs["System name"] = system_name
+ msgs["PC name"] = pc_name
+ except Exception:
+ pass
+
+ try:
+ msgs["Username"] = getpass.getuser()
+ except Exception:
+ pass
+
+ for key in all_keys:
+ if not msgs.get(key):
+ msgs[key] = "-Undefined-"
+
+ items = []
+ first = True
+ splitter = {'type': 'label', 'value': '---'}
+ for key, value in msgs.items():
+ if first:
+ first = False
+ else:
+ items.append(splitter)
+ self.log.debug("{}: {}".format(key, value))
+
+            subtitle = {'type': 'label', 'value': '{}'.format(key)}
+ items.append(subtitle)
+            message = {'type': 'label', 'value': '{}'.format(value)}
+ items.append(message)
+
+ self.show_interface(items, title, event=event)
+
+ return True
+
+
+def register(session, plugins_presets={}):
+    '''Register plugin. Called when used as a plugin.'''
+
+ if not isinstance(session, ftrack_api.session.Session):
+ return
+
+ ActionShowWhereIRun(session, plugins_presets).register()
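The two actions above cooperate over the event hub: `ActionAskWhereIRun` stamps its session's `event_hub.id` into the triggered event, and every running `ActionShowWhereIRun` compares that id against its own, so the machine that asked does not answer itself. Reduced to the handshake (`trigger_action` stands in for the BaseAction helper used throughout these diffs):

```python
def ask_launch(session, event, trigger_action):
    # ActionAskWhereIRun.launch(), reduced to the handshake.
    more_data = {"event_hub_id": session.event_hub.id}
    trigger_action("show.where.i.run", event, additional_event_data=more_data)
    return True


def show_launch(session, event):
    # ActionShowWhereIRun.launch() guard: every instance receives the
    # event, but the triggering session skips replying to itself.
    if session.event_hub.id == event.get("data", {}).get("event_hub_id"):
        return True
    # ...collect hostname / IP / username and show the interface...
    return True
```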
diff --git a/pype/ftrack/events/action_sync_hier_attrs.py b/pype/ftrack/events/action_sync_hier_attrs.py
index 7fa024edf4..22ad7bf5aa 100644
--- a/pype/ftrack/events/action_sync_hier_attrs.py
+++ b/pype/ftrack/events/action_sync_hier_attrs.py
@@ -20,11 +20,12 @@ class SyncHierarchicalAttrs(BaseAction):
#: Action identifier.
identifier = 'sync.hierarchical.attrs'
#: Action label.
- label = 'Sync HierAttrs'
+ label = "Pype Admin"
+    variant = '- Sync Hier Attrs (Server)'
#: Action description.
description = 'Synchronize hierarchical attributes'
#: Icon
- icon = '{}/ftrack/action_icons/SyncHierarchicalAttrs.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get(
'PYPE_STATICS_SERVER',
'http://localhost:{}'.format(
@@ -61,7 +62,7 @@ class SyncHierarchicalAttrs(BaseAction):
if role['security_role']['name'] in role_list:
role_check = True
break
- print(self.icon)
+
if role_check is True:
for entity in entities:
context_type = entity.get('context_type', '').lower()
@@ -75,6 +76,8 @@ class SyncHierarchicalAttrs(BaseAction):
return discover
def launch(self, session, entities, event):
+ self.interface_messages = {}
+
user = session.query(
'User where id is "{}"'.format(event['source']['user']['id'])
).one()
@@ -87,13 +90,26 @@ class SyncHierarchicalAttrs(BaseAction):
})
})
session.commit()
+ self.log.debug('Job with id "{}" created'.format(job['id']))
+ process_session = ftrack_api.Session(
+ server_url=session.server_url,
+ api_key=session.api_key,
+ api_user=session.api_user,
+ auto_connect_event_hub=True
+ )
try:
# Collect hierarchical attrs
+ self.log.debug('Collecting Hierarchical custom attributes started')
custom_attributes = {}
- all_avalon_attr = session.query(
+ all_avalon_attr = process_session.query(
'CustomAttributeGroup where name is "avalon"'
).one()
+
+ error_key = (
+ 'Hierarchical attributes with set "default" value (not allowed)'
+ )
+
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
if 'avalon_' in cust_attr['key']:
continue
@@ -102,6 +118,12 @@ class SyncHierarchicalAttrs(BaseAction):
continue
if cust_attr['default']:
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+ self.interface_messages[error_key].append(
+ cust_attr['label']
+ )
+
self.log.warning((
'Custom attribute "{}" has set default value.'
' This attribute can\'t be synchronized'
@@ -110,6 +132,10 @@ class SyncHierarchicalAttrs(BaseAction):
custom_attributes[cust_attr['key']] = cust_attr
+ self.log.debug(
+ 'Collecting Hierarchical custom attributes has finished'
+ )
+
if not custom_attributes:
msg = 'No hierarchical attributes to sync.'
self.log.debug(msg)
@@ -127,28 +153,61 @@ class SyncHierarchicalAttrs(BaseAction):
self.db_con.install()
self.db_con.Session['AVALON_PROJECT'] = project_name
- for entity in entities:
+ _entities = self._get_entities(event, process_session)
+
+ for entity in _entities:
+ self.log.debug(30*'-')
+ self.log.debug(
+ 'Processing entity "{}"'.format(entity.get('name', entity))
+ )
+
+ ent_name = entity.get('name', entity)
+ if entity.entity_type.lower() == 'project':
+ ent_name = entity['full_name']
+
for key in custom_attributes:
+ self.log.debug(30*'*')
+ self.log.debug(
+ 'Processing Custom attribute key "{}"'.format(key)
+ )
# check if entity has that attribute
if key not in entity['custom_attributes']:
- self.log.debug(
- 'Hierachical attribute "{}" not found on "{}"'.format(
- key, entity.get('name', entity)
- )
+ error_key = 'Missing key on entities'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ self.interface_messages[error_key].append(
+ '- key: "{}" - entity: "{}"'.format(key, ent_name)
)
+
+ self.log.error((
+ '- key "{}" not found on "{}"'
+                    ).format(key, ent_name))
continue
value = self.get_hierarchical_value(key, entity)
if value is None:
- self.log.warning(
- 'Hierarchical attribute "{}" not set on "{}"'.format(
- key, entity.get('name', entity)
- )
+ error_key = (
+ 'Missing value for key on entity'
+ ' and its parents (synchronization was skipped)'
)
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ self.interface_messages[error_key].append(
+ '- key: "{}" - entity: "{}"'.format(key, ent_name)
+ )
+
+ self.log.warning((
+ '- key "{}" not set on "{}" or its parents'
+ ).format(key, ent_name))
continue
self.update_hierarchical_attribute(entity, key, value)
+ job['status'] = 'done'
+ session.commit()
+
except Exception:
self.log.error(
'Action "{}" failed'.format(self.label),
@@ -161,6 +220,9 @@ class SyncHierarchicalAttrs(BaseAction):
if job['status'] in ('queued', 'running'):
job['status'] = 'failed'
session.commit()
+
+ if self.interface_messages:
+            title = "Errors during SyncHierarchicalAttrs"
+            self.show_interface_from_dict(
+                messages=self.interface_messages, title=title, event=event
+            )
return True
@@ -180,6 +242,27 @@ class SyncHierarchicalAttrs(BaseAction):
entity.entity_type.lower() == 'task'
):
return
+
+ ent_name = entity.get('name', entity)
+ if entity.entity_type.lower() == 'project':
+ ent_name = entity['full_name']
+
+ hierarchy = '/'.join(
+ [a['name'] for a in entity.get('ancestors', [])]
+ )
+ if hierarchy:
+ hierarchy = '/'.join(
+ [entity['project']['full_name'], hierarchy, entity['name']]
+ )
+ elif entity.entity_type.lower() == 'project':
+ hierarchy = entity['full_name']
+ else:
+ hierarchy = '/'.join(
+ [entity['project']['full_name'], entity['name']]
+ )
+
+ self.log.debug('- updating entity "{}"'.format(hierarchy))
+
# collect entity's custom attributes
custom_attributes = entity.get('custom_attributes')
if not custom_attributes:
@@ -187,24 +270,49 @@ class SyncHierarchicalAttrs(BaseAction):
mongoid = custom_attributes.get(self.ca_mongoid)
if not mongoid:
- self.log.debug('Entity "{}" is not synchronized to avalon.'.format(
- entity.get('name', entity)
- ))
+ error_key = 'Missing MongoID on entities (try SyncToAvalon first)'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ if ent_name not in self.interface_messages[error_key]:
+ self.interface_messages[error_key].append(ent_name)
+
+ self.log.warning(
+ '-- entity "{}" is not synchronized to avalon. Skipping'.format(
+ ent_name
+ )
+ )
return
try:
mongoid = ObjectId(mongoid)
except Exception:
- self.log.warning('Entity "{}" has stored invalid MongoID.'.format(
- entity.get('name', entity)
- ))
+ error_key = 'Invalid MongoID on entities (try SyncToAvalon)'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ if ent_name not in self.interface_messages[error_key]:
+ self.interface_messages[error_key].append(ent_name)
+
+ self.log.warning(
+ '-- entity "{}" has stored invalid MongoID. Skipping'.format(
+ ent_name
+ )
+ )
return
# Find entity in Mongo DB
mongo_entity = self.db_con.find_one({'_id': mongoid})
if not mongo_entity:
+ error_key = 'Entities not found in Avalon DB (try SyncToAvalon)'
+ if error_key not in self.interface_messages:
+ self.interface_messages[error_key] = []
+
+ if ent_name not in self.interface_messages[error_key]:
+ self.interface_messages[error_key].append(ent_name)
+
self.log.warning(
- 'Entity "{}" is not synchronized to avalon.'.format(
- entity.get('name', entity)
+ '-- entity "{}" was not found in DB by id "{}". Skipping'.format(
+ ent_name, str(mongoid)
)
)
return
@@ -226,13 +334,13 @@ class SyncHierarchicalAttrs(BaseAction):
self.update_hierarchical_attribute(child, key, value)
-def register(session, **kw):
+def register(session, plugins_presets):
'''Register plugin. Called when used as an plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- SyncHierarchicalAttrs(session).register()
+ SyncHierarchicalAttrs(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/events/action_sync_to_avalon.py b/pype/ftrack/events/action_sync_to_avalon.py
index e78b209fac..51ffbc66d9 100644
--- a/pype/ftrack/events/action_sync_to_avalon.py
+++ b/pype/ftrack/events/action_sync_to_avalon.py
@@ -48,11 +48,12 @@ class Sync_To_Avalon(BaseAction):
#: Action identifier.
identifier = 'sync.to.avalon'
#: Action label.
- label = 'SyncToAvalon'
+ label = "Pype Admin"
+ variant = "- Sync To Avalon (Server)"
#: Action description.
description = 'Send data from Ftrack to Avalon'
#: Action icon.
- icon = '{}/ftrack/action_icons/SyncToAvalon.svg'.format(
+ icon = '{}/ftrack/action_icons/PypeAdmin.svg'.format(
os.environ.get(
'PYPE_STATICS_SERVER',
'http://localhost:{}'.format(
@@ -206,18 +207,8 @@ class Sync_To_Avalon(BaseAction):
job['status'] = 'failed'
session.commit()
-
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier='sync.hierarchical.attrs',
- selection=event['data']['selection']
- ),
- source=dict(
- user=event['source']['user']
- )
- )
- session.event_hub.publish(event, on_error='ignore')
+
+ self.trigger_action("sync.hierarchical.attrs", event)
if len(message) > 0:
message = "Unable to sync: {}".format(message)
@@ -242,7 +233,7 @@ class Sync_To_Avalon(BaseAction):
self.add_childs_to_importable(child)
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
# Validate that session is an instance of ftrack_api.Session. If not,
@@ -251,7 +242,7 @@ def register(session, **kw):
if not isinstance(session, ftrack_api.session.Session):
return
- Sync_To_Avalon(session).register()
+    Sync_To_Avalon(session, plugins_presets).register()
def main(arguments=None):
diff --git a/pype/ftrack/events/event_del_avalon_id_from_new.py b/pype/ftrack/events/event_del_avalon_id_from_new.py
index f27a329429..6f6320f51b 100644
--- a/pype/ftrack/events/event_del_avalon_id_from_new.py
+++ b/pype/ftrack/events/event_del_avalon_id_from_new.py
@@ -51,9 +51,9 @@ class DelAvalonIdFromNew(BaseEvent):
continue
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- DelAvalonIdFromNew(session).register()
+ DelAvalonIdFromNew(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_next_task_update.py b/pype/ftrack/events/event_next_task_update.py
index 1ae06050bc..e25514a2b4 100644
--- a/pype/ftrack/events/event_next_task_update.py
+++ b/pype/ftrack/events/event_next_task_update.py
@@ -86,9 +86,9 @@ class NextTaskUpdate(BaseEvent):
session.rollback()
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- NextTaskUpdate(session).register()
+ NextTaskUpdate(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_radio_buttons.py b/pype/ftrack/events/event_radio_buttons.py
index 769115f045..9c6f2d490a 100644
--- a/pype/ftrack/events/event_radio_buttons.py
+++ b/pype/ftrack/events/event_radio_buttons.py
@@ -2,11 +2,10 @@ from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent
-ignore_me = True
-
-
class Radio_buttons(BaseEvent):
+ ignore_me = True
+
def launch(self, session, event):
        '''Provides a radio button behaviour to any boolean attribute in
radio_button group.'''
@@ -34,9 +33,10 @@ class Radio_buttons(BaseEvent):
session.commit()
-def register(session):
+
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- Radio_buttons(session).register()
+ Radio_buttons(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_sync_hier_attr.py b/pype/ftrack/events/event_sync_hier_attr.py
index 867e2cde2b..7c5c4b820b 100644
--- a/pype/ftrack/events/event_sync_hier_attr.py
+++ b/pype/ftrack/events/event_sync_hier_attr.py
@@ -115,9 +115,9 @@ class SyncHierarchicalAttrs(BaseEvent):
self.update_hierarchical_attribute(child, key, value)
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- SyncHierarchicalAttrs(session).register()
+ SyncHierarchicalAttrs(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_sync_to_avalon.py b/pype/ftrack/events/event_sync_to_avalon.py
index f6b2b48a1f..ae7ebbbf90 100644
--- a/pype/ftrack/events/event_sync_to_avalon.py
+++ b/pype/ftrack/events/event_sync_to_avalon.py
@@ -16,7 +16,7 @@ class Sync_to_Avalon(BaseEvent):
# If mongo_id textfield has changed: RETURN!
# - infinite loop
for ent in event['data']['entities']:
- if 'keys' in ent:
+ if ent.get('keys') is not None:
if ca_mongoid in ent['keys']:
return
@@ -109,19 +109,19 @@ class Sync_to_Avalon(BaseEvent):
' for more information.'
)
items = [
- {'type': 'label', 'value':'# Fatal Error'},
+ {'type': 'label', 'value': '# Fatal Error'},
             {'type': 'label', 'value': '<p>{}</p>'.format(ftrack_message)}
]
- self.show_interface(event, items, title)
+ self.show_interface(items, title, event=event)
self.log.error('Fatal error during sync: {}'.format(message))
return
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- Sync_to_Avalon(session).register()
+ Sync_to_Avalon(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_test.py b/pype/ftrack/events/event_test.py
index f6746f2535..94d99dbf67 100644
--- a/pype/ftrack/events/event_test.py
+++ b/pype/ftrack/events/event_test.py
@@ -5,11 +5,10 @@ from pype.vendor import ftrack_api
from pype.ftrack import BaseEvent
-ignore_me = True
-
-
class Test_Event(BaseEvent):
+ ignore_me = True
+
priority = 10000
def launch(self, session, event):
@@ -21,9 +20,9 @@ class Test_Event(BaseEvent):
return True
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- Test_Event(session).register()
+ Test_Event(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_thumbnail_updates.py b/pype/ftrack/events/event_thumbnail_updates.py
index 042f6cc600..7f52177161 100644
--- a/pype/ftrack/events/event_thumbnail_updates.py
+++ b/pype/ftrack/events/event_thumbnail_updates.py
@@ -45,9 +45,9 @@ class ThumbnailEvents(BaseEvent):
pass
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- ThumbnailEvents(session).register()
+ ThumbnailEvents(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_user_assigment.py b/pype/ftrack/events/event_user_assigment.py
index 0bb7f21590..3e250b988a 100644
--- a/pype/ftrack/events/event_user_assigment.py
+++ b/pype/ftrack/events/event_user_assigment.py
@@ -229,11 +229,11 @@ class UserAssigmentEvent(BaseEvent):
return True
-def register(session, **kw):
+def register(session, plugins_presets):
"""
    Register plugin. Called when used as a plugin.
"""
if not isinstance(session, ftrack_api.session.Session):
return
- UserAssigmentEvent(session).register()
+ UserAssigmentEvent(session, plugins_presets).register()
diff --git a/pype/ftrack/events/event_version_to_task_statuses.py b/pype/ftrack/events/event_version_to_task_statuses.py
index 8b14e025d3..306d594647 100644
--- a/pype/ftrack/events/event_version_to_task_statuses.py
+++ b/pype/ftrack/events/event_version_to_task_statuses.py
@@ -69,9 +69,9 @@ class VersionToTaskStatus(BaseEvent):
path, task_status['name']))
-def register(session, **kw):
+def register(session, plugins_presets):
    '''Register plugin. Called when used as a plugin.'''
if not isinstance(session, ftrack_api.session.Session):
return
- VersionToTaskStatus(session).register()
+ VersionToTaskStatus(session, plugins_presets).register()
diff --git a/pype/ftrack/ftrack_server/ftrack_server.py b/pype/ftrack/ftrack_server/ftrack_server.py
index 14dd3d11f7..2a58c12d09 100644
--- a/pype/ftrack/ftrack_server/ftrack_server.py
+++ b/pype/ftrack/ftrack_server/ftrack_server.py
@@ -5,7 +5,9 @@ import importlib
from pype.vendor import ftrack_api
import time
import logging
-from pypeapp import Logger
+import inspect
+from pypeapp import Logger, config
+
log = Logger().get_logger(__name__)
@@ -27,8 +29,8 @@ PYTHONPATH # Path to ftrack_api and paths to all modules used in actions
"""
-class FtrackServer():
- def __init__(self, type='action'):
+class FtrackServer:
+ def __init__(self, server_type='action'):
"""
- 'type' is by default set to 'action' - Runs Action server
- enter 'event' for Event server
@@ -43,21 +45,12 @@ class FtrackServer():
ftrack_log = logging.getLogger("ftrack_api")
ftrack_log.setLevel(logging.WARNING)
- self.type = type
- self.actionsAvailable = True
- self.eventsAvailable = True
- # Separate all paths
- if "FTRACK_ACTIONS_PATH" in os.environ:
- all_action_paths = os.environ["FTRACK_ACTIONS_PATH"]
- self.actionsPaths = all_action_paths.split(os.pathsep)
- else:
- self.actionsAvailable = False
+ env_key = "FTRACK_ACTIONS_PATH"
+ if server_type.lower() == 'event':
+ env_key = "FTRACK_EVENTS_PATH"
- if "FTRACK_EVENTS_PATH" in os.environ:
- all_event_paths = os.environ["FTRACK_EVENTS_PATH"]
- self.eventsPaths = all_event_paths.split(os.pathsep)
- else:
- self.eventsAvailable = False
+ self.server_type = server_type
+ self.env_key = env_key
def stop_session(self):
if self.session.event_hub.connected is True:
@@ -67,7 +60,7 @@ class FtrackServer():
def set_files(self, paths):
# Iterate all paths
- functions = []
+ register_functions_dict = []
for path in paths:
# add path to PYTHON PATH
if path not in sys.path:
@@ -80,32 +73,23 @@ class FtrackServer():
if '.pyc' in file or '.py' not in file:
continue
- ignore = 'ignore_me'
mod = importlib.import_module(os.path.splitext(file)[0])
importlib.reload(mod)
mod_functions = dict(
[
(name, function)
for name, function in mod.__dict__.items()
- if isinstance(function, types.FunctionType) or
- name == ignore
+ if isinstance(function, types.FunctionType)
]
)
- # Don't care about ignore_me files
- if (
- ignore in mod_functions and
- mod_functions[ignore] is True
- ):
- continue
+
# separate files by register function
if 'register' not in mod_functions:
- msg = (
- '"{0}" - Missing register method'
- ).format(file, self.type)
+ msg = ('"{}" - Missing register method').format(file)
log.warning(msg)
continue
- functions.append({
+ register_functions_dict.append({
'name': file,
'register': mod_functions['register']
})
@@ -115,43 +99,47 @@ class FtrackServer():
)
log.warning(msg)
- if len(functions) < 1:
+ if len(register_functions_dict) < 1:
raise Exception
+ # Load presets for setting plugins
+ key = "user"
+ if self.server_type.lower() == "event":
+ key = "server"
+ plugins_presets = config.get_presets().get(
+ "ftrack", {}
+ ).get("plugins", {}).get(key, {})
+
function_counter = 0
- for function in functions:
+ for function_dict in register_functions_dict:
+ register = function_dict["register"]
try:
- function['register'](self.session)
+ if len(inspect.signature(register).parameters) == 1:
+ register(self.session)
+ else:
+ register(self.session, plugins_presets=plugins_presets)
+
if function_counter%7 == 0:
time.sleep(0.1)
function_counter += 1
- except Exception as e:
+ except Exception as exc:
msg = '"{}" - register was not successful ({})'.format(
- function['name'], str(e)
+ function_dict['name'], str(exc)
)
log.warning(msg)
def run_server(self):
self.session = ftrack_api.Session(auto_connect_event_hub=True,)
- if self.type.lower() == 'event':
- if self.eventsAvailable is False:
- msg = (
- 'FTRACK_EVENTS_PATH is not set'
- ', event server won\'t launch'
- )
- log.error(msg)
- return
- self.set_files(self.eventsPaths)
- else:
- if self.actionsAvailable is False:
- msg = (
- 'FTRACK_ACTIONS_PATH is not set'
- ', action server won\'t launch'
- )
- log.error(msg)
- return
- self.set_files(self.actionsPaths)
+ paths_str = os.environ.get(self.env_key)
+ if paths_str is None:
+ log.error((
+ "Env var \"{}\" is not set, \"{}\" server won\'t launch"
+ ).format(self.env_key, self.server_type))
+ return
+
+ paths = paths_str.split(os.pathsep)
+ self.set_files(paths)
log.info(60*"*")
log.info('Registration of actions/events has finished!')
diff --git a/pype/ftrack/lib/avalon_sync.py b/pype/ftrack/lib/avalon_sync.py
index 4eaf28eae4..169bc4b051 100644
--- a/pype/ftrack/lib/avalon_sync.py
+++ b/pype/ftrack/lib/avalon_sync.py
@@ -1,14 +1,13 @@
import os
import re
import json
-from pype import lib as pypelib
from pype.lib import get_avalon_database
from bson.objectid import ObjectId
import avalon
import avalon.api
from avalon import schema
from avalon.vendor import toml, jsonschema
-from pypeapp import Logger
+from pypeapp import Logger, Anatomy, config
ValidationError = jsonschema.ValidationError
@@ -53,8 +52,8 @@ def import_to_avalon(
if entity_type in ['Project']:
type = 'project'
- config = get_project_config(entity)
- schema.validate(config)
+ proj_config = get_project_config(entity)
+ schema.validate(proj_config)
av_project_code = None
if av_project is not None and 'code' in av_project['data']:
@@ -62,13 +61,12 @@ def import_to_avalon(
ft_project_code = ft_project['name']
if av_project is None:
- project_schema = pypelib.get_avalon_project_template_schema()
item = {
- 'schema': project_schema,
+ 'schema': "avalon-core:project-2.0",
'type': type,
'name': project_name,
'data': dict(),
- 'config': config,
+ 'config': proj_config,
'parent': None,
}
schema.validate(item)
@@ -118,13 +116,13 @@ def import_to_avalon(
# not override existing templates!
templates = av_project['config'].get('template', None)
if templates is not None:
- for key, value in config['template'].items():
+ for key, value in proj_config['template'].items():
if (
key in templates and
templates[key] is not None and
templates[key] != value
):
- config['template'][key] = templates[key]
+ proj_config['template'][key] = templates[key]
projectId = av_project['_id']
@@ -144,7 +142,7 @@ def import_to_avalon(
{'_id': ObjectId(projectId)},
{'$set': {
'name': project_name,
- 'config': config,
+ 'config': proj_config,
'data': data
}}
)
@@ -214,9 +212,8 @@ def import_to_avalon(
{'type': 'asset', 'name': name}
)
if avalon_asset is None:
- asset_schema = pypelib.get_avalon_asset_template_schema()
item = {
- 'schema': asset_schema,
+ 'schema': "avalon-core:asset-2.0",
'name': name,
'silo': silo,
'parent': ObjectId(projectId),
@@ -329,13 +326,26 @@ def import_to_avalon(
return output
-def get_avalon_attr(session):
+def get_avalon_attr(session, split_hierarchical=False):
custom_attributes = []
+ hier_custom_attributes = []
query = 'CustomAttributeGroup where name is "avalon"'
all_avalon_attr = session.query(query).one()
for cust_attr in all_avalon_attr['custom_attribute_configurations']:
- if 'avalon_' not in cust_attr['key']:
- custom_attributes.append(cust_attr)
+ if 'avalon_' in cust_attr['key']:
+ continue
+
+ if split_hierarchical:
+ if cust_attr["is_hierarchical"]:
+ hier_custom_attributes.append(cust_attr)
+ continue
+
+ custom_attributes.append(cust_attr)
+
+ if split_hierarchical:
+ # return tuple
+ return custom_attributes, hier_custom_attributes
+
return custom_attributes
@@ -345,13 +355,12 @@ def changeability_check_childs(entity):
childs = entity['children']
for child in childs:
if child.entity_type.lower() == 'task':
- config = get_config_data()
- if 'sync_to_avalon' in config:
- config = config['sync_to_avalon']
- if 'statuses_name_change' in config:
- available_statuses = config['statuses_name_change']
- else:
- available_statuses = []
+ available_statuses = config.get_presets().get(
+ "ftrack", {}).get(
+ "ftrack_config", {}).get(
+ "sync_to_avalon", {}).get(
+ "statuses_name_change", []
+ )
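+            # Presets sketch (hypothetical status names) as stored in
+            # presets/ftrack/ftrack_config.json:
+            #     {"sync_to_avalon": {"statuses_name_change": ["omitted"]}}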
ent_status = child['status']['name'].lower()
if ent_status not in available_statuses:
return False
@@ -480,14 +489,28 @@ def get_avalon_project(ft_project):
return avalon_project
-def get_project_config(entity):
- config = {}
- config['schema'] = pypelib.get_avalon_project_config_schema()
- config['tasks'] = get_tasks(entity)
- config['apps'] = get_project_apps(entity)
- config['template'] = pypelib.get_avalon_project_template()
+def get_avalon_project_template():
+ """Get avalon template
- return config
+ Returns:
+ dictionary with templates
+ """
+ templates = Anatomy().templates
+ return {
+ 'workfile': templates["avalon"]["workfile"],
+ 'work': templates["avalon"]["work"],
+ 'publish': templates["avalon"]["publish"]
+ }
+
+
+def get_project_config(entity):
+ proj_config = {}
+ proj_config['schema'] = 'avalon-core:config-1.0'
+ proj_config['tasks'] = get_tasks(entity)
+ proj_config['apps'] = get_project_apps(entity)
+ proj_config['template'] = get_avalon_project_template()
+
+ return proj_config
def get_tasks(project):
@@ -507,11 +530,17 @@ def get_project_apps(entity):
apps = []
for app in entity['custom_attributes']['applications']:
try:
- app_config = {}
- app_config['name'] = app
- app_config['label'] = toml.load(avalon.lib.which_app(app))['label']
+ toml_path = avalon.lib.which_app(app)
+ if not toml_path:
+ log.warning((
+ 'Missing config file for application "{}"'
+ ).format(app))
+ continue
- apps.append(app_config)
+ apps.append({
+ 'name': app,
+ 'label': toml.load(toml_path)['label']
+ })
except Exception as e:
log.warning('Error with application {0} - {1}'.format(app, e))
@@ -533,7 +562,7 @@ def avalon_check_name(entity, inSchema=None):
if entity.entity_type in ['Project']:
# data['type'] = 'project'
name = entity['full_name']
- # schema = get_avalon_project_template_schema()
+ # schema = "avalon-core:project-2.0"
data['silo'] = 'Film'
@@ -551,24 +580,6 @@ def avalon_check_name(entity, inSchema=None):
raise ValueError(msg.format(name))
-def get_config_data():
- path_items = [pypelib.get_presets_path(), 'ftrack', 'ftrack_config.json']
- filepath = os.path.sep.join(path_items)
- data = dict()
- try:
- with open(filepath) as data_file:
- data = json.load(data_file)
-
- except Exception as e:
- msg = (
- 'Loading "Ftrack Config file" Failed.'
- ' Please check log for more information.'
- )
- log.warning("{} - {}".format(msg, str(e)))
-
- return data
-
-
def show_errors(obj, event, errors):
    title = 'Hey You! You raised a few errors! (*look below*)'
items = []
@@ -590,4 +601,4 @@ def show_errors(obj, event, errors):
obj.log.error(
'{}: {}'.format(key, message)
)
- obj.show_interface(event, items, title)
+ obj.show_interface(items, title, event=event)
diff --git a/pype/ftrack/lib/ftrack_action_handler.py b/pype/ftrack/lib/ftrack_action_handler.py
index 7a25155718..7fd7eccfb7 100644
--- a/pype/ftrack/lib/ftrack_action_handler.py
+++ b/pype/ftrack/lib/ftrack_action_handler.py
@@ -21,9 +21,9 @@ class BaseAction(BaseHandler):
icon = None
type = 'Action'
- def __init__(self, session):
+ def __init__(self, session, plugins_presets={}):
'''Expects a ftrack_api.Session instance'''
- super().__init__(session)
+ super().__init__(session, plugins_presets)
if self.label is None:
raise ValueError(
diff --git a/pype/ftrack/lib/ftrack_app_handler.py b/pype/ftrack/lib/ftrack_app_handler.py
index 2d1d88f7d4..dbb38a3247 100644
--- a/pype/ftrack/lib/ftrack_app_handler.py
+++ b/pype/ftrack/lib/ftrack_app_handler.py
@@ -5,7 +5,7 @@ from avalon import lib as avalonlib
import acre
from pype import api as pype
from pype import lib as pypelib
-from .avalon_sync import get_config_data
+from pypeapp import config
from .ftrack_base_handler import BaseHandler
from pypeapp import Anatomy
@@ -26,10 +26,10 @@ class AppAction(BaseHandler):
preactions = ['start.timer']
def __init__(
- self, session, label, name, executable,
- variant=None, icon=None, description=None, preactions=[]
+ self, session, label, name, executable, variant=None,
+ icon=None, description=None, preactions=[], plugins_presets={}
):
- super().__init__(session)
+ super().__init__(session, plugins_presets)
'''Expects a ftrack_api.Session instance'''
if label is None:
@@ -94,6 +94,9 @@ class AppAction(BaseHandler):
):
return False
+ if entities[0]['parent'].entity_type.lower() == 'project':
+ return False
+
ft_project = entities[0]['project']
database = pypelib.get_avalon_database()
@@ -218,11 +221,22 @@ class AppAction(BaseHandler):
anatomy = anatomy.format(data)
work_template = anatomy["work"]["folder"]
- except Exception as e:
- self.log.exception(
- "{0} Error in anatomy.format: {1}".format(__name__, e)
+ except Exception as exc:
+ msg = "{} Error in anatomy.format: {}".format(
+ __name__, str(exc)
)
- os.environ["AVALON_WORKDIR"] = os.path.normpath(work_template)
+ self.log.error(msg, exc_info=True)
+ return {
+ 'success': False,
+ 'message': msg
+ }
+
+ workdir = os.path.normpath(work_template)
+ os.environ["AVALON_WORKDIR"] = workdir
+ try:
+ os.makedirs(workdir)
+ except FileExistsError:
+ pass
# collect all parents from the task
parents = []
@@ -325,10 +339,10 @@ class AppAction(BaseHandler):
pass
# Change status of task to In progress
- config = get_config_data()
+ presets = config.get_presets()["ftrack"]["ftrack_config"]
- if 'status_update' in config:
- statuses = config['status_update']
+ if 'status_update' in presets:
+ statuses = presets['status_update']
actual_status = entity['status']['name'].lower()
next_status_name = None
@@ -348,7 +362,7 @@ class AppAction(BaseHandler):
session.commit()
except Exception:
msg = (
- 'Status "{}" in config wasn\'t found on Ftrack'
+ 'Status "{}" in presets wasn\'t found on Ftrack'
).format(next_status_name)
self.log.warning(msg)
diff --git a/pype/ftrack/lib/ftrack_base_handler.py b/pype/ftrack/lib/ftrack_base_handler.py
index 7dc1b0a47c..9eda74f0f3 100644
--- a/pype/ftrack/lib/ftrack_base_handler.py
+++ b/pype/ftrack/lib/ftrack_base_handler.py
@@ -26,9 +26,10 @@ class BaseHandler(object):
priority = 100
# Type is just for logging purpose (e.g.: Action, Event, Application,...)
type = 'No-type'
+ ignore_me = False
preactions = []
- def __init__(self, session):
+ def __init__(self, session, plugins_presets={}):
'''Expects a ftrack_api.Session instance'''
self._session = session
self.log = Logger().get_logger(self.__class__.__name__)
@@ -36,11 +37,23 @@ class BaseHandler(object):
# Using decorator
self.register = self.register_decorator(self.register)
self.launch = self.launch_log(self.launch)
+ self.plugins_presets = plugins_presets
# Decorator
def register_decorator(self, func):
@functools.wraps(func)
def wrapper_register(*args, **kwargs):
+
+ presets_data = self.plugins_presets.get(self.__class__.__name__)
+ if presets_data:
+ for key, value in presets_data.items():
+ if not hasattr(self, key):
+ continue
+ setattr(self, key, value)
+
+ if self.ignore_me:
+ return
+
label = self.__class__.__name__
if hasattr(self, 'label'):
if self.variant is None:
@@ -72,7 +85,7 @@ class BaseHandler(object):
self.type, label)
)
except Exception as e:
- self.log.exception('{} "{}" - Registration failed ({})'.format(
+ self.log.error('{} "{}" - Registration failed ({})'.format(
self.type, label, str(e))
)
return wrapper_register
@@ -83,23 +96,23 @@ class BaseHandler(object):
def wrapper_launch(*args, **kwargs):
label = self.__class__.__name__
if hasattr(self, 'label'):
- if self.variant is None:
- label = self.label
- else:
- label = '{} {}'.format(self.label, self.variant)
+ label = self.label
+ if hasattr(self, 'variant'):
+ if self.variant is not None:
+ label = '{} {}'.format(self.label, self.variant)
+ self.log.info(('{} "{}": Launched').format(self.type, label))
try:
- self.log.info(('{} "{}": Launched').format(self.type, label))
- result = func(*args, **kwargs)
- self.log.info(('{} "{}": Finished').format(self.type, label))
- return result
- except Exception as e:
- msg = '{} "{}": Failed ({})'.format(self.type, label, str(e))
- self.log.exception(msg)
+ return func(*args, **kwargs)
+ except Exception as exc:
+ msg = '{} "{}": Failed ({})'.format(self.type, label, str(exc))
+ self.log.error(msg, exc_info=True)
return {
'success': False,
'message': msg
}
+ finally:
+ self.log.info(('{} "{}": Finished').format(self.type, label))
return wrapper_launch
@property
@@ -127,6 +140,13 @@ class BaseHandler(object):
# Custom validations
result = self.preregister()
+ if result is None:
+ self.log.debug((
+ "\"{}\" 'preregister' method returned 'None'. Expected it"
+ " didn't fail and continue as preregister returned True."
+ ).format(self.__class__.__name__))
+ return
+
if result is True:
return
msg = "Pre-register conditions were not met"
@@ -194,7 +214,6 @@ class BaseHandler(object):
def _translate_event(self, session, event):
'''Return *event* translated structure to be used with the API.'''
- '''Return *event* translated structure to be used with the API.'''
_entities = event['data'].get('entities_object', None)
if (
_entities is None or
@@ -209,26 +228,29 @@ class BaseHandler(object):
event
]
- def _get_entities(self, event):
- self.session._local_cache.clear()
- selection = event['data'].get('selection', [])
+ def _get_entities(self, event, session=None):
+ if session is None:
+ session = self.session
+ session._local_cache.clear()
+ selection = event['data'].get('selection') or []
_entities = []
for entity in selection:
- _entities.append(
- self.session.get(
- self._get_entity_type(entity),
- entity.get('entityId')
- )
- )
+ _entities.append(session.get(
+ self._get_entity_type(entity, session),
+ entity.get('entityId')
+ ))
event['data']['entities_object'] = _entities
return _entities
- def _get_entity_type(self, entity):
+ def _get_entity_type(self, entity, session=None):
        '''Return translated entity type that can be used with API.'''
# Get entity type and make sure it is lower cased. Most places except
# the component tab in the Sidebar will use lower case notation.
entity_type = entity.get('entityType').replace('_', '').lower()
+ if session is None:
+ session = self.session
+
-        for schema in self.session.schemas:
+        for schema in session.schemas:
alias_for = schema.get('alias_for')
@@ -305,30 +327,13 @@ class BaseHandler(object):
# Launch preactions
for preaction in self.preactions:
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier=preaction,
- selection=selection
- ),
- source=dict(
- user=dict(username=session.api_user)
- )
- )
- session.event_hub.publish(event, on_error='ignore')
+ self.trigger_action(preaction, event)
+
# Relaunch this action
- event = fa_session.ftrack_api.event.base.Event(
- topic='ftrack.action.launch',
- data=dict(
- actionIdentifier=self.identifier,
- selection=selection,
- preactions_launched=True
- ),
- source=dict(
- user=dict(username=session.api_user)
- )
+ additional_data = {"preactions_launched": True}
+ self.trigger_action(
+ self.identifier, event, additional_event_data=additional_data
)
- session.event_hub.publish(event, on_error='ignore')
return False
@@ -430,12 +435,47 @@ class BaseHandler(object):
on_error='ignore'
)
- def show_interface(self, event, items, title=''):
+ def show_interface(
+ self, items, title='',
+ event=None, user=None, username=None, user_id=None
+ ):
"""
- Shows interface to user who triggered event
+ Shows interface to user
+            - the user is identified by one of the arguments:
+ event, user, username, user_id
- 'items' must be list containing Ftrack interface items
"""
- user_id = event['source']['user']['id']
+ if not any([event, user, username, user_id]):
+ raise TypeError((
+                '`show_interface` requires one of the arguments:'
+                ' event (ftrack_api Event object),'
+                ' user (ftrack_api User object),'
+                ' username (string) or user_id (string)'
+ ))
+
+ if event:
+ user_id = event['source']['user']['id']
+ elif user:
+ user_id = user['id']
+ else:
+ if user_id:
+ key = 'id'
+ value = user_id
+ else:
+ key = 'username'
+ value = username
+
+ user = self.session.query(
+ 'User where {} is "{}"'.format(key, value)
+ ).first()
+
+ if not user:
+ raise TypeError((
+ 'Ftrack user with {} "{}" was not found!'.format(key, value)
+ ))
+
+ user_id = user['id']
+
target = (
'applicationId=ftrack.client.web and user.id="{0}"'
).format(user_id)
@@ -452,3 +492,90 @@ class BaseHandler(object):
),
on_error='ignore'
)
+
+ def show_interface_from_dict(
+ self, messages, title="", event=None,
+ user=None, username=None, user_id=None
+ ):
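+        """Show interface built from a dict of messages.
+
+        Sketch of the expected input (hypothetical entity names):
+            messages = {"Invalid MongoID on entities": ["sh010", "sh020"]}
+            self.show_interface_from_dict(messages, "Sync errors", event)
+        """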
+ if not messages:
+ self.log.debug("No messages to show! (messages dict is empty)")
+ return
+ items = []
+ splitter = {'type': 'label', 'value': '---'}
+ first = True
+ for key, value in messages.items():
+ if not first:
+ items.append(splitter)
+ else:
+ first = False
+
+        subtitle = {'type': 'label', 'value': '<h3>{}</h3>'.format(key)}
+ items.append(subtitle)
+ if isinstance(value, list):
+ for item in value:
+ message = {
+                        'type': 'label', 'value': '<p>{}</p>'.format(item)
+ }
+ items.append(message)
+ else:
+            message = {'type': 'label', 'value': '<p>{}</p>'.format(value)}
+ items.append(message)
+
+ self.show_interface(items, title, event, user, username, user_id)
+
+ def trigger_action(
+ self, action_name, event=None, session=None,
+ selection=None, user_data=None,
+ topic="ftrack.action.launch", additional_event_data={},
+ on_error="ignore"
+ ):
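+        """Publish an "ftrack.action.launch" event for another action.
+
+        Example from this changeset (Sync_To_Avalon action):
+            self.trigger_action("sync.hierarchical.attrs", event)
+        """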
+ self.log.debug("Triggering action \"{}\" Begins".format(action_name))
+
+ if not session:
+ session = self.session
+
+ # Getting selection and user data
+ _selection = None
+ _user_data = None
+
+ if event:
+ _selection = event.get("data", {}).get("selection")
+ _user_data = event.get("source", {}).get("user")
+
+ if selection is not None:
+ _selection = selection
+
+ if user_data is not None:
+ _user_data = user_data
+
+ # Without selection and user data skip triggering
+ msg = "Can't trigger \"{}\" action without {}."
+ if _selection is None:
+ self.log.error(msg.format(action_name, "selection"))
+ return
+
+ if _user_data is None:
+ self.log.error(msg.format(action_name, "user data"))
+ return
+
+ _event_data = {
+ "actionIdentifier": action_name,
+ "selection": _selection
+ }
+
+ # Add additional data
+ if additional_event_data:
+ _event_data.update(additional_event_data)
+
+ # Create and trigger event
+ session.event_hub.publish(
+ fa_session.ftrack_api.event.base.Event(
+ topic=topic,
+ data=_event_data,
+ source=dict(user=_user_data)
+ ),
+ on_error=on_error
+ )
+ self.log.debug(
+ "Action \"{}\" Triggered successfully".format(action_name)
+ )
diff --git a/pype/ftrack/lib/ftrack_event_handler.py b/pype/ftrack/lib/ftrack_event_handler.py
index c6c91e7428..db55eef16e 100644
--- a/pype/ftrack/lib/ftrack_event_handler.py
+++ b/pype/ftrack/lib/ftrack_event_handler.py
@@ -15,9 +15,9 @@ class BaseEvent(BaseHandler):
type = 'Event'
- def __init__(self, session):
+ def __init__(self, session, plugins_presets={}):
'''Expects a ftrack_api.Session instance'''
- super().__init__(session)
+ super().__init__(session, plugins_presets)
# Decorator
def launch_log(self, func):
@@ -25,9 +25,12 @@ class BaseEvent(BaseHandler):
def wrapper_launch(*args, **kwargs):
try:
func(*args, **kwargs)
- except Exception as e:
- self.log.info('{} Failed ({})'.format(
- self.__class__.__name__, str(e))
+ except Exception as exc:
+ self.log.error(
+ 'Event "{}" Failed: {}'.format(
+ self.__class__.__name__, str(exc)
+ ),
+ exc_info=True
)
return wrapper_launch
@@ -43,11 +46,7 @@ class BaseEvent(BaseHandler):
self.session.rollback()
self.session._local_cache.clear()
- self.launch(
- self.session, event
- )
-
- return
+ self.launch(self.session, event)
def _translate_event(self, session, event):
'''Return *event* translated structure to be used with the API.'''
diff --git a/pype/ftrack/tray/ftrack_module.py b/pype/ftrack/tray/ftrack_module.py
index adcce9c2b1..ce2754c25d 100644
--- a/pype/ftrack/tray/ftrack_module.py
+++ b/pype/ftrack/tray/ftrack_module.py
@@ -88,9 +88,11 @@ class FtrackModule:
def set_action_server(self):
try:
self.action_server.run_server()
- except Exception:
- msg = 'Ftrack Action server crashed! Please try to start again.'
- log.error(msg)
+        except Exception:
+ log.error(
+ "Ftrack Action server crashed! Please try to start again.",
+ exc_info=True
+ )
# TODO show message to user
self.bool_action_server = False
self.set_menu_visibility()
diff --git a/pype/fusion/scripts/fusion_switch_shot.py b/pype/fusion/scripts/fusion_switch_shot.py
index 6e6dc04733..9cc572164d 100644
--- a/pype/fusion/scripts/fusion_switch_shot.py
+++ b/pype/fusion/scripts/fusion_switch_shot.py
@@ -138,8 +138,8 @@ def update_frame_range(comp, representations):
versions = io.find({"type": "version", "_id": {"$in": version_ids}})
versions = list(versions)
- start = min(v["data"]["startFrame"] for v in versions)
- end = max(v["data"]["endFrame"] for v in versions)
+ start = min(v["data"]["frameStart"] for v in versions)
+ end = max(v["data"]["frameEnd"] for v in versions)
fusion_lib.update_frame_range(start, end, comp=comp)
diff --git a/pype/houdini/__init__.py b/pype/houdini/__init__.py
index f432a4a5e5..12b0b36de5 100644
--- a/pype/houdini/__init__.py
+++ b/pype/houdini/__init__.py
@@ -10,10 +10,7 @@ from avalon.houdini import pipeline as houdini
from pype.houdini import lib
-from pype.lib import (
- any_outdated,
- update_task_from_path
-)
+from pype.lib import any_outdated
PARENT_DIR = os.path.dirname(__file__)
@@ -57,8 +54,6 @@ def on_save(*args):
avalon.logger.info("Running callback on save..")
- update_task_from_path(hou.hipFile.path())
-
nodes = lib.get_id_required_nodes()
for node, new_id in lib.generate_ids(nodes):
lib.set_id(node, new_id, overwrite=False)
@@ -68,8 +63,6 @@ def on_open(*args):
avalon.logger.info("Running callback on open..")
- update_task_from_path(hou.hipFile.path())
-
if any_outdated():
from ..widgets import popup
diff --git a/pype/houdini/lib.py b/pype/houdini/lib.py
index e1e95912ee..10b5386a2e 100644
--- a/pype/houdini/lib.py
+++ b/pype/houdini/lib.py
@@ -205,7 +205,7 @@ def validate_fps():
"""
- fps = lib.get_asset_fps()
+ fps = lib.get_asset()["data"]["fps"]
current_fps = hou.fps() # returns float
if current_fps != fps:
diff --git a/pype/lib.py b/pype/lib.py
index e163cc14fc..c097ad1f10 100644
--- a/pype/lib.py
+++ b/pype/lib.py
@@ -4,6 +4,7 @@ import logging
import importlib
import itertools
import contextlib
+import subprocess
from .vendor import pather
from .vendor.pather.error import ParseError
@@ -15,11 +16,66 @@ import avalon
log = logging.getLogger(__name__)
-def get_handle_irregular(asset):
- data = asset["data"]
- handle_start = data.get("handle_start", 0)
- handle_end = data.get("handle_end", 0)
- return (handle_start, handle_end)
+# Special naming case: underscored so it does not shadow the imported
+# subprocess module.
+def _subprocess(args):
+    """Convenience wrapper for running a subprocess and capturing its output."""
+
+ proc = subprocess.Popen(
+ args,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ stdin=subprocess.PIPE,
+ env=os.environ
+ )
+
+ output = proc.communicate()[0]
+
+ if proc.returncode != 0:
+        raise ValueError("\"{}\" was not successful: {}".format(args, output))
+
+    return output
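+
+# Usage sketch (hypothetical command; raises ValueError when the command
+# exits non-zero):
+#     output = _subprocess(["ffmpeg", "-version"])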
+
+
+def get_hierarchy(asset_name=None):
+ """
+ Obtain asset hierarchy path string from mongo db
+
+ Returns:
+ string: asset hierarchy path
+
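+    Example (hypothetical asset whose data.parents is ["ep01", "sq01"]):
+        >>> get_hierarchy("sh010")
+        'ep01/sq01'
+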
+ """
+ if not asset_name:
+ asset_name = io.Session.get("AVALON_ASSET", os.environ["AVALON_ASSET"])
+
+ asset_entity = io.find_one({
+ "type": 'asset',
+ "name": asset_name
+ })
+
+ not_set = "PARENTS_NOT_SET"
+ entity_parents = asset_entity.get("data", {}).get("parents", not_set)
+
+ # If entity already have parents then just return joined
+ if entity_parents != not_set:
+ return "/".join(entity_parents)
+
+ # Else query parents through visualParents and store result to entity
+ hierarchy_items = []
+ entity = asset_entity
+ while True:
+ parent_id = entity.get("data", {}).get("visualParent")
+ if not parent_id:
+ break
+ entity = io.find_one({"_id": parent_id})
+ hierarchy_items.append(entity["name"])
+
+ # Add parents to entity data for next query
+ entity_data = asset_entity.get("data", {})
+ entity_data["parents"] = hierarchy_items
+ io.update_many(
+ {"_id": asset_entity["_id"]},
+ {"$set": {"data": entity_data}}
+ )
+
+ return "/".join(hierarchy_items)
def add_tool_to_environment(tools):
@@ -138,45 +194,6 @@ def any_outdated():
return False
-def update_task_from_path(path):
- """Update the context using the current scene state.
-
- When no changes to the context it will not trigger an update.
- When the context for a file could not be parsed an error is logged but not
- raised.
-
- """
- if not path:
- log.warning("Can't update the current task. Scene is not saved.")
- return
-
- # Find the current context from the filename
- project = io.find_one({"type": "project"},
- projection={"config.template.work": True})
- template = project['config']['template']['work']
- # Force to use the registered to root to avoid using wrong paths
- template = pather.format(template, {"root": avalon.api.registered_root()})
- try:
- context = pather.parse(template, path)
- except ParseError:
- log.error("Can't update the current task. Unable to parse the "
- "task for: %s (pattern: %s)", path, template)
- return
-
- # Find the changes between current Session and the path's context.
- current = {
- "asset": avalon.api.Session["AVALON_ASSET"],
- "task": avalon.api.Session["AVALON_TASK"]
- # "app": avalon.api.Session["AVALON_APP"]
- }
- changes = {key: context[key] for key, current_value in current.items()
- if context[key] != current_value}
-
- if changes:
- log.info("Updating work task to: %s", context)
- avalon.api.update_current_task(**changes)
-
-
def _rreplace(s, a, b, n=1):
"""Replace a with b in string s from right side n times"""
return b.join(s.rsplit(a, n))
@@ -196,7 +213,7 @@ def version_up(filepath):
dirname = os.path.dirname(filepath)
basename, ext = os.path.splitext(os.path.basename(filepath))
- regex = "[._]v\d+"
+ regex = r"[._]v\d+"
matches = re.findall(regex, str(basename), re.IGNORECASE)
if not matches:
log.info("Creating version...")
@@ -204,7 +221,7 @@ def version_up(filepath):
new_basename = "{}{}".format(basename, new_label)
else:
label = matches[-1]
- version = re.search("\d+", label).group()
+ version = re.search(r"\d+", label).group()
padding = len(version)
new_version = int(version) + 1
@@ -312,140 +329,107 @@ def _get_host_name():
return _host.__name__.rsplit(".", 1)[-1]
-def collect_container_metadata(container):
- """Add additional data based on the current host
+def get_asset(asset_name=None):
+ entity_data_keys_from_project_when_miss = [
+ "frameStart", "frameEnd", "handleStart", "handleEnd", "fps",
+ "resolutionWidth", "resolutionHeight"
+ ]
- If the host application's lib module does not have a function to inject
- additional data it will return the input container
+ entity_keys_from_project_when_miss = []
+
+ alternatives = {
+ "handleStart": "handles",
+ "handleEnd": "handles"
+ }
+
+ defaults = {
+ "handleStart": 0,
+ "handleEnd": 0
+ }
+
+ if not asset_name:
+ asset_name = avalon.api.Session["AVALON_ASSET"]
+
+ asset_document = io.find_one({"name": asset_name, "type": "asset"})
+ if not asset_document:
+ raise TypeError("Entity \"{}\" was not found in DB".format(asset_name))
+
+ project_document = io.find_one({"type": "project"})
+
+ for key in entity_data_keys_from_project_when_miss:
+ if asset_document["data"].get(key):
+ continue
+
+ value = project_document["data"].get(key)
+ if value is not None or key not in alternatives:
+ asset_document["data"][key] = value
+ continue
+
+ alt_key = alternatives[key]
+ value = asset_document["data"].get(alt_key)
+ if value is not None:
+ asset_document["data"][key] = value
+ continue
+
+ value = project_document["data"].get(alt_key)
+ if value:
+ asset_document["data"][key] = value
+ continue
+
+ if key in defaults:
+ asset_document["data"][key] = defaults[key]
+
+ for key in entity_keys_from_project_when_miss:
+ if asset_document.get(key):
+ continue
+
+ value = project_document.get(key)
+ if value is not None or key not in alternatives:
+ asset_document[key] = value
+ continue
+
+ alt_key = alternatives[key]
+ value = asset_document.get(alt_key)
+ if value:
+ asset_document[key] = value
+ continue
+
+ value = project_document.get(alt_key)
+ if value:
+ asset_document[key] = value
+ continue
+
+ if key in defaults:
+ asset_document[key] = defaults[key]
+
+ return asset_document
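+
+# Usage sketch (hypothetical; assumes "AVALON_ASSET" is set in the avalon
+# Session and the asset exists in DB):
+#     asset = get_asset()
+#     fps = asset["data"]["fps"]  # project value is used when asset lacks it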
+
+
+def get_project():
+ io.install()
+ return io.find_one({"type": "project"})
+
+
+def get_version_from_path(file):
+ """
+ Finds version number in file path string
Args:
- container (dict): collection if representation data in host
+ file (string): file path
Returns:
- generator
- """
- # TODO: Improve method of getting the host lib module
- host_name = _get_host_name()
- package_name = "pype.{}.lib".format(host_name)
- hostlib = importlib.import_module(package_name)
-
- if not hasattr(hostlib, "get_additional_data"):
- return {}
-
- return hostlib.get_additional_data(container)
-
-
-def get_asset_fps():
- """Returns project's FPS, if not found will return 25 by default
-
- Returns:
- int, float
+ v: version number in string ('001')
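+
+    Example:
+        >>> get_version_from_path("shot010_v004.nk")
+        '004'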
"""
-
- key = "fps"
-
- # FPS from asset data (if set)
- asset_data = get_asset_data()
- if key in asset_data:
- return asset_data[key]
-
- # FPS from project data (if set)
- project_data = get_project_data()
- if key in project_data:
- return project_data[key]
-
- # Fallback to 25 FPS
- return 25.0
-
-
-def get_project_data():
- """Get the data of the current project
-
- The data of the project can contain things like:
- resolution
- fps
- renderer
-
- Returns:
- dict:
-
- """
-
- project_name = io.active_project()
- project = io.find_one({"name": project_name,
- "type": "project"},
- projection={"data": True})
-
- data = project.get("data", {})
-
- return data
-
-
-def get_asset_data(asset=None):
- """Get the data from the current asset
-
- Args:
- asset(str, Optional): name of the asset, eg:
-
- Returns:
- dict
- """
- asset_name = asset or avalon.api.Session["AVALON_ASSET"]
- document = io.find_one({"name": asset_name,
- "type": "asset"})
- data = document.get("data", {})
-
- return data
-
-
-def get_data_hierarchical_attr(entity, attr_name):
- vp_attr = 'visualParent'
- data = entity['data']
- value = data.get(attr_name, None)
- if value is not None:
- return value
- elif vp_attr in data:
- if data[vp_attr] is None:
- parent_id = entity['parent']
- else:
- parent_id = data[vp_attr]
- parent = io.find_one({"_id": parent_id})
- return get_data_hierarchical_attr(parent, attr_name)
- else:
- return None
-
-
-def get_avalon_project_config_schema():
- schema = 'avalon-core:config-1.0'
- return schema
-
-
-def get_avalon_project_template_schema():
- schema = "avalon-core:project-2.0"
- return schema
-
-
-def get_avalon_project_template():
- from pypeapp import Anatomy
-
- """
- Get avalon template
-
- Returns:
- dictionary with templates
- """
- templates = Anatomy().templates
- proj_template = {}
- proj_template['workfile'] = templates["avalon"]["workfile"]
- proj_template['work'] = templates["avalon"]["work"]
- proj_template['publish'] = templates["avalon"]["publish"]
- return proj_template
-
-
-def get_avalon_asset_template_schema():
- schema = "avalon-core:asset-2.0"
- return schema
+ pattern = re.compile(r"[\._]v([0-9]+)")
+ try:
+ return pattern.findall(file)[0]
+ except IndexError:
+ log.error(
+ "templates:get_version_from_workfile:"
+ "`{}` missing version string."
+ "Example `v004`".format(file)
+ )
def get_avalon_database():
@@ -455,31 +439,20 @@ def get_avalon_database():
def set_io_database():
- project = os.environ.get('AVALON_PROJECT', '')
- asset = os.environ.get('AVALON_ASSET', '')
- silo = os.environ.get('AVALON_SILO', '')
- os.environ['AVALON_PROJECT'] = project
- os.environ['AVALON_ASSET'] = asset
- os.environ['AVALON_SILO'] = silo
+ required_keys = ["AVALON_PROJECT", "AVALON_ASSET", "AVALON_SILO"]
+ for key in required_keys:
+ os.environ[key] = os.environ.get(key, "")
io.install()
def get_all_avalon_projects():
db = get_avalon_database()
- project_names = db.collection_names()
projects = []
- for name in project_names:
+ for name in db.collection_names():
projects.append(db[name].find_one({'type': 'project'}))
return projects
-def get_presets_path():
- templates = os.environ['PYPE_CONFIG']
- path_items = [templates, 'presets']
- filepath = os.path.sep.join(path_items)
- return filepath
-
-
def filter_pyblish_plugins(plugins):
"""
    This serves as a plugin filter / modifier for pyblish. It will load plugin
@@ -494,10 +467,18 @@ def filter_pyblish_plugins(plugins):
host = api.current_host()
+ presets = config.get_presets().get('plugins', {}).get(host, {}).get(
+ "publish", {}
+ )
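+    # Presets sketch (hypothetical plugin name); each option below is set
+    # as an attribute on the matching plugin class:
+    #     {"ValidateVersion": {"enabled": False, "optional": True}}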
+
# iterate over plugins
for plugin in plugins[:]:
+ # skip if there are no presets to process
+ if not presets:
+ continue
+
try:
- config_data = config.get_presets()['plugins'][host]["publish"][plugin.__name__] # noqa: E501
+ config_data = presets[plugin.__name__] # noqa: E501
except KeyError:
continue
@@ -510,3 +491,73 @@ def filter_pyblish_plugins(plugins):
option, value, plugin.__name__))
setattr(plugin, option, value)
+
+
+def get_subsets(asset_name,
+ regex_filter=None,
+ version=None,
+ representations=["exr", "dpx"]):
+ """
+ Query subsets with filter on name.
+
+    The method will return all found subsets together with their defined
+    versions and representations. A version can be specified by number;
+    otherwise the latest one is used. Representations can be filtered.
+
+ Arguments:
+ asset_name (str): asset (shot) name
+        regex_filter (str, optional): regex pattern to filter subset names
+        version (int, optional): version number; latest is used when not set
+        representations (list): representation names to include
+
+ Returns:
+        dict: subsets with 'version' and 'representations' keys
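+
+    Example (hypothetical subset names and documents):
+        >>> get_subsets("sh010", regex_filter=r"render.*")
+        {'renderMain': {'version': {...}, 'representations': [{...}]}}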
+ """
+ from avalon import io
+
+ # query asset from db
+ asset_io = io.find_one({"type": "asset",
+ "name": asset_name})
+
+ # check if anything returned
+ assert asset_io, "Asset not existing. \
+ Check correct name: `{}`".format(asset_name)
+
+ # create subsets query filter
+ filter_query = {"type": "subset", "parent": asset_io["_id"]}
+
+    # add regex filter string into query filter
+ if regex_filter:
+ filter_query.update({"name": {"$regex": r"{}".format(regex_filter)}})
+ else:
+ filter_query.update({"name": {"$regex": r'.*'}})
+
+ # query all assets
+ subsets = [s for s in io.find(filter_query)]
+
+ assert subsets, "No subsets found. Check correct filter. Try this for start `r'.*'`: asset: `{}`".format(asset_name)
+
+ output_dict = {}
+ # Process subsets
+ for subset in subsets:
+ if not version:
+ version_sel = io.find_one({"type": "version",
+ "parent": subset["_id"]},
+ sort=[("name", -1)])
+ else:
+ assert isinstance(version, int), "version needs to be `int` type"
+ version_sel = io.find_one({"type": "version",
+ "parent": subset["_id"],
+ "name": int(version)})
+
+ find_dict = {"type": "representation",
+ "parent": version_sel["_id"]}
+
+ filter_repr = {"$or": [{"name": repr} for repr in representations]}
+
+ find_dict.update(filter_repr)
+ repres_out = [i for i in io.find(find_dict)]
+
+ if len(repres_out) > 0:
+            output_dict[subset["name"]] = {"version": version_sel,
+                                           "representations": repres_out}
+
+ return output_dict
diff --git a/pype/maya/lib.py b/pype/maya/lib.py
index ee2ef57e34..e54dac78f2 100644
--- a/pype/maya/lib.py
+++ b/pype/maya/lib.py
@@ -280,8 +280,8 @@ def collect_animation_data():
# build attributes
data = OrderedDict()
- data["startFrame"] = start
- data["endFrame"] = end
+ data["frameStart"] = start
+ data["frameEnd"] = end
data["handles"] = 0
data["step"] = 1.0
data["fps"] = fps
@@ -1858,16 +1858,16 @@ def set_context_settings():
# Todo (Wijnand): apply renderer and resolution of project
- project_data = lib.get_project_data()
- asset_data = lib.get_asset_data()
+ project_data = lib.get_project()["data"]
+ asset_data = lib.get_asset()["data"]
# Set project fps
fps = asset_data.get("fps", project_data.get("fps", 25))
set_scene_fps(fps)
# Set project resolution
- width_key = "resolution_width"
- height_key = "resolution_height"
+ width_key = "resolutionWidth"
+ height_key = "resolutionHeight"
width = asset_data.get(width_key, project_data.get(width_key, 1920))
height = asset_data.get(height_key, project_data.get(height_key, 1080))
@@ -1887,7 +1887,7 @@ def validate_fps():
"""
- fps = lib.get_asset_fps()
+ fps = lib.get_asset()["data"]["fps"]
current_fps = mel.eval('currentTimeUnitToFPS()') # returns float
if current_fps != fps:
diff --git a/pype/nuke/__init__.py b/pype/nuke/__init__.py
index b64f728771..42ca633e40 100644
--- a/pype/nuke/__init__.py
+++ b/pype/nuke/__init__.py
@@ -1,28 +1,15 @@
import os
import sys
+import logging
+
+import nuke
from avalon import api as avalon
from avalon.tools import workfiles
from pyblish import api as pyblish
-
-from .. import api
-
from pype.nuke import menu
-import logging
-
-from .lib import (
- create_write_node
-)
-
-import nuke
-
from pypeapp import Logger
-
-# #removing logger handler created in avalon_core
-# for name, handler in [(handler.get_name(), handler)
-# for handler in Logger.logging.root.handlers[:]]:
-# if "pype" not in str(name).lower():
-# Logger.logging.root.removeHandler(handler)
+from . import lib
self = sys.modules[__name__]
@@ -72,13 +59,14 @@ class NukeHandler(logging.Handler):
'''Adding Nuke Logging Handler'''
+log.info([handler.get_name() for handler in logging.root.handlers[:]])
nuke_handler = NukeHandler()
if nuke_handler.get_name() \
not in [handler.get_name()
for handler in logging.root.handlers[:]]:
logging.getLogger().addHandler(nuke_handler)
logging.getLogger().setLevel(logging.INFO)
-
+log.info([handler.get_name() for handler in logging.root.handlers[:]])
def reload_config():
"""Attempt to reload pipeline at run-time.
@@ -90,10 +78,7 @@ def reload_config():
import importlib
for module in (
- "app",
- "app.api",
"{}.api".format(AVALON_CONFIG),
- "{}.templates".format(AVALON_CONFIG),
"{}.nuke.actions".format(AVALON_CONFIG),
"{}.nuke.templates".format(AVALON_CONFIG),
"{}.nuke.menu".format(AVALON_CONFIG),
@@ -109,9 +94,8 @@ def reload_config():
def install():
-
- # api.set_avalon_workdir()
- # reload_config()
+    ''' Installing all requirements for Nuke host
+ '''
log.info("Registering Nuke plug-ins..")
pyblish.register_plugin_path(PUBLISH_PATH)
@@ -120,7 +104,7 @@ def install():
avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
-
+ workfile_settings = lib.WorkfileSettings()
# Disable all families except for the ones we explicitly want to see
family_states = [
"write",
@@ -130,22 +114,30 @@ def install():
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
- menu.install()
-
# Workfiles.
launch_workfiles = os.environ.get("WORKFILES_STARTUP")
if launch_workfiles:
nuke.addOnCreate(launch_workfiles_app, nodeClass="Root")
+ # Set context settings.
+ nuke.addOnCreate(workfile_settings.set_context_settings, nodeClass="Root")
+
+ menu.install()
+
+
def launch_workfiles_app():
+    '''Launch the Workfiles app once after the host has started
+ '''
if not self.workfiles_launched:
self.workfiles_launched = True
workfiles.show(os.environ["AVALON_WORKDIR"])
def uninstall():
+ '''Uninstalling host's integration
+ '''
log.info("Deregistering Nuke plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
@@ -154,8 +146,13 @@ def uninstall():
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
+ reload_config()
+ menu.uninstall()
+
+
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""
+
log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
instance, old_value, new_value))
diff --git a/pype/nuke/actions.py b/pype/nuke/actions.py
index 640e41a7de..c0c95e9080 100644
--- a/pype/nuke/actions.py
+++ b/pype/nuke/actions.py
@@ -1,6 +1,3 @@
-# absolute_import is needed to counter the `module has no cmds error` in Maya
-from __future__ import absolute_import
-
import pyblish.api
from avalon.nuke.lib import (
@@ -12,7 +9,7 @@ from ..action import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
- """Select invalid nodes in Maya when plug-in failed.
+ """Select invalid nodes in Nuke when plug-in failed.
To retrieve the invalid nodes this assumes a static `get_invalid()`
method is available on the plugin.
diff --git a/pype/nuke/lib.py b/pype/nuke/lib.py
index 6a57704fff..f182088457 100644
--- a/pype/nuke/lib.py
+++ b/pype/nuke/lib.py
@@ -1,10 +1,12 @@
import os
import sys
+import getpass
from collections import OrderedDict
from pprint import pprint
from avalon import api, io, lib
import avalon.nuke
import pype.api as pype
+
import nuke
from .templates import (
get_colorspace_preset,
@@ -12,13 +14,21 @@ from .templates import (
get_node_colorspace_preset
)
+from .templates import (
+ get_anatomy
+)
+# TODO: remove get_anatomy and import Anatomy directly here
+
from pypeapp import Logger
log = Logger().get_logger(__name__, "nuke")
self = sys.modules[__name__]
self._project = None
+
def onScriptLoad():
+ ''' Callback for ffmpeg support
+ '''
if nuke.env['LINUX']:
nuke.tcl('load ffmpegReader')
nuke.tcl('load ffmpegWriter')
@@ -36,6 +46,7 @@ def checkInventoryVersions():
and check if the node is having actual version. If not then it will color
it to red.
"""
+    # TODO: support all Loader nodes, not just Read
# get all Loader nodes by avalon attribute metadata
for each in nuke.allNodes():
@@ -43,8 +54,8 @@ def checkInventoryVersions():
container = avalon.nuke.parse_container(each)
if container:
- node = container["_tool"]
- avalon_knob_data = get_avalon_knob_data(node)
+ node = container["_node"]
+ avalon_knob_data = avalon.nuke.get_avalon_knob_data(node)
# get representation from io
representation = io.find_one({
@@ -75,47 +86,55 @@ def checkInventoryVersions():
def writes_version_sync():
+ ''' Callback synchronizing version of publishable write nodes
+ '''
+ # TODO: make it work with new write node group
try:
rootVersion = pype.get_version_from_path(nuke.root().name())
padding = len(rootVersion)
new_version = "v" + str("{" + ":0>{}".format(padding) + "}").format(
int(rootVersion)
)
- log.info("new_version: {}".format(new_version))
+ log.debug("new_version: {}".format(new_version))
except Exception:
return
for each in nuke.allNodes():
if each.Class() == 'Write':
- avalon_knob_data = get_avalon_knob_data(each)
+ avalon_knob_data = avalon.nuke.get_avalon_knob_data(each)
try:
if avalon_knob_data['families'] not in ["render"]:
- log.info(avalon_knob_data['families'])
+ log.debug(avalon_knob_data['families'])
continue
node_file = each['file'].value()
- log.info("node_file: {}".format(node_file))
node_version = "v" + pype.get_version_from_path(node_file)
- log.info("node_version: {}".format(node_version))
+ log.debug("node_version: {}".format(node_version))
node_new_file = node_file.replace(node_version, new_version)
each['file'].setValue(node_new_file)
+ if not os.path.isdir(os.path.dirname(node_new_file)):
+ log.warning("Path does not exist! I am creating it.")
+ os.makedirs(os.path.dirname(node_new_file), 0o766)
except Exception as e:
- log.debug(
+ log.warning(
"Write node: `{}` has no version in path: {}".format(each.name(), e))
def version_up_script():
+ ''' Raising working script's version
+ '''
import nukescripts
nukescripts.script_and_write_nodes_version_up()
def get_render_path(node):
-
+    ''' Generate render path from presets based on avalon knob data
+ '''
data = dict()
- data['avalon'] = get_avalon_knob_data(node)
+ data['avalon'] = avalon.nuke.get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
@@ -137,42 +156,85 @@ def get_render_path(node):
def format_anatomy(data):
- from .templates import (
- get_anatomy
- )
+    ''' Helper function for formatting anatomy paths
+
+ Arguments:
+ data (dict): dictionary with attributes used for formating
+
+ Return:
+        anatomy (dict): anatomy templates filled with data
+ '''
+ # TODO: perhaps should be nonPublic
anatomy = get_anatomy()
- log.info("__ anatomy.templates: {}".format(anatomy.templates))
+ log.debug("__ anatomy.templates: {}".format(anatomy.templates))
+
# TODO: perhaps should be in try!
padding = int(anatomy.templates['render']['padding'])
version = data.get("version", None)
if not version:
file = script_name()
data["version"] = pype.get_version_from_path(file)
-
+ project_document = pype.get_project()
data.update({
"root": api.Session["AVALON_PROJECTS"],
"subset": data["avalon"]["subset"],
"asset": data["avalon"]["asset"],
- "task": str(pype.get_task()).lower(),
+ "task": api.Session["AVALON_TASK"].lower(),
"family": data["avalon"]["family"],
- "project": {"name": pype.get_project_name(),
- "code": pype.get_project_code()},
+ "project": {"name": project_document["name"],
+ "code": project_document["data"].get("code", '')},
"representation": data["nuke_dataflow_writes"]["file_type"],
"app": data["application"]["application_dir"],
"hierarchy": pype.get_hierarchy(),
"frame": "#" * padding,
})
- log.info("__ data: {}".format(data))
- log.info("__ format_anatomy: {}".format(anatomy.format(data)))
return anatomy.format(data)
def script_name():
+ ''' Returns nuke script path
+ '''
return nuke.root().knob('name').value()
-def create_write_node(name, data):
+def add_button_write_to_read(node):
+ name = "createReadNode"
+ label = "Create Read"
+ value = "import write_to_read;write_to_read.write_to_read(nuke.thisNode())"
+ k = nuke.PyScript_Knob(name, label, value)
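+    # 0x1000 corresponds to nuke.STARTLINE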
+ k.setFlag(0x1000)
+ node.addKnob(k)
+
+
+def create_write_node(name, data, prenodes=None):
+    ''' Create a write node wrapped in a group node
+
+ Arguments:
+ name (str): name of node
+ data (dict): data to be imprinted
+ prenodes (list, optional): list of lists, definitions for nodes
+ to be created before write
+
+ Example:
+ prenodes = [(
+ "NameNode", # string
+ "NodeClass", # string
+            (   # tuple of (knobName, knobValue) pairs
+ ("knobName", "knobValue"),
+ ("knobName", "knobValue")
+ ),
+            (   # input node names
+ "firstPrevNodeName",
+ "secondPrevNodeName"
+ )
+ )
+ ]
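+
+        A minimal call sketch using the example above:
+            write = create_write_node("render_writeMain", data, prenodes)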
+
+ Return:
+ node (obj): group node with avalon data as Knobs
+ '''
+
nuke_dataflow_writes = get_node_dataflow_preset(**data)
nuke_colorspace_writes = get_node_colorspace_preset(**data)
application = lib.get_application(os.environ["AVALON_APP_NAME"])
@@ -183,7 +245,6 @@ def create_write_node(name, data):
"nuke_dataflow_writes": nuke_dataflow_writes,
"nuke_colorspace_writes": nuke_colorspace_writes
})
-
anatomy_filled = format_anatomy(data)
except Exception as e:
@@ -191,14 +252,15 @@ def create_write_node(name, data):
# build file path to workfiles
fpath = str(anatomy_filled["work"]["folder"]).replace("\\", "/")
- fpath = '{work}/renders/v{version}/{subset}.{frame}.{ext}'.format(
+ fpath = data["fpath_template"].format(
work=fpath, version=data["version"], subset=data["subset"],
frame=data["frame"],
- ext=data["nuke_dataflow_writes"]["file_type"])
+ ext=data["nuke_dataflow_writes"]["file_type"]
+ )
# create directory
if not os.path.isdir(os.path.dirname(fpath)):
- log.info("path does not exist")
+ log.warning("Path does not exist! I am creating it.")
os.makedirs(os.path.dirname(fpath), 0o766)
_data = OrderedDict({
@@ -219,17 +281,95 @@ def create_write_node(name, data):
log.debug(_data)
_data["frame_range"] = data.get("frame_range", None)
- log.info("__ _data3: {}".format(_data))
- instance = avalon.nuke.lib.add_write_node(
- name,
- **_data
- )
- instance = avalon.nuke.lib.imprint(instance, data["avalon"])
- add_rendering_knobs(instance)
- return instance
+
+    # TODO: change this to the new way
+ GN = nuke.createNode("Group", "name {}".format(name))
+
+ prev_node = None
+ with GN:
+ # creating pre-write nodes `prenodes`
+ if prenodes:
+            for node_name, klass, properties, set_input_to in prenodes:
+                # create node
+                now_node = nuke.createNode(klass, "name {}".format(node_name))
+
+                # add data to knobs
+                for k, v in properties:
+                    if k and v:
+                        now_node[k].setValue(str(v))
+
+                # connect to previous node
+                if set_input_to:
+                    if isinstance(set_input_to, (tuple, list)):
+                        for i, input_name in enumerate(set_input_to):
+                            input_node = nuke.toNode(input_name)
+                            now_node.setInput(i, input_node)
+                    elif isinstance(set_input_to, str):
+                        input_node = nuke.toNode(set_input_to)
+                        now_node.setInput(0, input_node)
+                else:
+                    now_node.setInput(0, prev_node)
+
+                # switch actual node to previous
+                prev_node = now_node
+ else:
+ prev_node = nuke.createNode("Input", "name rgba")
+
+ # creating write node
+ now_node = avalon.nuke.lib.add_write_node("inside_{}".format(name),
+ **_data
+ )
+ write_node = now_node
+ # connect to previous node
+ now_node.setInput(0, prev_node)
+
+        # switch actual node to previous
+ prev_node = now_node
+
+ now_node = nuke.createNode("Output", "name write")
+
+ # connect to previous node
+ now_node.setInput(0, prev_node)
+
+ # imprinting group node
+ GN = avalon.nuke.imprint(GN, data["avalon"])
+
+ divider = nuke.Text_Knob('')
+ GN.addKnob(divider)
+
+ add_rendering_knobs(GN)
+
+ # adding write to read button
+ add_button_write_to_read(GN)
+
+ divider = nuke.Text_Knob('')
+ GN.addKnob(divider)
+
+ # set tile color
+    # tile_color is expected as a hex string, e.g. "0xff0000ff"
+    tile_color = _data.get("tile_color", "0xff0000ff")
+    GN["tile_color"].setValue(int(tile_color, 16))
+
+ # add render button
+ lnk = nuke.Link_Knob("Render")
+ lnk.makeLink(write_node.name(), "Render")
+ lnk.setName("Render")
+ GN.addKnob(lnk)
+
+ # Deadline tab.
+ add_deadline_tab(GN)
+
+ return GN
def add_rendering_knobs(node):
+ ''' Adds additional rendering knobs to given node
+
+ Arguments:
+ node (obj): nuke node object to be fixed
+
+ Return:
+ node (obj): with added knobs
+ '''
if "render" not in node.knobs():
knob = nuke.Boolean_Knob("render", "Render")
knob.setFlag(0x1000)
@@ -242,316 +382,391 @@ def add_rendering_knobs(node):
return node
-def set_viewers_colorspace(viewer):
- assert isinstance(viewer, dict), log.error(
- "set_viewers_colorspace(): argument should be dictionary")
+def add_deadline_tab(node):
+ node.addKnob(nuke.Tab_Knob("Deadline"))
- filter_knobs = [
- "viewerProcess",
- "wipe_position"
- ]
- viewers = [n for n in nuke.allNodes() if n.Class() == 'Viewer']
- erased_viewers = []
-
- for v in viewers:
- v['viewerProcess'].setValue(str(viewer["viewerProcess"]))
- if str(viewer["viewerProcess"]) not in v['viewerProcess'].value():
- copy_inputs = v.dependencies()
- copy_knobs = {k: v[k].value() for k in v.knobs()
- if k not in filter_knobs}
- pprint(copy_knobs)
- # delete viewer with wrong settings
- erased_viewers.append(v['name'].value())
- nuke.delete(v)
-
- # create new viewer
- nv = nuke.createNode("Viewer")
-
- # connect to original inputs
- for i, n in enumerate(copy_inputs):
- nv.setInput(i, n)
-
- # set coppied knobs
- for k, v in copy_knobs.items():
- print(k, v)
- nv[k].setValue(v)
-
- # set viewerProcess
- nv['viewerProcess'].setValue(str(viewer["viewerProcess"]))
-
- if erased_viewers:
- log.warning(
- "Attention! Viewer nodes {} were erased."
- "It had wrong color profile".format(erased_viewers))
+ knob = nuke.Int_Knob("deadlineChunkSize", "Chunk Size")
+ knob.setValue(1)
+ node.addKnob(knob)
-def set_root_colorspace(root_dict):
- assert isinstance(root_dict, dict), log.error(
- "set_root_colorspace(): argument should be dictionary")
+def create_backdrop(label="", color=None, layer=0,
+ nodes=None):
+ """
+ Create Backdrop node
- # first set OCIO
- if nuke.root()["colorManagement"].value() not in str(root_dict["colorManagement"]):
- nuke.root()["colorManagement"].setValue(
- str(root_dict["colorManagement"]))
+ Arguments:
+ color (str): nuke compatible string with color code
+ layer (int): layer of node usually used (self.pos_layer - 1)
+ label (str): the message
+ nodes (list): list of nodes to be wrapped into backdrop
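+
+    Example:
+        # a minimal sketch wrapping the current selection
+        bdn = create_backdrop(label="plates", color='0x7171C6ff',
+                              layer=1, nodes=nuke.selectedNodes())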
- # second set ocio version
- if nuke.root()["OCIO_config"].value() not in str(root_dict["OCIO_config"]):
- nuke.root()["OCIO_config"].setValue(str(root_dict["OCIO_config"]))
+ """
+ assert isinstance(nodes, list), "`nodes` should be a list of nodes"
- # then set the rest
- for knob, value in root_dict.items():
- if nuke.root()[knob].value() not in value:
- nuke.root()[knob].setValue(str(value))
- log.info("nuke.root()['{}'] changed to: {}".format(knob, value))
+ # Calculate bounds for the backdrop node.
+ bdX = min([node.xpos() for node in nodes])
+ bdY = min([node.ypos() for node in nodes])
+ bdW = max([node.xpos() + node.screenWidth() for node in nodes]) - bdX
+ bdH = max([node.ypos() + node.screenHeight() for node in nodes]) - bdY
+
+ # Expand the bounds to leave a little border. Elements are offsets
+ # for left, top, right and bottom edges respectively
+ left, top, right, bottom = (-20, -65, 20, 60)
+ bdX += left
+ bdY += top
+ bdW += (right - left)
+ bdH += (bottom - top)
+
+ bdn = nuke.createNode("BackdropNode")
+ bdn["z_order"].setValue(layer)
+
+ if color:
+ bdn["tile_color"].setValue(int(color, 16))
+
+ bdn["xpos"].setValue(bdX)
+ bdn["ypos"].setValue(bdY)
+ bdn["bdwidth"].setValue(bdW)
+ bdn["bdheight"].setValue(bdH)
+
+ if label:
+ bdn["label"].setValue(label)
+
+ bdn["note_font_size"].setValue(20)
+ return bdn
-def set_writes_colorspace(write_dict):
- assert isinstance(write_dict, dict), log.error(
- "set_root_colorspace(): argument should be dictionary")
- log.info("set_writes_colorspace(): {}".format(write_dict))
+class WorkfileSettings(object):
+ """
+    Set all root settings for the current workfile.
+
+    This object applies every possible root setting to the workfile,
+    including colorspace, frame ranges and resolution format. It can set
+    them on the root node or on any given nodes.
-def set_colorspace():
+ Arguments:
+ root (node): nuke's root node
+ nodes (list): list of nuke's nodes
+ nodes_filter (list): filtering classes for nodes
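+
+    Example:
+        # a minimal sketch of the intended use
+        wfs = WorkfileSettings()
+        wfs.set_context_settings()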
- nuke_colorspace = get_colorspace_preset().get("nuke", None)
+ """
- try:
- set_root_colorspace(nuke_colorspace["root"])
- except AttributeError:
- log.error(
- "set_colorspace(): missing `root` settings in template")
- try:
- set_viewers_colorspace(nuke_colorspace["viewer"])
- except AttributeError:
- log.error(
- "set_colorspace(): missing `viewer` settings in template")
- try:
- set_writes_colorspace(nuke_colorspace["write"])
- except AttributeError:
- log.error(
- "set_colorspace(): missing `write` settings in template")
+ def __init__(self,
+ root_node=None,
+ nodes=None,
+ **kwargs):
+ self._project = kwargs.get(
+ "project") or io.find_one({"type": "project"})
+ self._asset = kwargs.get("asset_name") or api.Session["AVALON_ASSET"]
+ self._asset_entity = pype.get_asset(self._asset)
+ self._root_node = root_node or nuke.root()
+ self._nodes = self.get_nodes(nodes=nodes)
- try:
- for key in nuke_colorspace:
- log.info("{}".format(key))
- except TypeError:
- log.error("Nuke is not in templates! \n\n\n"
- "contact your supervisor!")
+ self.data = kwargs
+ def get_nodes(self, nodes=None, nodes_filter=None):
+        # return all nodes, or only those matching the filter classes
-def reset_frame_range_handles():
- """Set frame range to current asset"""
+ if not isinstance(nodes, list) and not isinstance(nodes_filter, list):
+ return [n for n in nuke.allNodes()]
+ elif not isinstance(nodes, list) and isinstance(nodes_filter, list):
+ nodes = list()
+            for node_filter in nodes_filter:
+                nodes.extend(nuke.allNodes(filter=node_filter))
+ return nodes
+        elif isinstance(nodes, list) and not isinstance(nodes_filter, list):
+            return [n for n in nodes]
+        elif isinstance(nodes, list) and isinstance(nodes_filter, list):
+            return [n for n in nodes
+                    if any(f in n.Class() for f in nodes_filter)]
- root = nuke.root()
- name = api.Session["AVALON_ASSET"]
- asset = io.find_one({"name": name, "type": "asset"})
+ def set_viewers_colorspace(self, viewer_dict):
+ ''' Adds correct colorspace to viewer
- if "data" not in asset:
- msg = "Asset {} don't have set any 'data'".format(name)
- log.warning(msg)
- nuke.message(msg)
- return
- data = asset["data"]
+ Arguments:
+ viewer_dict (dict): adjustments from presets
- missing_cols = []
- check_cols = ["fps", "fstart", "fend", "handle_start", "handle_end"]
+ '''
+ assert isinstance(viewer_dict, dict), log.error(
+ "set_viewers_colorspace(): argument should be dictionary")
- for col in check_cols:
- if col not in data:
- missing_cols.append(col)
+ filter_knobs = [
+ "viewerProcess",
+ "wipe_position"
+ ]
- if len(missing_cols) > 0:
- missing = ", ".join(missing_cols)
- msg = "'{}' are not set for asset '{}'!".format(missing, name)
- log.warning(msg)
- nuke.message(msg)
- return
+ erased_viewers = []
+ for v in [n for n in self._nodes
+ if "Viewer" in n.Class()]:
+ v['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
+ if str(viewer_dict["viewerProcess"]) \
+ not in v['viewerProcess'].value():
+ copy_inputs = v.dependencies()
+ copy_knobs = {k: v[k].value() for k in v.knobs()
+ if k not in filter_knobs}
- # get handles values
- handles = avalon.nuke.get_handles(asset)
- handle_start, handle_end = pype.get_handle_irregular(asset)
+ # delete viewer with wrong settings
+ erased_viewers.append(v['name'].value())
+ nuke.delete(v)
- fps = asset["data"]["fps"]
- edit_in = int(asset["data"]["fstart"]) - handle_start
- edit_out = int(asset["data"]["fend"]) + handle_end
+ # create new viewer
+ nv = nuke.createNode("Viewer")
- root["fps"].setValue(fps)
- root["first_frame"].setValue(edit_in)
- root["last_frame"].setValue(edit_out)
+ # connect to original inputs
+ for i, n in enumerate(copy_inputs):
+ nv.setInput(i, n)
- log.info("__ handles: `{}`".format(handles))
- log.info("__ handle_start: `{}`".format(handle_start))
- log.info("__ handle_end: `{}`".format(handle_end))
- log.info("__ edit_in: `{}`".format(edit_in))
- log.info("__ edit_out: `{}`".format(edit_out))
- log.info("__ fps: `{}`".format(fps))
+                # set copied knobs
+ for k, v in copy_knobs.items():
+ nv[k].setValue(v)
- # setting active viewers
- nuke.frame(int(asset["data"]["fstart"]))
+ # set viewerProcess
+ nv['viewerProcess'].setValue(str(viewer_dict["viewerProcess"]))
- try:
- vv = nuke.activeViewer().node()
- except AttributeError:
- log.error("No active viewer. Select any node and hit num `1`")
- return
+ if erased_viewers:
+ log.warning(
+ "Attention! Viewer nodes {} were erased."
+ "It had wrong color profile".format(erased_viewers))
- range = '{0}-{1}'.format(
- int(asset["data"]["fstart"]),
- int(asset["data"]["fend"]))
+ def set_root_colorspace(self, root_dict):
+ ''' Adds correct colorspace to root
- vv['frame_range'].setValue(range)
- vv['frame_range_lock'].setValue(True)
+ Arguments:
+            root_dict (dict): adjustments from presets
- log.info("_frameRange: {}".format(range))
- log.info("frameRange: {}".format(vv['frame_range'].value()))
+ '''
+ assert isinstance(root_dict, dict), log.error(
+ "set_root_colorspace(): argument should be dictionary")
- vv['frame_range'].setValue(range)
- vv['frame_range_lock'].setValue(True)
+ # first set OCIO
+ if self._root_node["colorManagement"].value() \
+ not in str(root_dict["colorManagement"]):
+ self._root_node["colorManagement"].setValue(
+ str(root_dict["colorManagement"]))
- # adding handle_start/end to root avalon knob
- if not avalon.nuke.set_avalon_knob_data(root, {
- "handle_start": handle_start,
- "handle_end": handle_end
- }):
- log.warning("Cannot set Avalon knob to Root node!")
+ # second set ocio version
+ if self._root_node["OCIO_config"].value() \
+ not in str(root_dict["OCIO_config"]):
+ self._root_node["OCIO_config"].setValue(
+ str(root_dict["OCIO_config"]))
+ # then set the rest
+ for knob, value in root_dict.items():
+ if self._root_node[knob].value() not in value:
+ self._root_node[knob].setValue(str(value))
+ log.debug("nuke.root()['{}'] changed to: {}".format(
+ knob, value))
-def get_avalon_knob_data(node):
- import toml
- try:
- data = toml.loads(node['avalon'].value())
- except Exception:
- return None
- return data
+ def set_writes_colorspace(self, write_dict):
+ ''' Adds correct colorspace to write node dict
+ Arguments:
+ write_dict (dict): nuke write node as dictionary
-def reset_resolution():
- """Set resolution to project resolution."""
- log.info("Reseting resolution")
- project = io.find_one({"type": "project"})
- asset = api.Session["AVALON_ASSET"]
- asset = io.find_one({"name": asset, "type": "asset"})
+ '''
+ # TODO: complete this function so any write node in
+ # scene will have fixed colorspace following presets for the project
+ assert isinstance(write_dict, dict), log.error(
+ "set_root_colorspace(): argument should be dictionary")
- try:
- width = asset.get('data', {}).get('resolution_width', 1920)
- height = asset.get('data', {}).get('resolution_height', 1080)
- pixel_aspect = asset.get('data', {}).get('pixel_aspect', 1)
- bbox = asset.get('data', {}).get('crop', "0.0.1920.1080")
+ log.debug("__ set_writes_colorspace(): {}".format(write_dict))
- if bbox not in "0.0.1920.1080":
+ def set_colorspace(self):
+        ''' Set colorspace following presets
+ '''
+ nuke_colorspace = get_colorspace_preset().get("nuke", None)
+
+ try:
+ self.set_root_colorspace(nuke_colorspace["root"])
+ except AttributeError:
+ log.error(
+ "set_colorspace(): missing `root` settings in template")
+ try:
+ self.set_viewers_colorspace(nuke_colorspace["viewer"])
+ except AttributeError:
+ log.error(
+ "set_colorspace(): missing `viewer` settings in template")
+ try:
+ self.set_writes_colorspace(nuke_colorspace["write"])
+ except AttributeError:
+ log.error(
+ "set_colorspace(): missing `write` settings in template")
+
+ try:
+ for key in nuke_colorspace:
+ log.debug("Preset's colorspace key: {}".format(key))
+ except TypeError:
+ log.error("Nuke is not in templates! \n\n\n"
+ "contact your supervisor!")
+
+ def reset_frame_range_handles(self):
+ """Set frame range to current asset"""
+
+ if "data" not in self._asset_entity:
+ msg = "Asset {} don't have set any 'data'".format(self._asset)
+ log.warning(msg)
+ nuke.message(msg)
+ return
+ data = self._asset_entity["data"]
+
+ missing_cols = []
+ check_cols = ["fps", "frameStart", "frameEnd",
+ "handleStart", "handleEnd"]
+
+ for col in check_cols:
+ if col not in data:
+ missing_cols.append(col)
+
+ if len(missing_cols) > 0:
+ missing = ", ".join(missing_cols)
+ msg = "'{}' are not set for asset '{}'!".format(
+ missing, self._asset)
+ log.warning(msg)
+ nuke.message(msg)
+ return
+
+ # get handles values
+ handle_start = data["handleStart"]
+ handle_end = data["handleEnd"]
+
+ fps = data["fps"]
+ frame_start = int(data["frameStart"]) - handle_start
+ frame_end = int(data["frameEnd"]) + handle_end
+
+ self._root_node["fps"].setValue(fps)
+ self._root_node["first_frame"].setValue(frame_start)
+ self._root_node["last_frame"].setValue(frame_end)
+
+ # setting active viewers
+ try:
+ nuke.frame(int(data["frameStart"]))
+ except Exception as e:
+ log.warning("no viewer in scene: `{}`".format(e))
+
+        frame_range = '{0}-{1}'.format(
+            int(data["frameStart"]),
+            int(data["frameEnd"]))
+
+        for node in nuke.allNodes(filter="Viewer"):
+            node['frame_range'].setValue(frame_range)
+            node['frame_range_lock'].setValue(True)
+
+ # adding handle_start/end to root avalon knob
+ if not avalon.nuke.imprint(self._root_node, {
+ "handleStart": int(handle_start),
+ "handleEnd": int(handle_end)
+ }):
+ log.warning("Cannot set Avalon knob to Root node!")
+
+ def reset_resolution(self):
+ """Set resolution to project resolution."""
+ log.info("Reseting resolution")
+ project = io.find_one({"type": "project"})
+ asset = api.Session["AVALON_ASSET"]
+ asset = io.find_one({"name": asset, "type": "asset"})
+ asset_data = asset.get('data', {})
+
+        # convert only when present so the missing-attribute check below
+        # can still catch None values
+        width = asset_data.get(
+            'resolutionWidth', asset_data.get('resolution_width'))
+        height = asset_data.get(
+            'resolutionHeight', asset_data.get('resolution_height'))
+
+        data = {
+            "width": int(width) if width is not None else None,
+            "height": int(height) if height is not None else None,
+            "pixel_aspect": asset_data.get(
+                'pixelAspect',
+                asset_data.get('pixel_aspect', 1)),
+            "name": project["name"]
+        }
+
+        if any(x is None for x in data.values()):
+            log.error(
+                "Missing shot attributes in DB."
+                "\nContact your supervisor!"
+                "\n\nWidth: `{width}`"
+                "\nHeight: `{height}`"
+                "\nPixel Aspect: `{pixel_aspect}`".format(**data)
+ )
+
+ bbox = self._asset_entity.get('data', {}).get('crop')
+
+ if bbox:
try:
x, y, r, t = bbox.split(".")
+ data.update(
+ {
+ "x": int(x),
+ "y": int(y),
+ "r": int(r),
+ "t": int(t),
+ }
+ )
except Exception as e:
- x = 0
- y = 0
- r = width
- t = height
bbox = None
- log.error("{}: {} \nFormat:Crop need to be set with dots, example: "
- "0.0.1920.1080, /nSetting to default".format(__name__, e))
- else:
- bbox = None
+ log.error(
+ "{}: {} \nFormat:Crop need to be set with dots, example: "
+ "0.0.1920.1080, /nSetting to default".format(__name__, e)
+ )
- except KeyError:
- log.warning(
- "No resolution information found for \"{0}\".".format(
- project["name"]
+ existing_format = None
+        for nuke_format in nuke.formats():
+            if data["name"] == nuke_format.name():
+                existing_format = nuke_format
+ break
+
+ if existing_format:
+ # Enforce existing format to be correct.
+ existing_format.setWidth(data["width"])
+ existing_format.setHeight(data["height"])
+ existing_format.setPixelAspect(data["pixel_aspect"])
+
+ if bbox:
+ existing_format.setX(data["x"])
+ existing_format.setY(data["y"])
+ existing_format.setR(data["r"])
+ existing_format.setT(data["t"])
+ else:
+ format_string = self.make_format_string(**data)
+ log.info("Creating new format: {}".format(format_string))
+ nuke.addFormat(format_string)
+
+ nuke.root()["format"].setValue(data["name"])
+ log.info("Format is set.")
+
+ def make_format_string(self, **kwargs):
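+        # e.g. width=1920, height=1080, pixel_aspect=1.0, name="myProject"
+        # yields "1920 1080 1.00 myProject"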
+ if kwargs.get("r"):
+ return (
+ "{width} "
+ "{height} "
+ "{x} "
+ "{y} "
+ "{r} "
+ "{t} "
+ "{pixel_aspect:.2f} "
+ "{name}".format(**kwargs)
)
- )
- return
-
- used_formats = list()
- for f in nuke.formats():
- if project["name"] in str(f.name()):
- used_formats.append(f)
else:
- format_name = project["name"] + "_1"
+ return (
+ "{width} "
+ "{height} "
+ "{pixel_aspect:.2f} "
+ "{name}".format(**kwargs)
+ )
- crnt_fmt_str = ""
- if used_formats:
- check_format = used_formats[-1]
- format_name = "{}_{}".format(
- project["name"],
- int(used_formats[-1].name()[-1]) + 1
- )
- log.info(
- "Format exists: {}. "
- "Will create new: {}...".format(
- used_formats[-1].name(),
- format_name)
- )
- crnt_fmt_kargs = {
- "width": (check_format.width()),
- "height": (check_format.height()),
- "pixel_aspect": float(check_format.pixelAspect())
- }
- if bbox:
- crnt_fmt_kargs.update({
- "x": int(check_format.x()),
- "y": int(check_format.y()),
- "r": int(check_format.r()),
- "t": int(check_format.t()),
- })
- crnt_fmt_str = make_format_string(**crnt_fmt_kargs)
- log.info("crnt_fmt_str: {}".format(crnt_fmt_str))
-
- new_fmt_kargs = {
- "width": int(width),
- "height": int(height),
- "pixel_aspect": float(pixel_aspect),
- "project_name": format_name
- }
- if bbox:
- new_fmt_kargs.update({
- "x": int(x),
- "y": int(y),
- "r": int(r),
- "t": int(t),
- })
-
- new_fmt_str = make_format_string(**new_fmt_kargs)
- log.info("new_fmt_str: {}".format(new_fmt_str))
-
- if new_fmt_str not in crnt_fmt_str:
- make_format(frm_str=new_fmt_str,
- project_name=new_fmt_kargs["project_name"])
-
- log.info("Format is set")
-
-
-def make_format_string(**args):
- if args.get("r"):
- return (
- "{width} "
- "{height} "
- "{x} "
- "{y} "
- "{r} "
- "{t} "
- "{pixel_aspect:.2f}".format(**args)
- )
- else:
- return (
- "{width} "
- "{height} "
- "{pixel_aspect:.2f}".format(**args)
- )
-
-
-def make_format(**args):
- log.info("Format does't exist, will create: \n{}".format(args))
- nuke.addFormat("{frm_str} "
- "{project_name}".format(**args))
- nuke.root()["format"].setValue("{project_name}".format(**args))
-
-
-def set_context_settings():
- # replace reset resolution from avalon core to pype's
- reset_resolution()
- # replace reset resolution from avalon core to pype's
- reset_frame_range_handles()
- # add colorspace menu item
- set_colorspace()
+ def set_context_settings(self):
+ # replace reset resolution from avalon core to pype's
+ self.reset_resolution()
+ # replace reset resolution from avalon core to pype's
+ self.reset_frame_range_handles()
+ # add colorspace menu item
+ self.set_colorspace()
def get_hierarchical_attr(entity, attr, default=None):
@@ -576,60 +791,6 @@ def get_hierarchical_attr(entity, attr, default=None):
return get_hierarchical_attr(parent, attr)
-# TODO: bellow functions are wip and needs to be check where they are used
-# ------------------------------------
-
-#
-# def update_frame_range(start, end, root=None):
-# """Set Nuke script start and end frame range
-#
-# Args:
-# start (float, int): start frame
-# end (float, int): end frame
-# root (object, Optional): root object from nuke's script
-#
-# Returns:
-# None
-#
-# """
-#
-# knobs = {
-# "first_frame": start,
-# "last_frame": end
-# }
-#
-# with avalon.nuke.viewer_update_and_undo_stop():
-# for key, value in knobs.items():
-# if root:
-# root[key].setValue(value)
-# else:
-# nuke.root()[key].setValue(value)
-#
-# #
-# def get_additional_data(container):
-# """Get Nuke's related data for the container
-#
-# Args:
-# container(dict): the container found by the ls() function
-#
-# Returns:
-# dict
-# """
-#
-# node = container["_tool"]
-# tile_color = node['tile_color'].value()
-# if tile_color is None:
-# return {}
-#
-# hex = '%08x' % tile_color
-# rgba = [
-# float(int(hex[0:2], 16)) / 255.0,
-# float(int(hex[2:4], 16)) / 255.0,
-# float(int(hex[4:6], 16)) / 255.0
-# ]
-#
-# return {"color": Qt.QtGui.QColor().fromRgbF(rgba[0], rgba[1], rgba[2])}
-
def get_write_node_template_attr(node):
''' Gets all defined data from presets
@@ -637,7 +798,7 @@ def get_write_node_template_attr(node):
'''
# get avalon data from node
data = dict()
- data['avalon'] = get_avalon_knob_data(node)
+ data['avalon'] = avalon.nuke.get_avalon_knob_data(node)
data_preset = {
"class": data['avalon']['family'],
"preset": data['avalon']['families']
@@ -655,7 +816,7 @@ def get_write_node_template_attr(node):
# adding dataflow template
{correct_data.update({k: v})
for k, v in nuke_dataflow_writes.items()
- if k not in ["id", "previous"]}
+ if k not in ["_id", "_previous"]}
# adding colorspace template
{correct_data.update({k: v})
@@ -663,3 +824,297 @@ def get_write_node_template_attr(node):
# fix badly encoded data
return avalon.nuke.lib.fix_data_for_node_create(correct_data)
+
+
+class BuildWorkfile(WorkfileSettings):
+ """
+    Build the first version of a workfile.
+
+    Settings are taken from presets and the db. All subsets at their
+    latest version are added for the defined representations.
+
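+    Example:
+        # a minimal sketch; the menu item triggers it the same way
+        BuildWorkfile().process()
+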
+ """
+ xpos = 0
+ ypos = 0
+ xpos_size = 80
+ ypos_size = 90
+ xpos_gap = 50
+ ypos_gap = 50
+ pos_layer = 10
+
+ def __init__(self,
+ root_path=None,
+ root_node=None,
+ nodes=None,
+ to_script=None,
+ **kwargs):
+ """
+        Initialize the workfile builder.
+
+        Arguments:
+            root_path (str): project root path
+            root_node (nuke.Node): root node of the script
+            nodes (list): list of nuke.Node
+            nodes_effects (dict): dictionary with subsets
+
+ Example:
+ nodes_effects = {
+ "plateMain": {
+ "nodes": [
+ [("Class", "Reformat"),
+ ("resize", "distort"),
+ ("flip", True)],
+
+ [("Class", "Grade"),
+ ("blackpoint", 0.5),
+ ("multiply", 0.4)]
+ ]
+ },
+ }
+
+ """
+
+ WorkfileSettings.__init__(self,
+ root_node=root_node,
+ nodes=nodes,
+ **kwargs)
+ self.to_script = to_script
+ # collect data for formating
+ data = {
+ "root": root_path or api.Session["AVALON_PROJECTS"],
+ "project": {"name": self._project["name"],
+ "code": self._project["data"].get("code", '')},
+ "asset": self._asset or os.environ["AVALON_ASSET"],
+ "task": kwargs.get("task") or api.Session["AVALON_TASK"].lower(),
+ "hierarchy": kwargs.get("hierarchy") or pype.get_hierarchy(),
+ "version": kwargs.get("version", {}).get("name", 1),
+ "user": getpass.getuser(),
+ "comment": "firstBuild"
+ }
+
+ # get presets from anatomy
+ anatomy = get_anatomy()
+ # format anatomy
+ anatomy_filled = anatomy.format(data)
+
+ # get dir and file for workfile
+ self.work_dir = anatomy_filled["avalon"]["work"]
+ self.work_file = anatomy_filled["avalon"]["workfile"] + ".nk"
+
+ def save_script_as(self, path=None):
+ # first clear anything in open window
+ nuke.scriptClear()
+
+        if not path:
+            dirname = self.work_dir
+            path = os.path.join(
+                self.work_dir,
+                self.work_file).replace("\\", "/")
+        else:
+            dirname = os.path.dirname(path)
+
+        # make sure the folder exists
+        if not os.path.exists(dirname):
+            os.makedirs(dirname)
+
+ # save script to path
+ nuke.scriptSaveAs(path)
+
+ def process(self,
+ regex_filter=None,
+ version=None,
+ representations=["exr", "dpx", "lutJson"]):
+ """
+        Build the workfile from all available subsets.
+
+        Args:
+            regex_filter (raw string): regex pattern to filter out subsets
+            version (int): define a particular version, None gets last
+            representations (list): representation names to be loaded
+
+ """
+
+ if not self.to_script:
+ # save the script
+ self.save_script_as()
+
+ # create viewer and reset frame range
+ viewer = self.get_nodes(nodes_filter=["Viewer"])
+ if not viewer:
+ vn = nuke.createNode("Viewer")
+ vn["xpos"].setValue(self.xpos)
+ vn["ypos"].setValue(self.ypos)
+ else:
+ vn = viewer[-1]
+
+ # move position
+ self.position_up()
+
+ wn = self.write_create()
+ wn["xpos"].setValue(self.xpos)
+ wn["ypos"].setValue(self.ypos)
+ wn["render"].setValue(True)
+ vn.setInput(0, wn)
+
+ bdn = self.create_backdrop(label="Render write \n\n\n\nOUTPUT",
+ color='0xcc1102ff', layer=-1,
+ nodes=[wn])
+
+ # move position
+ self.position_up(4)
+
+ # set frame range for new viewer
+ self.reset_frame_range_handles()
+
+ # get all available representations
+ subsets = pype.get_subsets(self._asset,
+ regex_filter=regex_filter,
+ version=version,
+ representations=representations)
+
+ nodes_backdrop = list()
+
+ for name, subset in subsets.items():
+ if "lut" in name:
+ continue
+ log.info("Building Loader to: `{}`".format(name))
+ version = subset["version"]
+ log.info("Version to: `{}`".format(version["name"]))
+ representations = subset["representaions"]
+            for repre in representations:
+                rn = self.read_loader(repre)
+ rn["xpos"].setValue(self.xpos)
+ rn["ypos"].setValue(self.ypos)
+ wn.setInput(0, rn)
+
+            # get additional nodes
+ lut_subset = [s for n, s in subsets.items()
+ if "lut{}".format(name.lower()) in n.lower()]
+ log.debug(">> lut_subset: `{}`".format(lut_subset))
+
+ if len(lut_subset) > 0:
+ lsub = lut_subset[0]
+ fxn = self.effect_loader(lsub["representaions"][-1])
+ fxn_ypos = fxn["ypos"].value()
+ fxn["ypos"].setValue(fxn_ypos - 100)
+ nodes_backdrop.append(fxn)
+
+ nodes_backdrop.append(rn)
+ # move position
+ self.position_right()
+
+ bdn = self.create_backdrop(label="Loaded Reads",
+ color='0x2d7702ff', layer=-1,
+ nodes=nodes_backdrop)
+
+ def read_loader(self, representation):
+ """
+ Gets Loader plugin for image sequence or mov
+
+ Arguments:
+ representation (dict): avalon db entity
+
+ """
+ context = representation["context"]
+
+ loader_name = "LoadSequence"
+ if "mov" in context["representation"]:
+ loader_name = "LoadMov"
+
+ loader_plugin = None
+ for Loader in api.discover(api.Loader):
+ if Loader.__name__ != loader_name:
+ continue
+
+ loader_plugin = Loader
+
+ return api.load(Loader=loader_plugin,
+ representation=representation["_id"])
+
+ def effect_loader(self, representation):
+ """
+ Gets Loader plugin for effects
+
+ Arguments:
+ representation (dict): avalon db entity
+
+ """
+ context = representation["context"]
+
+ loader_name = "LoadLuts"
+
+ loader_plugin = None
+ for Loader in api.discover(api.Loader):
+ if Loader.__name__ != loader_name:
+ continue
+
+ loader_plugin = Loader
+
+ return api.load(Loader=loader_plugin,
+ representation=representation["_id"])
+
+ def write_create(self):
+ """
+        Create the render write node via the `CreateWriteRender` creator
+ """
+
+ Create_name = "CreateWriteRender"
+
+ creator_plugin = None
+ for Creator in api.discover(api.Creator):
+ if Creator.__name__ != Create_name:
+ continue
+
+ creator_plugin = Creator
+
+ return creator_plugin("render_writeMain", self._asset).process()
+
+ def create_backdrop(self, label="", color=None, layer=0,
+ nodes=None):
+ """
+ Create Backdrop node
+
+ Arguments:
+ color (str): nuke compatible string with color code
+ layer (int): layer of node usually used (self.pos_layer - 1)
+ label (str): the message
+ nodes (list): list of nodes to be wrapped into backdrop
+
+ """
+ assert isinstance(nodes, list), "`nodes` should be a list of nodes"
+ layer = self.pos_layer + layer
+
+        return create_backdrop(label=label, color=color, layer=layer,
+                               nodes=nodes)
+
+ def position_reset(self, xpos=0, ypos=0):
+ self.xpos = xpos
+ self.ypos = ypos
+
+ def position_right(self, multiply=1):
+ self.xpos += (self.xpos_size * multiply) + self.xpos_gap
+
+ def position_left(self, multiply=1):
+ self.xpos -= (self.xpos_size * multiply) + self.xpos_gap
+
+ def position_down(self, multiply=1):
+        # note: in Nuke's DAG the y axis grows downward
+        self.ypos += (self.ypos_size * multiply) + self.ypos_gap
+
+ def position_up(self, multiply=1):
+ self.ypos -= (self.ypos_size * multiply) + self.ypos_gap
diff --git a/pype/nuke/menu.py b/pype/nuke/menu.py
index 169ac81096..56111674a8 100644
--- a/pype/nuke/menu.py
+++ b/pype/nuke/menu.py
@@ -2,21 +2,25 @@ import nuke
from avalon.api import Session
from pype.nuke import lib
+from pypeapp import Logger
+log = Logger().get_logger(__name__, "nuke")
def install():
-
menubar = nuke.menu("Nuke")
menu = menubar.findItem(Session["AVALON_LABEL"])
-
+ workfile_settings = lib.WorkfileSettings()
# replace reset resolution from avalon core to pype's
name = "Reset Resolution"
new_name = "Set Resolution"
rm_item = [
(i, item) for i, item in enumerate(menu.items()) if name in item.name()
][0]
+
+ log.debug("Changing Item: {}".format(rm_item))
+ # rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
- menu.addCommand(new_name, lib.reset_resolution, index=rm_item[0])
+ menu.addCommand(new_name, workfile_settings.reset_resolution, index=(rm_item[0]))
# replace reset frame range from avalon core to pype's
name = "Reset Frame Range"
@@ -24,18 +28,41 @@ def install():
rm_item = [
(i, item) for i, item in enumerate(menu.items()) if name in item.name()
][0]
+ log.debug("Changing Item: {}".format(rm_item))
+ # rm_item[1].setEnabled(False)
menu.removeItem(rm_item[1].name())
- menu.addCommand(new_name, lib.reset_frame_range_handles, index=rm_item[0])
+ menu.addCommand(new_name, workfile_settings.reset_frame_range_handles, index=(rm_item[0]))
# add colorspace menu item
name = "Set colorspace"
menu.addCommand(
- name, lib.set_colorspace,
+ name, workfile_settings.set_colorspace,
index=(rm_item[0]+2)
)
+ log.debug("Adding menu item: {}".format(name))
+
+ # add workfile builder menu item
+ name = "Build First Workfile.."
+ menu.addCommand(
+ name, lib.BuildWorkfile().process,
+ index=(rm_item[0]+7)
+ )
+ log.debug("Adding menu item: {}".format(name))
# add item that applies all setting above
name = "Apply all settings"
menu.addCommand(
- name, lib.set_context_settings, index=(rm_item[0]+3)
+ name, workfile_settings.set_context_settings, index=(rm_item[0]+3)
)
+ log.debug("Adding menu item: {}".format(name))
+
+
+def uninstall():
+
+ menubar = nuke.menu("Nuke")
+ menu = menubar.findItem(Session["AVALON_LABEL"])
+
+ for item in menu.items():
+ log.info("Removing menu item: {}".format(item.name()))
+ menu.removeItem(item.name())
diff --git a/pype/nuke/templates.py b/pype/nuke/templates.py
index 797335d982..6434d73f1d 100644
--- a/pype/nuke/templates.py
+++ b/pype/nuke/templates.py
@@ -20,6 +20,8 @@ def get_colorspace_preset():
def get_node_dataflow_preset(**kwarg):
+ ''' Get preset data for dataflow (fileType, compression, bitDepth)
+ '''
log.info(kwarg)
host = kwarg.get("host", "nuke")
cls = kwarg.get("class", None)
@@ -39,6 +41,8 @@ def get_node_dataflow_preset(**kwarg):
def get_node_colorspace_preset(**kwarg):
+ ''' Get preset data for colorspace
+ '''
log.info(kwarg)
host = kwarg.get("host", "nuke")
cls = kwarg.get("class", None)
diff --git a/pype/nukestudio/__init__.py b/pype/nukestudio/__init__.py
index 834455168a..9283e732af 100644
--- a/pype/nukestudio/__init__.py
+++ b/pype/nukestudio/__init__.py
@@ -1,24 +1,42 @@
import os
-
-from avalon.tools import workfiles
+from pypeapp import Logger
+import hiero
from avalon import api as avalon
from pyblish import api as pyblish
-from .. import api
+from .workio import (
+ open,
+ save,
+ current_file,
+ has_unsaved_changes,
+ file_extensions,
+ work_root
+)
+
from .menu import (
install as menu_install,
_update_menu_task_label
)
from .tags import add_tags_from_presets
-from pypeapp import Logger
-
-import hiero
+__all__ = [
+ # Workfiles API
+ "open",
+ "save",
+ "current_file",
+ "has_unsaved_changes",
+ "file_extensions",
+ "work_root",
+]
+# get logger
log = Logger().get_logger(__name__, "nukestudio")
+
+# host related variables
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
+# plugin root path
PARENT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.dirname(PARENT_DIR)
PLUGINS_DIR = os.path.join(PACKAGE_DIR, "plugins")
@@ -28,13 +46,21 @@ LOAD_PATH = os.path.join(PLUGINS_DIR, "nukestudio", "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "nukestudio", "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "nukestudio", "inventory")
-
+# register a particular pyblish gui, but `lite` is recommended
if os.getenv("PYBLISH_GUI", None):
pyblish.register_gui(os.getenv("PYBLISH_GUI", None))
def install(config):
+ """
+    Install NukeStudio integration for avalon
+
+    Args:
+        config (obj): avalon config module, `pype` in our case; it is
+            not used but is required by avalon.api.install()
+
+ """
+
+ # adding all events
_register_events()
log.info("Registering NukeStudio plug-ins..")
@@ -53,6 +79,7 @@ def install(config):
avalon.data["familiesStateDefault"] = False
avalon.data["familiesStateToggled"] = family_states
+ # install menu
menu_install()
# Workfiles.
@@ -70,11 +97,26 @@ def install(config):
def add_tags(event):
+ """
+ Event for automatic tag creation after nukestudio start
+
+ Args:
+ event (obj): required but unused
+ """
+
add_tags_from_presets()
def launch_workfiles_app(event):
- workfiles.show(os.environ["AVALON_WORKDIR"])
+ """
+ Event for launching workfiles after nukestudio start
+
+ Args:
+ event (obj): required but unused
+ """
+ from .lib import set_workfiles
+
+ set_workfiles()
# Closing the new project.
event.sender.close()
@@ -86,6 +128,10 @@ def launch_workfiles_app(event):
def uninstall():
+ """
+    Uninstall NukeStudio integration for avalon
+
+ """
log.info("Deregistering NukeStudio plug-ins..")
pyblish.deregister_host("nukestudio")
pyblish.deregister_plugin_path(PUBLISH_PATH)
@@ -94,6 +140,11 @@ def uninstall():
def _register_events():
+ """
+ Adding all callbacks.
+ """
+
+    # if the task changes, update the NukeStudio menu task label
avalon.on("taskChanged", _update_menu_task_label)
log.info("Installed event callback for 'taskChanged'..")
@@ -108,4 +159,5 @@ def ls():
See the `container.json` schema for details on how it should look,
and the Maya equivalent, which is in `avalon.maya.pipeline`
"""
+    # TODO: list all available containers from the sequence
return
diff --git a/pype/nukestudio/lib.py b/pype/nukestudio/lib.py
index 9adaf380d6..81b48f294d 100644
--- a/pype/nukestudio/lib.py
+++ b/pype/nukestudio/lib.py
@@ -1,19 +1,13 @@
-# Standard library
import os
import sys
-
-# Pyblish libraries
-import pyblish.api
-
-import avalon.api as avalon
-import pype.api as pype
-
-from avalon.vendor.Qt import (QtWidgets, QtGui)
-
-# Host libraries
import hiero
-
+import pyblish.api
+import avalon.api as avalon
+from avalon.vendor.Qt import (QtWidgets, QtGui)
+import pype.api as pype
from pypeapp import Logger
+
+
log = Logger().get_logger(__name__, "nukestudio")
cached_process = None
@@ -30,12 +24,18 @@ AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
def set_workfiles():
''' Wrapping function for workfiles launcher '''
from avalon.tools import workfiles
+
+ # import session to get project dir
S = avalon.Session
active_project_root = os.path.normpath(
os.path.join(S['AVALON_PROJECTS'], S['AVALON_PROJECT'])
)
workdir = os.environ["AVALON_WORKDIR"]
+
+ # show workfile gui
workfiles.show(workdir)
+
+ # getting project
project = hiero.core.projects()[-1]
# set project root with backward compatibility
@@ -46,14 +46,14 @@ def set_workfiles():
project.setProjectRoot(active_project_root)
# get project data from avalon db
- project_data = pype.get_project_data()
+ project_data = pype.get_project()["data"]
log.info("project_data: {}".format(project_data))
# get format and fps property from avalon db on project
- width = project_data['resolution_width']
- height = project_data['resolution_height']
- pixel_aspect = project_data['pixel_aspect']
+ width = project_data["resolutionWidth"]
+ height = project_data["resolutionHeight"]
+ pixel_aspect = project_data["pixelAspect"]
fps = project_data['fps']
format_name = project_data['code']
@@ -64,11 +64,10 @@ def set_workfiles():
# set fps to hiero project
project.setFramerate(fps)
+ # TODO: add auto colorspace set from project drop
log.info("Project property has been synchronised with Avalon db")
-
-
def reload_config():
"""Attempt to reload pipeline at run-time.
@@ -189,6 +188,10 @@ def add_submission():
class PublishAction(QtWidgets.QAction):
+ """
+    Action which is shown as a menu item
+ """
+
def __init__(self):
QtWidgets.QAction.__init__(self, "Publish", None)
self.triggered.connect(self.publish)
@@ -213,7 +216,8 @@ class PublishAction(QtWidgets.QAction):
def _show_no_gui():
- """Popup with information about how to register a new GUI
+ """
+ Popup with information about how to register a new GUI
In the event of no GUI being registered or available,
this information dialog will appear to guide the user
through how to get set up with one.
@@ -283,3 +287,59 @@ def _show_no_gui():
messagebox.setStandardButtons(messagebox.Ok)
messagebox.exec_()
+
+
+def CreateNukeWorkfile(nodes=None,
+ nodes_effects=None,
+ to_timeline=False,
+ **kwargs):
+    ''' Create a nuke workfile of a particular version with given nodes.
+    It also creates timeline track items as precomps.
+
+    Arguments:
+        nodes (list of dict): each dict key is a knob name; order matters
+        to_timeline (bool): will build trackItem with metadata
+
+ Returns:
+ bool: True if done
+
+ Raises:
+ Exception: with traceback
+
+ '''
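+    # a minimal call sketch; `filepath` and `representations` are
+    # assumed to come in through kwargs (see below):
+    # CreateNukeWorkfile(nodes=[], filepath="/path/to/script.nk",
+    #                    representations=["exr"])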
+ import hiero.core
+ from avalon.nuke import imprint
+ from pype.nuke import (
+ lib as nklib
+ )
+
+    # `filepath` and `representations` are assumed to arrive via kwargs;
+    # they were referenced here without being defined
+    filepath = kwargs.get("filepath")
+    representations = kwargs.get("representations", [])
+
+    # check if the file exists, if it does then raise "File exists!"
+    if os.path.exists(filepath):
+        raise IOError("File already exists: `{}`".format(filepath))
+
+    # if no representations match then
+    # raise "no representations to be built"
+    if len(representations) == 0:
+        raise AttributeError("Missing list of `representations`")
+
+ # check nodes input
+ if len(nodes) == 0:
+ log.warning("Missing list of `nodes`")
+
+ # create temp nk file
+ nuke_script = hiero.core.nuke.ScriptWriter()
+
+ # create root node and save all metadata
+ root_node = hiero.core.nuke.RootNode()
+
+ root_path = os.environ["AVALON_PROJECTS"]
+
+ nuke_script.addNode(root_node)
+
+ # here to call pype.nuke.lib.BuildWorkfile
+ script_builder = nklib.BuildWorkfile(
+ root_node=root_node,
+ root_path=root_path,
+ nodes=nuke_script.getNodes(),
+ **kwargs
+ )
diff --git a/pype/nukestudio/menu.py b/pype/nukestudio/menu.py
index 6babceff41..a996389524 100644
--- a/pype/nukestudio/menu.py
+++ b/pype/nukestudio/menu.py
@@ -1,24 +1,23 @@
import os
import sys
import hiero.core
+from pypeapp import Logger
+from avalon.api import Session
+from hiero.ui import findMenuAction
+# this way we ensure compatibility between nuke 10 and 11
try:
from PySide.QtGui import *
except Exception:
from PySide2.QtGui import *
from PySide2.QtWidgets import *
-from hiero.ui import findMenuAction
-
-from avalon.api import Session
-
from .tags import add_tags_from_presets
from .lib import (
reload_config,
set_workfiles
)
-from pypeapp import Logger
log = Logger().get_logger(__name__, "nukestudio")
@@ -45,6 +44,11 @@ def _update_menu_task_label(*args):
def install():
+ """
+ Installing menu into Nukestudio
+
+ """
+
# here is the best place to add menu
from avalon.tools import (
creator,
@@ -127,8 +131,6 @@ def install():
'icon': QIcon('icons:ColorAdd.png')
}]
-
-
# Create menu items
for a in actions:
add_to_menu = menu
diff --git a/pype/nukestudio/precomp_clip.py b/pype/nukestudio/precomp_clip.py
index 426a13f0c7..b544b6e654 100644
--- a/pype/nukestudio/precomp_clip.py
+++ b/pype/nukestudio/precomp_clip.py
@@ -14,9 +14,9 @@ def create_nk_script_clips(script_lst, seq=None):
'handles': 10,
'handleStart': 15, # added asymetrically to handles
'handleEnd': 10, # added asymetrically to handles
- 'timelineIn': 16,
- 'startFrame': 991,
- 'endFrame': 1023,
+ "clipIn": 16,
+ "frameStart": 991,
+ "frameEnd": 1023,
'task': 'Comp-tracking',
'work_dir': 'VFX_PR',
'shot': '00010'
@@ -55,12 +55,12 @@ def create_nk_script_clips(script_lst, seq=None):
if media_in:
source_in = media_in + handle_start
else:
- source_in = nk['startFrame'] + handle_start
+ source_in = nk["frameStart"] + handle_start
if media_duration:
source_out = (media_in + media_duration - 1) - handle_end
else:
- source_out = nk['endFrame'] - handle_end
+ source_out = nk["frameEnd"] - handle_end
print("__ media: `{}`".format(media))
print("__ media_in: `{}`".format(media_in))
@@ -98,8 +98,8 @@ def create_nk_script_clips(script_lst, seq=None):
trackItem.setSourceIn(source_in)
trackItem.setSourceOut(source_out)
trackItem.setSourceIn(source_in)
- trackItem.setTimelineIn(nk['timelineIn'])
- trackItem.setTimelineOut(nk['timelineIn'] + (source_out - source_in))
+ trackItem.setTimelineIn(nk["clipIn"])
+ trackItem.setTimelineOut(nk["clipIn"] + (source_out - source_in))
track.addTrackItem(trackItem)
track.addTrackItem(trackItem)
clips_lst.append(trackItem)
@@ -179,9 +179,9 @@ script_lst = [{
'handles': 10,
'handleStart': 10,
'handleEnd': 10,
- 'timelineIn': 16,
- 'startFrame': 991,
- 'endFrame': 1023,
+ "clipIn": 16,
+ "frameStart": 991,
+ "frameEnd": 1023,
'task': 'platesMain',
'work_dir': 'shots',
'shot': '120sh020'
diff --git a/pype/nukestudio/tags.py b/pype/nukestudio/tags.py
index d9574bdf2b..8ae88d731c 100644
--- a/pype/nukestudio/tags.py
+++ b/pype/nukestudio/tags.py
@@ -1,5 +1,6 @@
import re
import os
+import hiero
from pypeapp import (
config,
@@ -7,8 +8,6 @@ from pypeapp import (
)
from avalon import io
-import hiero
-
log = Logger().get_logger(__name__, "nukestudio")
diff --git a/pype/nukestudio/workio.py b/pype/nukestudio/workio.py
new file mode 100644
index 0000000000..eadd8322cd
--- /dev/null
+++ b/pype/nukestudio/workio.py
@@ -0,0 +1,44 @@
+import os
+
+import hiero
+
+from avalon import api
+
+
+def file_extensions():
+ return [".hrox"]
+
+
+def has_unsaved_changes():
+ # There are no methods for querying unsaved changes to a project, so
+ # enforcing to always save.
+ return True
+
+
+def save(filepath):
+ project = hiero.core.projects()[-1]
+ if project:
+ project.saveAs(filepath)
+ else:
+ project = hiero.core.newProject()
+ project.saveAs(filepath)
+
+
+def open(filepath):
+ hiero.core.openProject(filepath)
+ return True
+
+
+def current_file():
+ current_file = hiero.core.projects()[-1].path()
+ normalised = os.path.normpath(current_file)
+
+ # Unsaved current file
+ if normalised == "":
+ return None
+
+ return normalised
+
+
+def work_root():
+ return os.path.normpath(api.Session["AVALON_WORKDIR"]).replace("\\", "/")
diff --git a/pype/plugins/aport/publish/collect_context.py b/pype/plugins/aport/publish/collect_context.py
index f43e78120c..ee2a20355f 100644
--- a/pype/plugins/aport/publish/collect_context.py
+++ b/pype/plugins/aport/publish/collect_context.py
@@ -87,13 +87,13 @@ class CollectContextDataFromAport(pyblish.api.ContextPlugin):
context.data["currentFile"] = current_file
# get project data from avalon
- project_data = pype.get_project_data()
+ project_data = pype.get_project()["data"]
assert project_data, "No `project_data` data in avalon db"
context.data["projectData"] = project_data
self.log.debug("project_data: {}".format(project_data))
# get asset data from avalon and fix all paths
- asset_data = pype.get_asset_data()
+ asset_data = pype.get_asset()["data"]
assert asset_data, "No `asset_data` data in avalon db"
asset_data = {k: v.replace("\\", "/") for k, v in asset_data.items()
if isinstance(v, str)}
diff --git a/pype/plugins/aport/publish/collect_instances.py b/pype/plugins/aport/publish/collect_instances.py
index ffb2ec824c..be9d798a4f 100644
--- a/pype/plugins/aport/publish/collect_instances.py
+++ b/pype/plugins/aport/publish/collect_instances.py
@@ -39,19 +39,18 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
assert instances_data, "No `asset_default` data in json file"
asset_name = a_session["AVALON_ASSET"]
- entity = io.find_one({"name": asset_name,
- "type": "asset"})
+ entity = pype.get_asset(asset_name)
# get frame start > first try from asset data
- frame_start = context.data["assetData"].get("fstart", None)
+ frame_start = context.data["assetData"].get("frameStart", None)
if not frame_start:
self.log.debug("frame_start not on assetData")
# get frame start > second try from parent data
- frame_start = pype.get_data_hierarchical_attr(entity, "fstart")
+ frame_start = entity["data"]["frameStart"]
if not frame_start:
self.log.debug("frame_start not on any parent entity")
# get frame start > third try from parent data
- frame_start = asset_default["fstart"]
+ frame_start = asset_default["frameStart"]
assert frame_start, "No `frame_start` data found, "
"please set `fstart` on asset"
@@ -61,7 +60,7 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
handles = context.data["assetData"].get("handles", None)
if not handles:
# get frame start > second try from parent data
- handles = pype.get_data_hierarchical_attr(entity, "handles")
+ handles = entity["data"]["handles"]
if not handles:
# get frame start > third try from parent data
handles = asset_default["handles"]
@@ -129,7 +128,7 @@ class CollectInstancesFromJson(pyblish.api.ContextPlugin):
instance.data.update({
"subset": subset_name,
"task": task,
- "fstart": frame_start,
+ "frameStart": frame_start,
"handles": handles,
"host": host,
"asset": asset,
diff --git a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
index 02455454bb..976250da00 100644
--- a/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
+++ b/pype/plugins/ftrack/publish/integrate_ftrack_instances.py
@@ -26,7 +26,9 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
'write': 'render',
'review': 'mov',
'plate': 'img',
- 'audio': 'audio'
+ 'audio': 'audio',
+ 'workfile': 'scene',
+ 'animation': 'cache'
}
def process(self, instance):
@@ -74,11 +76,11 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
)
else:
end_frame += (
- instance.data['endFrame'] - instance.data['startFrame']
+ instance.data["frameEnd"] - instance.data["frameStart"]
)
- if not comp.get('frameRate'):
- comp['frameRate'] = instance.context.data['fps']
+ if not comp.get('fps'):
+ comp['fps'] = instance.context.data['fps']
location = self.get_ftrack_location(
'ftrack.server', ft_session
)
@@ -88,7 +90,7 @@ class IntegrateFtrackInstance(pyblish.api.InstancePlugin):
"metadata": {'ftr_meta': json.dumps({
'frameIn': int(start_frame),
'frameOut': int(end_frame),
- 'frameRate': float(comp['frameRate'])})}
+ 'frameRate': float(comp['fps'])})}
}
comp['thumbnail'] = False
else:
diff --git a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py
index 5f0516c593..25c641c168 100644
--- a/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py
+++ b/pype/plugins/ftrack/publish/integrate_hierarchy_ftrack.py
@@ -106,11 +106,11 @@ class IntegrateHierarchyToFtrack(pyblish.api.ContextPlugin):
tasks_to_create = []
for child in entity['children']:
if child.entity_type.lower() == 'task':
- existing_tasks.append(child['name'])
+ existing_tasks.append(child['name'].lower())
# existing_tasks.append(child['type']['name'])
for task in tasks:
- if task in existing_tasks:
+ if task.lower() in existing_tasks:
print("Task {} already exists".format(task))
continue
tasks_to_create.append(task)
diff --git a/pype/plugins/ftrack/publish/integrate_remove_components.py b/pype/plugins/ftrack/publish/integrate_remove_components.py
new file mode 100644
index 0000000000..a215ee1b97
--- /dev/null
+++ b/pype/plugins/ftrack/publish/integrate_remove_components.py
@@ -0,0 +1,27 @@
+import pyblish.api
+import os
+
+
+class IntegrateCleanComponentData(pyblish.api.InstancePlugin):
+ """
+    Clean up thumbnail and mov files after they have been integrated
+ """
+
+ order = pyblish.api.IntegratorOrder + 0.5
+ label = 'Clean component data'
+ families = ["ftrack"]
+ optional = True
+ active = True
+
+ def process(self, instance):
+
+ for comp in instance.data['representations']:
+ self.log.debug('component {}'.format(comp))
+
+ if comp.get('thumbnail') or ("thumbnail" in comp.get('tags', [])):
+ os.remove(comp['published_path'])
+ self.log.info('Thumbnail image was erased')
+
+ elif comp.get('preview') or ("preview" in comp.get('tags', [])):
+ os.remove(comp['published_path'])
+ self.log.info('Preview mov file was erased')
diff --git a/pype/plugins/fusion/load/actions.py b/pype/plugins/fusion/load/actions.py
index d7ee13716b..481c95387f 100644
--- a/pype/plugins/fusion/load/actions.py
+++ b/pype/plugins/fusion/load/actions.py
@@ -27,8 +27,8 @@ class FusionSetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@@ -60,8 +60,8 @@ class FusionSetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
diff --git a/pype/plugins/fusion/load/load_sequence.py b/pype/plugins/fusion/load/load_sequence.py
index 7b859e9b03..ce6bca6c77 100644
--- a/pype/plugins/fusion/load/load_sequence.py
+++ b/pype/plugins/fusion/load/load_sequence.py
@@ -145,7 +145,7 @@ class FusionLoadSequence(api.Loader):
tool["Clip"] = path
# Set global in point to start frame (if in version.data)
- start = context["version"]["data"].get("startFrame", None)
+ start = context["version"]["data"].get("frameStart", None)
if start is not None:
loader_shift(tool, start, relative=False)
@@ -175,7 +175,7 @@ class FusionLoadSequence(api.Loader):
been set.
- GlobalIn: Fusion reset to comp's global in if duration changes
- - We change it to the "startFrame"
+ - We change it to the "frameStart"
- GlobalEnd: Fusion resets to globalIn + length if duration changes
- We do the same like Fusion - allow fusion to take control.
@@ -212,7 +212,7 @@ class FusionLoadSequence(api.Loader):
# Get start frame from version data
version = io.find_one({"type": "version",
"_id": representation["parent"]})
- start = version["data"].get("startFrame")
+ start = version["data"].get("frameStart")
if start is None:
self.log.warning("Missing start frame for updated version"
"assuming starts at frame 0 for: "
diff --git a/pype/plugins/fusion/publish/collect_instances.py b/pype/plugins/fusion/publish/collect_instances.py
index 472e5d4741..6dbb1b1a97 100644
--- a/pype/plugins/fusion/publish/collect_instances.py
+++ b/pype/plugins/fusion/publish/collect_instances.py
@@ -23,7 +23,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
"""Collect Fusion saver instances
This additionally stores the Comp start and end render range in the
- current context's data as "startFrame" and "endFrame".
+ current context's data as "frameStart" and "frameEnd".
"""
@@ -43,8 +43,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
savers = [tool for tool in tools if tool.ID == "Saver"]
start, end = get_comp_render_range(comp)
- context.data["startFrame"] = start
- context.data["endFrame"] = end
+ context.data["frameStart"] = start
+ context.data["frameEnd"] = end
for tool in savers:
path = tool["Clip"][comp.TIME_UNDEFINED]
diff --git a/pype/plugins/fusion/publish/publish_image_sequences.py b/pype/plugins/fusion/publish/publish_image_sequences.py
index 26ae74676f..9fe9ddc4cb 100644
--- a/pype/plugins/fusion/publish/publish_image_sequences.py
+++ b/pype/plugins/fusion/publish/publish_image_sequences.py
@@ -53,8 +53,8 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
# The instance has most of the information already stored
metadata = {
"regex": regex,
- "startFrame": instance.context.data["startFrame"],
- "endFrame": instance.context.data["endFrame"],
+ "frameStart": instance.context.data["frameStart"],
+ "frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}
diff --git a/pype/plugins/fusion/publish/submit_deadline.py b/pype/plugins/fusion/publish/submit_deadline.py
index 30d17a4c69..6b65f9fe05 100644
--- a/pype/plugins/fusion/publish/submit_deadline.py
+++ b/pype/plugins/fusion/publish/submit_deadline.py
@@ -79,8 +79,8 @@ class FusionSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Fusion",
"Frames": "{start}-{end}".format(
- start=int(context.data["startFrame"]),
- end=int(context.data["endFrame"])
+ start=int(context.data["frameStart"]),
+ end=int(context.data["frameEnd"])
),
"Comment": comment,
diff --git a/pype/plugins/global/load/open_djv.py b/pype/plugins/global/load/open_djv.py
index bd49d86d5f..754f583a56 100644
--- a/pype/plugins/global/load/open_djv.py
+++ b/pype/plugins/global/load/open_djv.py
@@ -1,22 +1,15 @@
import os
import subprocess
import json
-from pype import lib as pypelib
+from pypeapp import config
from avalon import api
-def get_config_data():
- path_items = [pypelib.get_presets_path(), 'djv_view', 'config.json']
- filepath = os.path.sep.join(path_items)
- data = dict()
- with open(filepath) as data_file:
- data = json.load(data_file)
- return data
-
-
def get_families():
families = []
- paths = get_config_data().get('djv_paths', [])
+ paths = config.get_presets().get("djv_view", {}).get("config", {}).get(
+ "djv_paths", []
+ )
for path in paths:
if os.path.exists(path):
families.append("*")
@@ -25,13 +18,15 @@ def get_families():
def get_representation():
- return get_config_data().get('file_ext', [])
+ return config.get_presets().get("djv_view", {}).get("config", {}).get(
+ 'file_ext', []
+ )
class OpenInDJV(api.Loader):
"""Open Image Sequence with system default"""
- config_data = get_config_data()
+ config_data = config.get_presets().get("djv_view", {}).get("config", {})
families = get_families()
representations = get_representation()
@@ -42,7 +37,9 @@ class OpenInDJV(api.Loader):
def load(self, context, name, namespace, data):
self.djv_path = None
- paths = get_config_data().get('djv_paths', [])
+ paths = config.get_presets().get("djv_view", {}).get("config", {}).get(
+ "djv_paths", []
+ )
for path in paths:
if os.path.exists(path):
self.djv_path = path
diff --git a/pype/plugins/global/publish/cleanup.py b/pype/plugins/global/publish/cleanup.py
index f31477faca..34123b31cf 100644
--- a/pype/plugins/global/publish/cleanup.py
+++ b/pype/plugins/global/publish/cleanup.py
@@ -3,11 +3,33 @@ import shutil
import pyblish.api
+def clean_renders(instance):
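+ """Delete rendered frames from their temporary staging location.
+
+ Sources are only removed when they differ from their publish
+ destination and the instance belongs to the "render" family; emptied
+ source directories are pruned afterwards.
+ """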
+ transfers = instance.data.get("transfers", list())
+
+ current_families = instance.data.get("families", list())
+ instance_family = instance.data.get("family", None)
+ dirnames = []
+
+ for src, dest in transfers:
+ if os.path.normpath(src) != os.path.normpath(dest):
+ if instance_family == 'render' or 'render' in current_families:
+ os.remove(src)
+ dirnames.append(os.path.dirname(src))
+
+ # make the directory list unique and prune directories that are
+ # now empty
+ cleanup_dirs = set(dirnames)
+ for folder in cleanup_dirs:
+ try:
+ os.rmdir(folder)
+ except OSError:
+ # directory is not empty, skipping
+ continue
+
+
class CleanUp(pyblish.api.InstancePlugin):
"""Cleans up the staging directory after a successful publish.
- The removal will only happen for staging directories which are inside the
- temporary folder, otherwise the folder is ignored.
+ This will also remove successfully published renders from their
+ temporary staging location and delete the emptied source directories.
"""
@@ -36,3 +58,5 @@ class CleanUp(pyblish.api.InstancePlugin):
self.log.info("Removing temporary folder ...")
shutil.rmtree(staging_dir)
+ self.log.info("Cleaning renders ...")
+ clean_renders(instance)
diff --git a/pype/plugins/global/publish/collect_context.py b/pype/plugins/global/publish/collect_context.py
index 11625eaa85..31ab95259c 100644
--- a/pype/plugins/global/publish/collect_context.py
+++ b/pype/plugins/global/publish/collect_context.py
@@ -67,9 +67,9 @@ class CollectContextDataSAPublish(pyblish.api.ContextPlugin):
if isinstance(component['files'], list):
collections, remainder = clique.assemble(component['files'])
self.log.debug("collecting sequence: {}".format(collections))
- instance.data['startFrame'] = int(component['startFrame'])
- instance.data['endFrame'] = int(component['endFrame'])
- instance.data['frameRate'] = int(component['frameRate'])
+ instance.data["frameStart"] = int(component["frameStart"])
+ instance.data["frameEnd"] = int(component["frameEnd"])
+ instance.data['fps'] = int(component['fps'])
instance.data["representations"].append(component)
diff --git a/pype/plugins/global/publish/collect_filesequences.py b/pype/plugins/global/publish/collect_filesequences.py
index ed48404a98..33531549cb 100644
--- a/pype/plugins/global/publish/collect_filesequences.py
+++ b/pype/plugins/global/publish/collect_filesequences.py
@@ -6,14 +6,13 @@ from pprint import pformat
import pyblish.api
from avalon import api
-import pype.api as pype
def collect(root,
regex=None,
exclude_regex=None,
- startFrame=None,
- endFrame=None):
+ frame_start=None,
+ frame_end=None):
"""Collect sequence collections in root"""
from avalon.vendor import clique
@@ -52,10 +51,10 @@ def collect(root,
# Exclude any frames outside start and end frame.
for collection in collections:
for index in list(collection.indexes):
- if startFrame is not None and index < startFrame:
+ if frame_start is not None and index < frame_start:
collection.indexes.discard(index)
continue
- if endFrame is not None and index > endFrame:
+ if frame_end is not None and index > frame_end:
collection.indexes.discard(index)
continue
@@ -77,8 +76,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
api.Session["AVALON_ASSET"]
subset (str): The subset to publish to. If not provided the sequence's
head (up to frame number) will be used.
- startFrame (int): The start frame for the sequence
- endFrame (int): The end frame for the sequence
+ frame_start (int): The start frame for the sequence
+ frame_end (int): The end frame for the sequence
root (str): The path to collect from (can be relative to the .json)
regex (str): A regex for the sequence filename
exclude_regex (str): A regex for filename to exclude from collection
@@ -143,8 +142,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
collections = collect(root=root,
regex=regex,
exclude_regex=data.get("exclude_regex"),
- startFrame=data.get("startFrame"),
- endFrame=data.get("endFrame"))
+ frame_start=data.get("frameStart"),
+ frame_end=data.get("frameEnd"))
self.log.info("Found collections: {}".format(collections))
@@ -160,10 +159,13 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# Get family from the data
families = data.get("families", ["render"])
- assert isinstance(families, (list, tuple)), "Must be iterable"
- assert families, "Must have at least a single family"
- families.append("ftrack")
- families.append("review")
+ if "render" not in families:
+ families.append("render")
+ if "ftrack" not in families:
+ families.append("ftrack")
+ if "review" not in families:
+ families.append("review")
+
for collection in collections:
instance = context.create_instance(str(collection))
self.log.info("Collection: %s" % list(collection))
@@ -176,8 +178,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
# If no start or end frame provided, get it from collection
indices = list(collection.indexes)
- start = data.get("startFrame", indices[0])
- end = data.get("endFrame", indices[-1])
+ start = data.get("frameStart", indices[0])
+ end = data.get("frameEnd", indices[-1])
# root = os.path.normpath(root)
# self.log.info("Source: {}}".format(data.get("source", "")))
@@ -191,8 +193,8 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
"subset": subset,
"asset": data.get("asset", api.Session["AVALON_ASSET"]),
"stagingDir": root,
- "startFrame": start,
- "endFrame": end,
+ "frameStart": start,
+ "frameEnd": end,
"fps": fps,
"source": data.get('source', '')
})
@@ -208,7 +210,7 @@ class CollectRenderedFrames(pyblish.api.ContextPlugin):
'files': list(collection),
"stagingDir": root,
"anatomy_template": "render",
- "frameRate": fps,
+ "fps": fps,
"tags": ['review']
}
instance.data["representations"].append(representation)
diff --git a/pype/plugins/global/publish/collect_output_repre_config.py b/pype/plugins/global/publish/collect_output_repre_config.py
index 8c63cfcc11..5595e29cab 100644
--- a/pype/plugins/global/publish/collect_output_repre_config.py
+++ b/pype/plugins/global/publish/collect_output_repre_config.py
@@ -1,7 +1,7 @@
import os
import json
import pyblish.api
-from pype import lib as pypelib
+from pypeapp import config
class CollectOutputRepreConfig(pyblish.api.ContextPlugin):
@@ -12,13 +12,5 @@ class CollectOutputRepreConfig(pyblish.api.ContextPlugin):
hosts = ["shell"]
def process(self, context):
- config_items = [
- pypelib.get_presets_path(),
- "ftrack",
- "output_representation.json"
- ]
- config_file = os.path.sep.join(config_items)
- with open(config_file) as data_file:
- config_data = json.load(data_file)
-
+ config_data = config.get_presets()["ftrack"]["output_representation"]
context.data['output_repre_config'] = config_data
diff --git a/pype/plugins/global/publish/collect_project_data.py b/pype/plugins/global/publish/collect_project_data.py
index c25580bd5f..de51ad880c 100644
--- a/pype/plugins/global/publish/collect_project_data.py
+++ b/pype/plugins/global/publish/collect_project_data.py
@@ -12,6 +12,6 @@ class CollectProjectData(pyblish.api.ContextPlugin):
def process(self, context):
# get project data from avalon db
- context.data["projectData"] = pype.get_project_data()
+ context.data["projectData"] = pype.get_project()["data"]
return
diff --git a/pype/plugins/global/publish/extract_burnin.py b/pype/plugins/global/publish/extract_burnin.py
index 5f16cc91f2..1ffda4fdfa 100644
--- a/pype/plugins/global/publish/extract_burnin.py
+++ b/pype/plugins/global/publish/extract_burnin.py
@@ -1,7 +1,7 @@
import os
-import subprocess
-import pype.api
import json
+
+import pype.api
import pyblish
@@ -17,6 +17,7 @@ class ExtractBurnin(pype.api.Extractor):
label = "Quicktime with burnins"
order = pyblish.api.ExtractorOrder + 0.03
families = ["review", "burnin"]
+ hosts = ["nuke", "maya", "shell"]
optional = True
def process(self, instance):
@@ -32,7 +33,7 @@ class ExtractBurnin(pype.api.Extractor):
"username": instance.context.data['user'],
"asset": os.environ['AVALON_ASSET'],
"task": os.environ['AVALON_TASK'],
- "start_frame": int(instance.data['startFrame']),
+ "start_frame": int(instance.data["frameStart"]),
"version": version
}
self.log.debug("__ prep_data: {}".format(prep_data))
@@ -61,31 +62,55 @@ class ExtractBurnin(pype.api.Extractor):
self.log.debug("__ burnin_data2: {}".format(burnin_data))
json_data = json.dumps(burnin_data)
- scriptpath = os.path.normpath(os.path.join(os.environ['PYPE_MODULE_ROOT'],
- "pype",
- "scripts",
- "otio_burnin.py"))
+
+ # Get script path.
+ module_path = os.environ['PYPE_MODULE_ROOT']
+
+ # There can be multiple paths in PYPE_MODULE_ROOT, in which case
+ # we just take first one.
+ if os.pathsep in module_path:
+ module_path = module_path.split(os.pathsep)[0]
+
+ scriptpath = os.path.normpath(
+ os.path.join(
+ module_path,
+ "pype",
+ "scripts",
+ "otio_burnin.py"
+ )
+ )
self.log.debug("__ scriptpath: {}".format(scriptpath))
- self.log.debug("__ EXE: {}".format(os.getenv("PYPE_PYTHON_EXE")))
- try:
- p = subprocess.Popen(
- [os.getenv("PYPE_PYTHON_EXE"), scriptpath, json_data]
- )
- p.wait()
- if not os.path.isfile(full_burnin_path):
- raise RuntimeError("File not existing: {}".format(full_burnin_path))
- except Exception as e:
- raise RuntimeError("Burnin script didn't work: `{}`".format(e))
+ # Get executable.
+ executable = os.getenv("PYPE_PYTHON_EXE")
- if os.path.exists(full_burnin_path):
- repre_update = {
- "files": movieFileBurnin,
- "name": repre["name"]
- }
- instance.data["representations"][i].update(repre_update)
+ # There can be multiple paths in PYPE_PYTHON_EXE, in which case
+ # we just take first one.
+ if os.pathsep in executable:
+ executable = executable.split(os.pathsep)[0]
- # removing the source mov file
- os.remove(full_movie_path)
- self.log.debug("Removed: `{}`".format(full_movie_path))
+ self.log.debug("__ EXE: {}".format(executable))
+
+ args = [executable, scriptpath, json_data]
+ self.log.debug("Executing: {}".format(args))
+ pype.api.subprocess(args)
+
+ repre_update = {
+ "files": movieFileBurnin,
+ "name": repre["name"],
+ "tags": [x for x in repre["tags"] if x != "delete"]
+ }
+ instance.data["representations"][i].update(repre_update)
+
+ # removing the source mov file
+ os.remove(full_movie_path)
+ self.log.debug("Removed: `{}`".format(full_movie_path))
+
+ # Remove any representations tagged for deletion.
+ kept_representations = []
+ for repre in instance.data["representations"]:
+ if "delete" in repre.get("tags", []):
+ self.log.debug("Removing representation: {}".format(repre))
+ continue
+ kept_representations.append(repre)
+ instance.data["representations"] = kept_representations
+
+ self.log.debug(instance.data["representations"])
diff --git a/pype/plugins/global/publish/integrate_hierarchy_avalon.py b/pype/plugins/global/publish/extract_hierarchy_avalon.py
similarity index 95%
rename from pype/plugins/global/publish/integrate_hierarchy_avalon.py
rename to pype/plugins/global/publish/extract_hierarchy_avalon.py
index c01cb2d26a..778263f29a 100644
--- a/pype/plugins/global/publish/integrate_hierarchy_avalon.py
+++ b/pype/plugins/global/publish/extract_hierarchy_avalon.py
@@ -2,11 +2,11 @@ import pyblish.api
from avalon import io
-class IntegrateHierarchyToAvalon(pyblish.api.ContextPlugin):
+class ExtractHierarchyToAvalon(pyblish.api.ContextPlugin):
"""Create entities in Avalon based on collected data."""
- order = pyblish.api.IntegratorOrder - 0.1
- label = "Integrate Hierarchy To Avalon"
+ order = pyblish.api.ExtractorOrder - 0.01
+ label = "Extract Hierarchy To Avalon"
families = ["clip", "shot"]
def process(self, context):
diff --git a/pype/plugins/global/publish/extract_jpeg.py b/pype/plugins/global/publish/extract_jpeg.py
index 3c4c2e6775..e4ff7a6407 100644
--- a/pype/plugins/global/publish/extract_jpeg.py
+++ b/pype/plugins/global/publish/extract_jpeg.py
@@ -1,7 +1,8 @@
import os
+
import pyblish.api
-import subprocess
from pype.vendor import clique
+import pype.api
class ExtractJpegEXR(pyblish.api.InstancePlugin):
@@ -20,9 +21,8 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
order = pyblish.api.ExtractorOrder
families = ["imagesequence", "render", "write", "source"]
-
def process(self, instance):
- start = instance.data.get("startFrame")
+ start = instance.data.get("frameStart")
stagingdir = os.path.normpath(instance.data.get("stagingDir"))
collected_frames = os.listdir(stagingdir)
@@ -59,8 +59,10 @@ class ExtractJpegEXR(pyblish.api.InstancePlugin):
jpeg_items.append(full_output_path)
subprocess_jpeg = " ".join(jpeg_items)
- sub_proc = subprocess.Popen(subprocess_jpeg)
- sub_proc.wait()
+
+ # run subprocess
+ self.log.debug("{}".format(subprocess_jpeg))
+ pype.api.subprocess(subprocess_jpeg)
if "representations" not in instance.data:
instance.data["representations"] = []
diff --git a/pype/plugins/global/publish/extract_review.py b/pype/plugins/global/publish/extract_review.py
index 3a764b19c3..bf53fa87d4 100644
--- a/pype/plugins/global/publish/extract_review.py
+++ b/pype/plugins/global/publish/extract_review.py
@@ -1,7 +1,8 @@
import os
+
import pyblish.api
-import subprocess
from pype.vendor import clique
+import pype.api
from pypeapp import config
@@ -19,6 +20,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
label = "Extract Review"
order = pyblish.api.ExtractorOrder + 0.02
families = ["review"]
+ hosts = ["nuke", "maya", "shell"]
def process(self, instance):
# adding plugin attributes from presets
@@ -28,7 +30,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
inst_data = instance.data
fps = inst_data.get("fps")
- start_frame = inst_data.get("startFrame")
+ start_frame = inst_data.get("frameStart")
self.log.debug("Families In: `{}`".format(instance.data["families"]))
@@ -53,7 +55,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
ext = "mov"
self.log.warning(
"`ext` attribute not in output profile. Setting to default ext: `mov`")
-
+
self.log.debug("instance.families: {}".format(instance.data['families']))
self.log.debug("profile.families: {}".format(profile['families']))
@@ -85,7 +87,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
repre_new = repre.copy()
- new_tags = tags[:]
+ new_tags = [x for x in tags if x != "delete"]
p_tags = profile.get('tags', [])
self.log.info("p_tags: `{}`".format(p_tags))
# add families
@@ -108,12 +110,42 @@ class ExtractReview(pyblish.api.InstancePlugin):
# necessary input data
# adds start arg only if image sequence
- if "mov" not in repre_new['ext']:
+ if isinstance(repre["files"], list):
input_args.append("-start_number {0} -framerate {1}".format(
start_frame, fps))
input_args.append("-i {}".format(full_input_path))
+ for audio in instance.data.get("audio", []):
+ offset_frames = (
+ instance.data.get("startFrameReview") -
+ audio["offset"]
+ )
+ offset_seconds = offset_frames / fps
+
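+ # A positive offset means the audio starts before the review range,
+ # so we seek into the file with -ss; a negative offset delays the
+ # audio with -itsoffset instead.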
+ if offset_seconds > 0:
+ input_args.append("-ss")
+ else:
+ input_args.append("-itsoffset")
+
+ input_args.append(str(abs(offset_seconds)))
+
+ input_args.extend(
+ ["-i", audio["filename"]]
+ )
+
+ # Need to merge audio if there are more
+ # than 1 input.
+ if len(instance.data["audio"]) > 1:
+ input_args.extend(
+ [
+ "-filter_complex",
+ "amerge",
+ "-ac",
+ "2"
+ ]
+ )
+
output_args = []
# preset's output data
output_args.extend(profile.get('output', []))
@@ -125,6 +157,9 @@ class ExtractReview(pyblish.api.InstancePlugin):
output_args.append(
"-filter:v drawbox=0:0:iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black,drawbox=0:ih-round((ih-(iw*(1/{0})))/2):iw:round((ih-(iw*(1/{0})))/2):t=fill:c=black".format(lb))
+ # In case audio is longer than video.
+ output_args.append("-shortest")
+
# output filename
output_args.append(full_output_path)
mov_args = [
@@ -136,12 +171,7 @@ class ExtractReview(pyblish.api.InstancePlugin):
# run subprocess
self.log.debug("{}".format(subprcs_cmd))
- sub_proc = subprocess.Popen(subprcs_cmd)
- sub_proc.wait()
-
- if not os.path.isfile(full_output_path):
- raise FileExistsError(
- "Quicktime wasn't created succesfully")
+ pype.api.subprocess(subprcs_cmd)
# create representation data
repre_new.update({
@@ -157,16 +187,17 @@ class ExtractReview(pyblish.api.InstancePlugin):
repre_new.pop("thumbnail")
# adding representation
+ self.log.debug("Adding: {}".format(repre_new))
representations_new.append(repre_new)
- # if "delete" in tags:
- # if "mov" in full_input_path:
- # os.remove(full_input_path)
- # self.log.debug("Removed: `{}`".format(full_input_path))
else:
continue
else:
continue
+ # Drop representations tagged for deletion. A new list is built
+ # because removing entries while iterating would skip some of them.
+ representations_new = [
+ repre for repre in representations_new
+ if "delete" not in repre.get("tags", [])
+ ]
+
self.log.debug(
"new representations: {}".format(representations_new))
instance.data["representations"] = representations_new
diff --git a/pype/plugins/global/publish/integrate.py b/pype/plugins/global/publish/integrate.py
index 29a4ba3c17..8597d4a719 100644
--- a/pype/plugins/global/publish/integrate.py
+++ b/pype/plugins/global/publish/integrate.py
@@ -404,7 +404,7 @@ class IntegrateAsset(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
- "startFrame", "endFrame", "step", "handles", "sourceHashes"
+ "frameStart", "frameEnd", "step", "handles", "sourceHashes"
]
for key in optionals:
if key in instance.data:
diff --git a/pype/plugins/global/publish/integrate_new.py b/pype/plugins/global/publish/integrate_new.py
index e758789c37..e5d8007d70 100644
--- a/pype/plugins/global/publish/integrate_new.py
+++ b/pype/plugins/global/publish/integrate_new.py
@@ -36,9 +36,9 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
template from anatomy that should be used for
integrating this file. Only the first level can
be specified right now.
- 'startFrame'
- 'endFrame'
- 'framerate'
+ "frameStart"
+ "frameEnd"
+ 'fps'
"""
label = "Integrate Asset New"
@@ -63,6 +63,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"rig",
"plate",
"look",
+ "lut",
"audio"
]
exclude_families = ["clip"]
@@ -271,15 +272,22 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug(
"src_tail_collections: {}".format(str(src_collections)))
src_collection = src_collections[0]
+
# Assert that each member has identical suffix
src_head = src_collection.format("{head}")
src_tail = src_collection.format("{tail}")
+ # Derive the source padding from an actual collection member instead
+ # of relying on clique's "{padding}" format.
+ valid_files = [x for x in files if src_collection.match(x)]
+ padd_len = len(
+ valid_files[0].replace(src_head, "").replace(src_tail, "")
+ )
+ src_padding_exp = "%0{}d".format(padd_len)
+
test_dest_files = list()
for i in [1, 2]:
template_data["representation"] = repre['ext']
- template_data["frame"] = src_collection.format(
- "{padding}") % i
+ template_data["frame"] = src_padding_exp % i
anatomy_filled = anatomy.format(template_data)
test_dest_files.append(
@@ -295,24 +303,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
dst_head = dst_collection.format("{head}")
dst_tail = dst_collection.format("{tail}")
- repre['published_path'] = dst_collection.format()
-
index_frame_start = None
- if repre.get('startFrame'):
+ if repre.get("frameStart"):
frame_start_padding = len(str(
- repre.get('endFrame')))
- index_frame_start = repre.get('startFrame')
+ repre.get("frameEnd")))
+ index_frame_start = repre.get("frameStart")
+ dst_padding_exp = src_padding_exp
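+
+ # When an explicit start frame is given, destination frames are
+ # renumbered from that index onwards.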
for i in src_collection.indexes:
- src_padding = src_collection.format("{padding}") % i
+ src_padding = src_padding_exp % i
src_file_name = "{0}{1}{2}".format(
src_head, src_padding, src_tail)
- dst_padding = dst_collection.format("{padding}") % i
+ dst_padding = src_padding_exp % i
if index_frame_start:
- dst_padding = "%0{}d".format(
- frame_start_padding) % index_frame_start
+ dst_padding_exp = "%0{}d".format(frame_start_padding)
+ dst_padding = dst_padding_exp % index_frame_start
index_frame_start += 1
dst = "{0}{1}{2}".format(dst_head, dst_padding, dst_tail)
@@ -321,6 +328,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.debug("source: {}".format(src))
instance.data["transfers"].append([src, dst])
+ repre['published_path'] = "{0}{1}{2}".format(
+ dst_head, dst_padding_exp, dst_tail)
# for imagesequence version data
hashes = '#' * len(dst_padding)
dst = os.path.normpath("{0}{1}{2}".format(
@@ -380,7 +388,7 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
"representation": repre['ext']
}
}
- self.log.debug("__ _representation: {}".format(representation))
+ self.log.debug("__ representation: {}".format(representation))
destination_list.append(dst)
self.log.debug("__ destination_list: {}".format(destination_list))
instance.data['destination_list'] = destination_list
@@ -396,20 +404,23 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
self.log.info("Registered {} items".format(len(representations)))
def integrate(self, instance):
- """Move the files
+ """ Move the files.
- Through `instance.data["transfers"]`
+ Through `instance.data["transfers"]`
- Args:
- instance: the instance to integrate
+ Args:
+ instance: the instance to integrate
"""
-
transfers = instance.data.get("transfers", list())
for src, dest in transfers:
if os.path.normpath(src) != os.path.normpath(dest):
self.copy_file(src, dest)
# Produce hardlinked copies
# Note: hardlink can only be produced between two files on the same
# server/disk and editing one of the two will edit both files at once.
@@ -543,8 +554,8 @@ class IntegrateAssetNew(pyblish.api.InstancePlugin):
# Include optional data if present in
optionals = [
- "startFrame", "endFrame", "step", "handles",
- "handle_end", "handle_start", "sourceHashes"
+ "frameStart", "frameEnd", "step", "handles",
+ "handleEnd", "handleStart", "sourceHashes"
]
for key in optionals:
if key in instance.data:
diff --git a/pype/plugins/global/publish/integrate_rendered_frames.py b/pype/plugins/global/publish/integrate_rendered_frames.py
index ff8d6adc71..cb8f2d8608 100644
--- a/pype/plugins/global/publish/integrate_rendered_frames.py
+++ b/pype/plugins/global/publish/integrate_rendered_frames.py
@@ -408,7 +408,7 @@ class IntegrateFrames(pyblish.api.InstancePlugin):
"comment": context.data.get("comment")}
# Include optional data if present in
- optionals = ["startFrame", "endFrame", "step",
+ optionals = ["frameStart", "frameEnd", "step",
"handles", "colorspace", "fps", "outputDir"]
for key in optionals:
diff --git a/pype/plugins/global/publish/submit_publish_job.py b/pype/plugins/global/publish/submit_publish_job.py
index a3ea308891..8d352b8872 100644
--- a/pype/plugins/global/publish/submit_publish_job.py
+++ b/pype/plugins/global/publish/submit_publish_job.py
@@ -121,7 +121,7 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
- publishJobState (str, Optional): "Active" or "Suspended"
This defaults to "Suspended"
- This requires a "startFrame" and "endFrame" to be present in instance.data
+ This requires a "frameStart" and "frameEnd" to be present in instance.data
or in context.data.
"""
@@ -138,6 +138,16 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
"imagesequence"
]
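+ # Only the following environment variables are forwarded from the
+ # render job to the dependent publish job.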
+ enviro_filter = [
+ "PATH",
+ "PYTHONPATH",
+ "FTRACK_API_USER",
+ "FTRACK_API_KEY",
+ "FTRACK_SERVER",
+ "PYPE_ROOT"
+ ]
+
def _submit_deadline_post_job(self, instance, job):
"""
Deadline specific code separated from :meth:`process` for sake of
@@ -181,13 +191,22 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Transfer the environment from the original job to this dependent
# job so they use the same environment
environment = job["Props"].get("Env", {})
- payload["JobInfo"].update({
- "EnvironmentKeyValue%d" % index: "{key}={value}".format(
- key=key,
- value=environment[key]
- ) for index, key in enumerate(environment)
- })
+ i = 0
+ self.log.info("FILTER: {}".format(self.enviro_filter))
+ for key in environment:
+ self.log.info("KEY: {}".format(key))
+
+ if key.upper() in self.enviro_filter:
+ payload["JobInfo"].update({
+ "EnvironmentKeyValue%d" % i: "{key}={value}".format(
+ key=key,
+ value=environment[key]
+ )
+ })
+ i += 1
# Avoid copied pools and remove secondary pool
payload["JobInfo"]["Pool"] = "none"
@@ -236,12 +255,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Get start/end frame from instance, if not available get from context
context = instance.context
- start = instance.data.get("startFrame")
+ start = instance.data.get("frameStart")
if start is None:
- start = context.data["startFrame"]
- end = instance.data.get("endFrame")
+ start = context.data["frameStart"]
+ end = instance.data.get("frameEnd")
if end is None:
- end = context.data["endFrame"]
+ end = context.data["frameEnd"]
# Add in regex for sequence filename
# This assumes the output files start with subset name and ends with
@@ -266,8 +285,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
metadata = {
"asset": asset,
"regex": regex,
- "startFrame": start,
- "endFrame": end,
+ "frameStart": start,
+ "frameEnd": end,
"fps": context.data.get("fps", None),
"families": ["render"],
"source": source,
@@ -315,8 +334,8 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Set prev start / end frames for comparison
if not prev_start and not prev_end:
- prev_start = version["data"]["startFrame"]
- prev_end = version["data"]["endFrame"]
+ prev_start = version["data"]["frameStart"]
+ prev_end = version["data"]["frameEnd"]
subset_resources = get_resources(version, _ext)
resource_files = get_resource_files(subset_resources,
@@ -352,12 +371,12 @@ class ProcessSubmittedJobOnFarm(pyblish.api.InstancePlugin):
# Please do so when fixing this.
# Start frame
- metadata["startFrame"] = updated_start
- metadata["metadata"]["instance"]["startFrame"] = updated_start
+ metadata["frameStart"] = updated_start
+ metadata["metadata"]["instance"]["frameStart"] = updated_start
# End frame
- metadata["endFrame"] = updated_end
- metadata["metadata"]["instance"]["endFrame"] = updated_end
+ metadata["frameEnd"] = updated_end
+ metadata["metadata"]["instance"]["frameEnd"] = updated_end
metadata_filename = "{}_metadata.json".format(subset)
diff --git a/pype/plugins/global/publish/validate_filesequences.py b/pype/plugins/global/publish/validate_filesequences.py
new file mode 100644
index 0000000000..2f4ac3de4f
--- /dev/null
+++ b/pype/plugins/global/publish/validate_filesequences.py
@@ -0,0 +1,12 @@
+import pyblish.api
+
+
+class ValidateFileSequences(pyblish.api.ContextPlugin):
+ """Validates whether any file sequences were collected."""
+
+ order = pyblish.api.ValidatorOrder
+ targets = ["filesequence"]
+ label = "Validate File Sequences"
+
+ def process(self, context):
+ assert context, "Nothing collected."
diff --git a/pype/plugins/global/publish/validate_sequence_frames.py b/pype/plugins/global/publish/validate_sequence_frames.py
index cd54e6becc..f03229da22 100644
--- a/pype/plugins/global/publish/validate_sequence_frames.py
+++ b/pype/plugins/global/publish/validate_sequence_frames.py
@@ -22,8 +22,8 @@ class ValidateSequenceFrames(pyblish.api.InstancePlugin):
frames = list(collection.indexes)
current_range = (frames[0], frames[-1])
- required_range = (instance.data["startFrame"],
- instance.data["endFrame"])
+ required_range = (instance.data["frameStart"],
+ instance.data["frameEnd"])
if current_range != required_range:
raise ValueError("Invalid frame range: {0} - "
diff --git a/pype/plugins/houdini/publish/collect_frames.py b/pype/plugins/houdini/publish/collect_frames.py
index 7735d23fb7..7df7d2b86e 100644
--- a/pype/plugins/houdini/publish/collect_frames.py
+++ b/pype/plugins/houdini/publish/collect_frames.py
@@ -23,8 +23,8 @@ class CollectFrames(pyblish.api.InstancePlugin):
match = re.match("(\w+)\.(\d+)\.vdb", file_name)
result = file_name
- start_frame = instance.data.get("startFrame", None)
- end_frame = instance.data.get("endFrame", None)
+ start_frame = instance.data.get("frameStart", None)
+ end_frame = instance.data.get("frameEnd", None)
if match and start_frame is not None:
diff --git a/pype/plugins/houdini/publish/collect_instances.py b/pype/plugins/houdini/publish/collect_instances.py
index 5f9fc7d6c9..413553c864 100644
--- a/pype/plugins/houdini/publish/collect_instances.py
+++ b/pype/plugins/houdini/publish/collect_instances.py
@@ -55,7 +55,7 @@ class CollectInstances(pyblish.api.ContextPlugin):
# Create nice name if the instance has a frame range.
label = data.get("name", node.name())
- if "startFrame" in data and "endFrame" in data:
+ if "frameStart" in data and "frameEnd" in data:
frames = "[{startFrame} - {endFrame}]".format(**data)
label = "{} {}".format(label, frames)
@@ -91,8 +91,8 @@ class CollectInstances(pyblish.api.ContextPlugin):
if node.evalParm("trange") == 0:
return data
- data["startFrame"] = node.evalParm("f1")
- data["endFrame"] = node.evalParm("f2")
+ data["frameStart"] = node.evalParm("f1")
+ data["frameEnd"] = node.evalParm("f2")
data["steps"] = node.evalParm("f3")
return data
diff --git a/pype/plugins/launcher/actions/Aport.py b/pype/plugins/launcher/actions/Aport.py
index 3773b90256..94f14cd0d3 100644
--- a/pype/plugins/launcher/actions/Aport.py
+++ b/pype/plugins/launcher/actions/Aport.py
@@ -6,10 +6,9 @@ import acre
from avalon import api, lib
import pype.api as pype
+from pype.aport import lib as aportlib
-from pype.api import Logger
-
-log = Logger().get_logger(__name__, "aport")
+log = pype.Logger().get_logger(__name__, "aport")
class Aport(api.Action):
@@ -50,15 +49,16 @@ class Aport(api.Action):
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
- os.environ["AVALON_WORKDIR"] = pype.get_workdir_template()
+ os.environ["AVALON_WORKDIR"] = aportlib.get_workdir_template()
env.update(dict(os.environ))
try:
- lib.launch(executable=executable,
- args=arguments,
- environment=env
- )
+ lib.launch(
+ executable=executable,
+ args=arguments,
+ environment=env
+ )
except Exception as e:
log.error(e)
return
diff --git a/pype/plugins/launcher/actions/unused/PremierePro.py b/pype/plugins/launcher/actions/unused/PremierePro.py
index 7d94db4044..97d693ffbb 100644
--- a/pype/plugins/launcher/actions/unused/PremierePro.py
+++ b/pype/plugins/launcher/actions/unused/PremierePro.py
@@ -3,7 +3,7 @@ import sys
from pprint import pprint
import acre
-from avalon import api, lib
+from avalon import api, lib, io
+from pypeapp import Anatomy
import pype.api as pype
@@ -44,12 +44,42 @@ class PremierePro(api.Action):
env = acre.merge(env, current_env=dict(os.environ))
if not env.get('AVALON_WORKDIR', None):
- os.environ["AVALON_WORKDIR"] = pype.get_workdir_template()
+ project_name = env.get("AVALON_PROJECT")
+ anatomy = Anatomy(project_name)
+ os.environ['AVALON_PROJECT'] = project_name
+ io.Session['AVALON_PROJECT'] = project_name
+
+ task_name = os.environ.get(
+ "AVALON_TASK", io.Session["AVALON_TASK"]
+ )
+ asset_name = os.environ.get(
+ "AVALON_ASSET", io.Session["AVALON_ASSET"]
+ )
+ application = lib.get_application(
+ os.environ["AVALON_APP_NAME"]
+ )
+
+ project_doc = io.find_one({"type": "project"})
+ data = {
+ "task": task_name,
+ "asset": asset_name,
+ "project": {
+ "name": project_doc["name"],
+ "code": project_doc["data"].get("code", '')
+ },
+ "hierarchy": pype.get_hierarchy(),
+ "app": application["application_dir"]
+ }
+ anatomy_filled = anatomy.format(data)
+ workdir = anatomy_filled["work"]["folder"]
+
+ os.environ["AVALON_WORKDIR"] = workdir
env.update(dict(os.environ))
- lib.launch(executable=executable,
- args=arguments,
- environment=env
- )
+ lib.launch(
+ executable=executable,
+ args=arguments,
+ environment=env
+ )
return
diff --git a/pype/plugins/maya/create/create_model.py b/pype/plugins/maya/create/create_model.py
index f9ba229c89..241e2be7f9 100644
--- a/pype/plugins/maya/create/create_model.py
+++ b/pype/plugins/maya/create/create_model.py
@@ -8,7 +8,7 @@ class CreateModel(avalon.maya.Creator):
label = "Model"
family = "model"
icon = "cube"
- defaults = [ "_MD", "_HD", "_LD", "Main", "Proxy",]
+ defaults = ["Main", "Proxy", "_MD", "_HD", "_LD"]
def __init__(self, *args, **kwargs):
super(CreateModel, self).__init__(*args, **kwargs)
diff --git a/pype/plugins/maya/create/create_vrayproxy.py b/pype/plugins/maya/create/create_vrayproxy.py
index f3e6124e1f..010157ca9a 100644
--- a/pype/plugins/maya/create/create_vrayproxy.py
+++ b/pype/plugins/maya/create/create_vrayproxy.py
@@ -13,8 +13,8 @@ class CreateVrayProxy(avalon.maya.Creator):
super(CreateVrayProxy, self).__init__(*args, **kwargs)
self.data["animation"] = False
- self.data["startFrame"] = 1
- self.data["endFrame"] = 1
+ self.data["frameStart"] = 1
+ self.data["frameEnd"] = 1
# Write vertex colors
self.data["vertexColors"] = False
diff --git a/pype/plugins/maya/load/actions.py b/pype/plugins/maya/load/actions.py
index 6db3c6ba34..60316aaf9b 100644
--- a/pype/plugins/maya/load/actions.py
+++ b/pype/plugins/maya/load/actions.py
@@ -25,8 +25,8 @@ class SetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
@@ -59,8 +59,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
diff --git a/pype/plugins/maya/load/load_image_plane.py b/pype/plugins/maya/load/load_image_plane.py
index 5534cce0ee..e2d94ac82e 100644
--- a/pype/plugins/maya/load/load_image_plane.py
+++ b/pype/plugins/maya/load/load_image_plane.py
@@ -7,9 +7,9 @@ from Qt import QtWidgets
class ImagePlaneLoader(api.Loader):
"""Specific loader of plate for image planes on selected camera."""
- families = ["plate"]
+ families = ["plate", "render"]
label = "Create imagePlane on selected camera."
- representations = ["mov"]
+ representations = ["mov", "exr"]
icon = "image"
color = "orange"
@@ -58,12 +58,10 @@ class ImagePlaneLoader(api.Loader):
camera=camera, showInAllViews=False
)
image_plane_shape.depth.set(image_plane_depth)
- # Need to get "type" by string, because its a method as well.
- pc.Attribute(image_plane_shape + ".type").set(2)
+
image_plane_shape.imageName.set(
context["representation"]["data"]["path"]
)
- image_plane_shape.useFrameExtension.set(1)
start_frame = pc.playbackOptions(q=True, min=True)
end_frame = pc.playbackOptions(q=True, max=True)
@@ -71,6 +69,29 @@ class ImagePlaneLoader(api.Loader):
image_plane_shape.frameOffset.set(1 - start_frame)
image_plane_shape.frameIn.set(start_frame)
image_plane_shape.frameOut.set(end_frame)
+ image_plane_shape.useFrameExtension.set(1)
+
+ if context["representation"]["name"] == "mov":
+ # Need to get "type" by string, because its a method as well.
+ pc.Attribute(image_plane_shape + ".type").set(2)
+
+ # Ask user whether to use sequence or still image.
+ if context["representation"]["name"] == "exr":
+ reply = QtWidgets.QMessageBox.information(
+ None,
+ "Frame Hold.",
+ "Hold image sequence on first frame?",
+ QtWidgets.QMessageBox.Ok,
+ QtWidgets.QMessageBox.Cancel
+ )
+ if reply == QtWidgets.QMessageBox.Ok:
+ pc.delete(
+ image_plane_shape.listConnections(type="expression")[0]
+ )
+ image_plane_shape.frameExtension.set(start_frame)
+
+ # Ensure OpenEXRLoader plugin is loaded.
+ pc.loadPlugin("OpenEXRLoader.mll", quiet=True)
new_nodes.extend(
[image_plane_transform.name(), image_plane_shape.name()]
diff --git a/pype/plugins/maya/load/load_reference.py b/pype/plugins/maya/load/load_reference.py
index 199d79c941..fb4b90a1cd 100644
--- a/pype/plugins/maya/load/load_reference.py
+++ b/pype/plugins/maya/load/load_reference.py
@@ -74,12 +74,14 @@ class ReferenceLoader(pype.maya.plugin.ReferenceLoader):
# for backwards compatibility
class AbcLoader(ReferenceLoader):
+ label = "Deprecated loader (don't use)"
families = ["pointcache", "animation"]
representations = ["abc"]
tool_names = []
# for backwards compatibility
class ModelLoader(ReferenceLoader):
+ label = "Deprecated loader (don't use)"
families = ["model", "pointcache"]
representations = ["abc"]
tool_names = []
diff --git a/pype/plugins/maya/publish/collect_ftrack_family.py b/pype/plugins/maya/publish/collect_ftrack_family.py
index b339f2ef69..c8896a5c9c 100644
--- a/pype/plugins/maya/publish/collect_ftrack_family.py
+++ b/pype/plugins/maya/publish/collect_ftrack_family.py
@@ -19,8 +19,8 @@ class CollectFtrackFamilies(pyblish.api.InstancePlugin):
"setdress",
"model",
"animation",
- "workfile",
- "look"
+ "look",
+ "rig"
]
def process(self, instance):
diff --git a/pype/plugins/maya/publish/collect_instances.py b/pype/plugins/maya/publish/collect_instances.py
index fd22085556..f17072a505 100644
--- a/pype/plugins/maya/publish/collect_instances.py
+++ b/pype/plugins/maya/publish/collect_instances.py
@@ -106,9 +106,9 @@ class CollectInstances(pyblish.api.ContextPlugin):
data["asset"])
# Append start frame and end frame to label if present
- if "startFrame" and "endFrame" in data:
- label += " [{0}-{1}]".format(int(data["startFrame"]),
- int(data["endFrame"]))
+ if "frameStart" and "frameEnd" in data:
+ label += " [{0}-{1}]".format(int(data["frameStart"]),
+ int(data["frameEnd"]))
instance.data["label"] = label
diff --git a/pype/plugins/maya/publish/collect_look.py b/pype/plugins/maya/publish/collect_look.py
index 614e5b44a4..5d5cb6f886 100644
--- a/pype/plugins/maya/publish/collect_look.py
+++ b/pype/plugins/maya/publish/collect_look.py
@@ -211,6 +211,7 @@ class CollectLook(pyblish.api.InstancePlugin):
families = ["look"]
label = "Collect Look"
hosts = ["maya"]
+ maketx = True
def process(self, instance):
"""Collect the Look in the instance with the correct layer settings"""
@@ -219,8 +220,8 @@ class CollectLook(pyblish.api.InstancePlugin):
self.collect(instance)
# make ftrack publishable
- instance.data["families"] = ['ftrack']
- instance.data['maketx'] = True
+ instance.data['maketx'] = self.maketx
+ self.log.info('maketx: {}'.format(self.maketx))
def collect(self, instance):
diff --git a/pype/plugins/maya/publish/collect_mayaascii.py b/pype/plugins/maya/publish/collect_mayaascii.py
index fbed8e0ead..b02f61b7c6 100644
--- a/pype/plugins/maya/publish/collect_mayaascii.py
+++ b/pype/plugins/maya/publish/collect_mayaascii.py
@@ -15,8 +15,8 @@ class CollectMayaAscii(pyblish.api.InstancePlugin):
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
- instance.data['startFrame'] = frame
- instance.data['endFrame'] = frame
+ instance.data["frameStart"] = frame
+ instance.data["frameEnd"] = frame
# make ftrack publishable
if instance.data.get('families'):
diff --git a/pype/plugins/maya/publish/collect_model.py b/pype/plugins/maya/publish/collect_model.py
index b412edf1e9..557f96fe7a 100644
--- a/pype/plugins/maya/publish/collect_model.py
+++ b/pype/plugins/maya/publish/collect_model.py
@@ -22,5 +22,5 @@ class CollectModelData(pyblish.api.InstancePlugin):
def process(self, instance):
# Extract only current frame (override)
frame = cmds.currentTime(query=True)
- instance.data['startFrame'] = frame
- instance.data['endFrame'] = frame
+ instance.data["frameStart"] = frame
+ instance.data["frameEnd"] = frame
diff --git a/pype/plugins/maya/publish/collect_renderable_camera.py b/pype/plugins/maya/publish/collect_renderable_camera.py
index 9bfc010204..6b1732c3cb 100644
--- a/pype/plugins/maya/publish/collect_renderable_camera.py
+++ b/pype/plugins/maya/publish/collect_renderable_camera.py
@@ -8,7 +8,8 @@ from pype.maya import lib
class CollectRenderableCamera(pyblish.api.InstancePlugin):
"""Collect the renderable camera(s) for the render layer"""
- order = pyblish.api.CollectorOrder + 0.01
+ # Offset to be after renderlayer collection.
+ order = pyblish.api.CollectorOrder + 0.02
label = "Collect Renderable Camera(s)"
hosts = ["maya"]
families = ["vrayscene",
diff --git a/pype/plugins/maya/publish/collect_renderlayers.py b/pype/plugins/maya/publish/collect_renderlayers.py
index fd916154c1..ce80039362 100644
--- a/pype/plugins/maya/publish/collect_renderlayers.py
+++ b/pype/plugins/maya/publish/collect_renderlayers.py
@@ -64,9 +64,9 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
"subset": layername,
"setMembers": layer,
"publish": True,
- "startFrame": self.get_render_attribute("startFrame",
+ "frameStart": self.get_render_attribute("startFrame",
layer=layer),
- "endFrame": self.get_render_attribute("endFrame",
+ "frameEnd": self.get_render_attribute("endFrame",
layer=layer),
"byFrameStep": self.get_render_attribute("byFrameStep",
layer=layer),
@@ -106,8 +106,8 @@ class CollectMayaRenderlayers(pyblish.api.ContextPlugin):
# Define nice label
label = "{0} ({1})".format(layername, data["asset"])
- label += " [{0}-{1}]".format(int(data["startFrame"]),
- int(data["endFrame"]))
+ label += " [{0}-{1}]".format(int(data["frameStart"]),
+ int(data["frameEnd"]))
instance = context.create_instance(layername)
instance.data["label"] = label
diff --git a/pype/plugins/maya/publish/collect_review.py b/pype/plugins/maya/publish/collect_review.py
index 52aff1d459..18eee78a9c 100644
--- a/pype/plugins/maya/publish/collect_review.py
+++ b/pype/plugins/maya/publish/collect_review.py
@@ -1,4 +1,4 @@
-from maya import cmds
+from maya import cmds, mel
import pymel.core as pm
import pyblish.api
@@ -54,8 +54,10 @@ class CollectReview(pyblish.api.InstancePlugin):
self.log.debug('adding review family to {}'.format(reviewable_subset))
data['review_camera'] = camera
# data["publish"] = False
- data['startFrameReview'] = instance.data['startFrame']
- data['endFrameReview'] = instance.data['endFrame']
+ data['startFrameReview'] = instance.data["frameStart"]
+ data['endFrameReview'] = instance.data["frameEnd"]
+ data["frameStart"] = instance.data["frameStart"]
+ data["frameEnd"] = instance.data["frameEnd"]
data['handles'] = instance.data['handles']
data['step'] = instance.data['step']
data['fps'] = instance.data['fps']
@@ -67,10 +69,46 @@ class CollectReview(pyblish.api.InstancePlugin):
else:
instance.data['subset'] = task + 'Review'
instance.data['review_camera'] = camera
- instance.data['startFrameReview'] = instance.data['startFrame']
- instance.data['endFrameReview'] = instance.data['endFrame']
+ instance.data['startFrameReview'] = instance.data["frameStart"]
+ instance.data['endFrameReview'] = instance.data["frameEnd"]
# make ftrack publishable
instance.data["families"] = ['ftrack']
cmds.setAttr(str(instance) + '.active', 1)
+
+ # Collect audio
+ playback_slider = mel.eval('$tmpVar=$gPlayBackSlider')
+ audio_name = cmds.timeControl(playback_slider, q=True, s=True)
+ display_sounds = cmds.timeControl(
+ playback_slider, q=True, displaySound=True
+ )
+
+ audio_nodes = []
+
+ if audio_name:
+ audio_nodes.append(pm.PyNode(audio_name))
+
+ if not audio_name and display_sounds:
+ start_frame = int(pm.playbackOptions(q=True, min=True))
+ end_frame = float(pm.playbackOptions(q=True, max=True))
+ frame_range = range(int(start_frame), int(end_frame))
+
+ for node in pm.ls(type="audio"):
+ # Check if frame range and audio range intersections,
+ # for whether to include this audio node or not.
+ start_audio = node.offset.get()
+ end_audio = node.offset.get() + node.duration.get()
+ audio_range = range(int(start_audio), int(end_audio))
+
+ if bool(set(frame_range).intersection(audio_range)):
+ audio_nodes.append(node)
+
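+ # Store offset and file path for each relevant audio node so the
+ # review extractor can mux the sound into the quicktime.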
+ instance.data["audio"] = []
+ for node in audio_nodes:
+ instance.data["audio"].append(
+ {
+ "offset": node.offset.get(),
+ "filename": node.filename.get()
+ }
+ )
diff --git a/pype/plugins/maya/publish/collect_vray_scene.py b/pype/plugins/maya/publish/collect_vray_scene.py
index 89c0aa8670..211b212a76 100644
--- a/pype/plugins/maya/publish/collect_vray_scene.py
+++ b/pype/plugins/maya/publish/collect_vray_scene.py
@@ -82,8 +82,8 @@ class CollectVRayScene(pyblish.api.ContextPlugin):
"subset": subset,
"setMembers": layer,
- "startFrame": start_frame,
- "endFrame": end_frame,
+ "frameStart": start_frame,
+ "frameEnd": end_frame,
"renderer": "vray",
"resolution": resolution,
"ext": ".{}".format(extension),
diff --git a/pype/plugins/maya/publish/collect_yeti_rig.py b/pype/plugins/maya/publish/collect_yeti_rig.py
index 469651a891..a2cd5b2a5f 100644
--- a/pype/plugins/maya/publish/collect_yeti_rig.py
+++ b/pype/plugins/maya/publish/collect_yeti_rig.py
@@ -45,8 +45,8 @@ class CollectYetiRig(pyblish.api.InstancePlugin):
instance.data["resources"] = yeti_resources
# Force frame range for export
- instance.data["startFrame"] = 1
- instance.data["endFrame"] = 1
+ instance.data["frameStart"] = 1
+ instance.data["frameEnd"] = 1
def collect_input_connections(self, instance):
"""Collect the inputs for all nodes in the input_SET"""
diff --git a/pype/plugins/maya/publish/extract_animation.py b/pype/plugins/maya/publish/extract_animation.py
index 794a80e7a6..a3daf96639 100644
--- a/pype/plugins/maya/publish/extract_animation.py
+++ b/pype/plugins/maya/publish/extract_animation.py
@@ -35,8 +35,8 @@ class ExtractAnimation(pype.api.Extractor):
fullPath=True) or []
# Collect the start and end including handles
- start = instance.data["startFrame"]
- end = instance.data["endFrame"]
+ start = instance.data["frameStart"]
+ end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0)
if handles:
start -= handles
diff --git a/pype/plugins/maya/publish/extract_camera_alembic.py b/pype/plugins/maya/publish/extract_camera_alembic.py
index 77e055daa6..e0e3f8407d 100644
--- a/pype/plugins/maya/publish/extract_camera_alembic.py
+++ b/pype/plugins/maya/publish/extract_camera_alembic.py
@@ -23,8 +23,8 @@ class ExtractCameraAlembic(pype.api.Extractor):
def process(self, instance):
# get settings
- framerange = [instance.data.get("startFrame", 1),
- instance.data.get("endFrame", 1)]
+ framerange = [instance.data.get("frameStart", 1),
+ instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
diff --git a/pype/plugins/maya/publish/extract_camera_mayaAscii.py b/pype/plugins/maya/publish/extract_camera_mayaAscii.py
index cafee6593d..30f686f6f5 100644
--- a/pype/plugins/maya/publish/extract_camera_mayaAscii.py
+++ b/pype/plugins/maya/publish/extract_camera_mayaAscii.py
@@ -88,8 +88,8 @@ class ExtractCameraMayaAscii(pype.api.Extractor):
def process(self, instance):
# get settings
- framerange = [instance.data.get("startFrame", 1),
- instance.data.get("endFrame", 1)]
+ framerange = [instance.data.get("frameStart", 1),
+ instance.data.get("frameEnd", 1)]
handles = instance.data.get("handles", 0)
step = instance.data.get("step", 1.0)
bake_to_worldspace = instance.data("bakeToWorldSpace", True)
diff --git a/pype/plugins/maya/publish/extract_fbx.py b/pype/plugins/maya/publish/extract_fbx.py
index 73d56f9a2c..01b58241c2 100644
--- a/pype/plugins/maya/publish/extract_fbx.py
+++ b/pype/plugins/maya/publish/extract_fbx.py
@@ -166,8 +166,8 @@ class ExtractFBX(pype.api.Extractor):
self.log.info("Export options: {0}".format(options))
# Collect the start and end including handles
- start = instance.data["startFrame"]
- end = instance.data["endFrame"]
+ start = instance.data["frameStart"]
+ end = instance.data["frameEnd"]
handles = instance.data.get("handles", 0)
if handles:
start -= handles
diff --git a/pype/plugins/maya/publish/extract_pointcache.py b/pype/plugins/maya/publish/extract_pointcache.py
index 0879a4bfe3..cec4886712 100644
--- a/pype/plugins/maya/publish/extract_pointcache.py
+++ b/pype/plugins/maya/publish/extract_pointcache.py
@@ -25,8 +25,8 @@ class ExtractAlembic(pype.api.Extractor):
nodes = instance[:]
# Collect the start and end including handles
- start = instance.data.get("startFrame", 1)
- end = instance.data.get("endFrame", 1)
+ start = instance.data.get("frameStart", 1)
+ end = instance.data.get("frameEnd", 1)
handles = instance.data.get("handles", 0)
if handles:
start -= handles
diff --git a/pype/plugins/maya/publish/extract_quicktime.py b/pype/plugins/maya/publish/extract_quicktime.py
index 87608af641..3d8c1dda9b 100644
--- a/pype/plugins/maya/publish/extract_quicktime.py
+++ b/pype/plugins/maya/publish/extract_quicktime.py
@@ -114,11 +114,11 @@ class ExtractQuicktime(pype.api.Extractor):
'ext': 'mov',
'files': collected_frames,
"stagingDir": stagingdir,
- 'startFrame': start,
- 'endFrame': end,
- 'frameRate': fps,
+ "frameStart": start,
+ "frameEnd": end,
+ 'fps': fps,
'preview': True,
- 'tags': ['review']
+ 'tags': ['review', 'delete']
}
instance.data["representations"].append(representation)
diff --git a/pype/plugins/maya/publish/extract_vrayproxy.py b/pype/plugins/maya/publish/extract_vrayproxy.py
index dcaa910730..fe07159b65 100644
--- a/pype/plugins/maya/publish/extract_vrayproxy.py
+++ b/pype/plugins/maya/publish/extract_vrayproxy.py
@@ -28,14 +28,14 @@ class ExtractVRayProxy(pype.api.Extractor):
if not anim_on:
# Remove animation information because it is not required for
# non-animated subsets
- instance.data.pop("startFrame", None)
- instance.data.pop("endFrame", None)
+ instance.data.pop("frameStart", None)
+ instance.data.pop("frameEnd", None)
start_frame = 1
end_frame = 1
else:
- start_frame = instance.data["startFrame"]
- end_frame = instance.data["endFrame"]
+ start_frame = instance.data["frameStart"]
+ end_frame = instance.data["frameEnd"]
vertex_colors = instance.data.get("vertexColors", False)
diff --git a/pype/plugins/maya/publish/extract_yeti_cache.py b/pype/plugins/maya/publish/extract_yeti_cache.py
index e8cbb02b25..cc71052761 100644
--- a/pype/plugins/maya/publish/extract_yeti_cache.py
+++ b/pype/plugins/maya/publish/extract_yeti_cache.py
@@ -31,8 +31,8 @@ class ExtractYetiCache(pype.api.Extractor):
data_file = os.path.join(dirname, "yeti.fursettings")
# Collect information for writing cache
- start_frame = instance.data.get("startFrame")
- end_frame = instance.data.get("endFrame")
+ start_frame = instance.data.get("frameStart")
+ end_frame = instance.data.get("frameEnd")
preroll = instance.data.get("preroll")
if preroll > 0:
start_frame -= preroll
diff --git a/pype/plugins/maya/publish/increment_current_file_deadline.py b/pype/plugins/maya/publish/increment_current_file_deadline.py
index 6f644adacb..8259145b68 100644
--- a/pype/plugins/maya/publish/increment_current_file_deadline.py
+++ b/pype/plugins/maya/publish/increment_current_file_deadline.py
@@ -11,8 +11,7 @@ class IncrementCurrentFileDeadline(pyblish.api.ContextPlugin):
label = "Increment current file"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["maya"]
- families = ["renderlayer",
- "vrayscene"]
+ families = ["workfile"]
optional = True
def process(self, context):
diff --git a/pype/plugins/maya/publish/submit_maya_deadline.py b/pype/plugins/maya/publish/submit_maya_deadline.py
index b7a4a835fa..a9017e5178 100644
--- a/pype/plugins/maya/publish/submit_maya_deadline.py
+++ b/pype/plugins/maya/publish/submit_maya_deadline.py
@@ -51,7 +51,7 @@ def get_renderer_variables(renderlayer=None):
# returns an index number.
filename_base = os.path.basename(filename_0)
extension = os.path.splitext(filename_base)[-1].strip(".")
- filename_prefix = "//"
+ filename_prefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")
return {"ext": extension,
"filename_prefix": filename_prefix,
@@ -77,8 +77,19 @@ def preview_fname(folder, scene, layer, padding, ext):
"""
- # Following hardcoded "<Scene>/<Scene>_<Layer>/<Layer>"
- output = "{scene}/{layer}/{layer}.{number}.{ext}".format(
+ fileprefix = cmds.getAttr("defaultRenderGlobals.imageFilePrefix")
+ output = fileprefix + ".{number}.{ext}"
+ # RenderPass is currently hardcoded to "beauty" because it's not important
+ # for the deadline submission, but we will need something to replace
+ # "<RenderPass>".
+ mapping = {
+ "<Scene>": "{scene}",
+ "<RenderLayer>": "{layer}",
+ "<RenderPass>": "beauty"
+ }
+ for key, value in mapping.items():
+ output = output.replace(key, value)
+ output = output.format(
scene=scene,
layer=layer,
number="#" * padding,
@@ -171,8 +182,8 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": instance.data.get("mayaRenderPlugin", "MayaBatch"),
"Frames": "{start}-{end}x{step}".format(
- start=int(instance.data["startFrame"]),
- end=int(instance.data["endFrame"]),
+ start=int(instance.data["frameStart"]),
+ end=int(instance.data["frameEnd"]),
step=int(instance.data["byFrameStep"]),
),
@@ -319,7 +330,7 @@ class MayaSubmitDeadline(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
- for key in ("startFrame", "endFrame", "byFrameStep"):
+ for key in ("frameStart", "frameEnd", "byFrameStep"):
value = instance.data[key]
if int(value) == value:
diff --git a/pype/plugins/maya/publish/submit_maya_muster.py b/pype/plugins/maya/publish/submit_maya_muster.py
index 13a24c0a52..84ad890de1 100644
--- a/pype/plugins/maya/publish/submit_maya_muster.py
+++ b/pype/plugins/maya/publish/submit_maya_muster.py
@@ -402,8 +402,8 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
},
"frames_range": {
"value": "{start}-{end}".format(
- start=int(instance.data["startFrame"]),
- end=int(instance.data["endFrame"])),
+ start=int(instance.data["frameStart"]),
+ end=int(instance.data["frameEnd"])),
"state": True,
"subst": False
},
@@ -552,7 +552,7 @@ class MayaSubmitMuster(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
- for key in ("startFrame", "endFrame", "byFrameStep"):
+ for key in ("frameStart", "frameEnd", "byFrameStep"):
value = instance.data[key]
if int(value) == value:
diff --git a/pype/plugins/maya/publish/submit_vray_deadline.py b/pype/plugins/maya/publish/submit_vray_deadline.py
index fab6d8ff43..8854edec03 100644
--- a/pype/plugins/maya/publish/submit_vray_deadline.py
+++ b/pype/plugins/maya/publish/submit_vray_deadline.py
@@ -51,8 +51,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
filename,
vrscene_output)
- start_frame = int(instance.data["startFrame"])
- end_frame = int(instance.data["endFrame"])
+ start_frame = int(instance.data["frameStart"])
+ end_frame = int(instance.data["frameEnd"])
# Primary job
self.log.info("Submitting export job ..")
@@ -123,8 +123,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
self.log.info("Submitting render job ..")
- start_frame = int(instance.data["startFrame"])
- end_frame = int(instance.data["endFrame"])
+ start_frame = int(instance.data["frameStart"])
+ end_frame = int(instance.data["frameEnd"])
ext = instance.data.get("ext", "exr")
# Create output directory for renders
@@ -215,8 +215,8 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
return cmd.format(project=instance.context.data["workspaceDir"],
cam=cammera,
- startFrame=instance.data["startFrame"],
- endFrame=instance.data["endFrame"],
+ startFrame=instance.data["frameStart"],
+ endFrame=instance.data["frameEnd"],
layer=instance.name)
def build_jobinfo_environment(self, env):
@@ -266,7 +266,7 @@ class VraySubmitDeadline(pyblish.api.InstancePlugin):
if dir:
return output_path.replace("\\", "/")
- start_frame = int(instance.data["startFrame"])
+ start_frame = int(instance.data["frameStart"])
filename_zero = "{}_{:04d}.vrscene".format(output_path, start_frame)
result = filename_zero.replace("\\", "/")
diff --git a/pype/plugins/maya/publish/validate_attributes.py b/pype/plugins/maya/publish/validate_attributes.py
index 8b0f14b8b2..6ecebfa107 100644
--- a/pype/plugins/maya/publish/validate_attributes.py
+++ b/pype/plugins/maya/publish/validate_attributes.py
@@ -20,6 +20,7 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
label = "Attributes"
hosts = ["maya"]
actions = [pype.api.RepairContextAction]
+ optional = True
def process(self, context):
# Check for preset existence.
@@ -66,7 +67,7 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
)
# Get invalid attributes.
- nodes = [pm.PyNode(x) for x in instance]
+ nodes = pm.ls()
for node in nodes:
name = node.name(stripNamespace=True)
if name not in attributes.keys():
@@ -74,8 +75,12 @@ class ValidateAttributes(pyblish.api.ContextPlugin):
presets_to_validate = attributes[name]
for attribute in node.listAttr():
- if attribute.attrName() in presets_to_validate:
- expected = presets_to_validate[attribute.attrName()]
+ names = [attribute.shortName(), attribute.longName()]
+ attribute_name = list(
+ set(names) & set(presets_to_validate.keys())
+ )
+ if attribute_name:
+ expected = presets_to_validate[attribute_name[0]]
if attribute.get() != expected:
invalid_attributes.append(
{
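
The validator now matches a preset against either the short or the long attribute name via a set intersection. A standalone sketch of that lookup; the preset dictionary and attribute names are invented for illustration:

```python
presets_to_validate = {"castsShadows": True, "aiSubdivType": 1}

def matching_preset_key(short_name, long_name):
    # an attribute matches if either of its names appears in the presets
    names = list({short_name, long_name} & set(presets_to_validate))
    return names[0] if names else None

print(matching_preset_key("csh", "castsShadows"))  # -> "castsShadows"
print(matching_preset_key("v", "visibility"))      # -> None
```
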
diff --git a/pype/plugins/maya/publish/validate_frame_range.py b/pype/plugins/maya/publish/validate_frame_range.py
index 78bce85b96..57eb40eb7c 100644
--- a/pype/plugins/maya/publish/validate_frame_range.py
+++ b/pype/plugins/maya/publish/validate_frame_range.py
@@ -25,8 +25,8 @@ class ValidateFrameRange(pyblish.api.InstancePlugin):
def process(self, instance):
- start = instance.data.get("startFrame", None)
- end = instance.data.get("endFrame", None)
+ start = instance.data.get("frameStart", None)
+ end = instance.data.get("frameEnd", None)
handles = instance.data.get("handles", None)
# Check if any of the values are present
diff --git a/pype/plugins/maya/publish/validate_instancer_frame_ranges.py b/pype/plugins/maya/publish/validate_instancer_frame_ranges.py
index f31014e6e9..3514cf0a98 100644
--- a/pype/plugins/maya/publish/validate_instancer_frame_ranges.py
+++ b/pype/plugins/maya/publish/validate_instancer_frame_ranges.py
@@ -51,8 +51,8 @@ class ValidateInstancerFrameRanges(pyblish.api.InstancePlugin):
import pyseq
- start_frame = instance.data.get("startFrame", 0)
- end_frame = instance.data.get("endFrame", 0)
+ start_frame = instance.data.get("frameStart", 0)
+ end_frame = instance.data.get("frameEnd", 0)
required = range(int(start_frame), int(end_frame) + 1)
invalid = list()
diff --git a/pype/plugins/maya/publish/validate_look_sets.py b/pype/plugins/maya/publish/validate_look_sets.py
index cfa499c763..ebc39bd3ba 100644
--- a/pype/plugins/maya/publish/validate_look_sets.py
+++ b/pype/plugins/maya/publish/validate_look_sets.py
@@ -75,11 +75,11 @@ class ValidateLookSets(pyblish.api.InstancePlugin):
if missing_sets:
for set in missing_sets:
if '_SET' not in set:
- # A set of this node is not coming along, this is wrong!
- cls.log.error("Missing sets '{}' for node "
- "'{}'".format(missing_sets, node))
- invalid.append(node)
- continue
+ # A set of this node is not coming along, this is wrong!
+ cls.log.error("Missing sets '{}' for node "
+ "'{}'".format(missing_sets, node))
+ invalid.append(node)
+ continue
# Ensure the node is in the sets that are collected
for shaderset, data in relationships.items():
diff --git a/pype/plugins/maya/publish/validate_look_single_shader.py b/pype/plugins/maya/publish/validate_look_single_shader.py
index 1b9ebffced..a60d1f1817 100644
--- a/pype/plugins/maya/publish/validate_look_single_shader.py
+++ b/pype/plugins/maya/publish/validate_look_single_shader.py
@@ -40,6 +40,10 @@ class ValidateSingleShader(pyblish.api.InstancePlugin):
shading_engines = cmds.listConnections(shape,
destination=True,
type="shadingEngine") or []
+
+ # Only interested in unique shading engines.
+ shading_engines = list(set(shading_engines))
+
if not shading_engines:
no_shaders.append(shape)
elif len(shading_engines) > 1:
diff --git a/pype/plugins/maya/publish/validate_maya_units.py b/pype/plugins/maya/publish/validate_maya_units.py
index 8610c4dd25..83fb8ecba7 100644
--- a/pype/plugins/maya/publish/validate_maya_units.py
+++ b/pype/plugins/maya/publish/validate_maya_units.py
@@ -21,7 +21,7 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
angularunits = context.data('angularUnits')
fps = context.data['fps']
- asset_fps = lib.get_asset_fps()
+ asset_fps = lib.get_asset()["data"]["fps"]
self.log.info('Units (linear): {0}'.format(linearunits))
self.log.info('Units (angular): {0}'.format(angularunits))
@@ -50,5 +50,5 @@ class ValidateMayaUnits(pyblish.api.ContextPlugin):
cls.log.debug(current_linear)
cls.log.info("Setting time unit to match project")
- asset_fps = lib.get_asset_fps()
+ asset_fps = lib.get_asset()["data"]["fps"]
mayalib.set_scene_fps(asset_fps)
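
`lib.get_asset_fps()` is gone; the validator now reads `fps` straight off the asset document. A sketch of the comparison it performs, with `get_asset()` stubbed to return an Avalon-shaped document:

```python
def get_asset():
    # stand-in for pype.lib.get_asset(); mirrors the shape of an
    # Avalon asset document
    return {"name": "sh010", "data": {"fps": 25.0}}

scene_fps = 24.0
asset_fps = get_asset()["data"]["fps"]
if scene_fps != asset_fps:
    print("Scene FPS {0} does not match asset FPS {1}".format(
        scene_fps, asset_fps))
```
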
diff --git a/pype/plugins/maya/publish/validate_render_image_rule.py b/pype/plugins/maya/publish/validate_render_image_rule.py
index 377dbfeadc..c05a15ab77 100644
--- a/pype/plugins/maya/publish/validate_render_image_rule.py
+++ b/pype/plugins/maya/publish/validate_render_image_rule.py
@@ -1,4 +1,5 @@
import maya.mel as mel
+import pymel.core as pm
import pyblish.api
import pype.api
@@ -18,9 +19,15 @@ class ValidateRenderImageRule(pyblish.api.InstancePlugin):
label = "Images File Rule (Workspace)"
hosts = ["maya"]
families = ["renderlayer"]
+ actions = [pype.api.RepairAction]
def process(self, instance):
assert get_file_rule("images") == "renders", (
"Workspace's `images` file rule must be set to: renders"
)
+
+ @classmethod
+ def repair(cls, instance):
+ pm.workspace.fileRules["images"] = "renders"
+ pm.system.Workspace.save()
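
The new `RepairAction` writes the workspace file rule through pymel. A hedged sketch of the same repair wrapped in an idempotent helper; it reuses the calls from the plugin above and only runs inside a Maya session:

```python
import pymel.core as pm

def ensure_images_rule(target="renders"):
    # set the workspace "images" file rule and persist the workspace
    # definition, but only when it actually differs
    if pm.workspace.fileRules["images"] != target:
        pm.workspace.fileRules["images"] = target
        pm.system.Workspace.save()
```
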
diff --git a/pype/plugins/maya/publish/validate_rendersettings.py b/pype/plugins/maya/publish/validate_rendersettings.py
index 0450cb83b5..0d983b9cf1 100644
--- a/pype/plugins/maya/publish/validate_rendersettings.py
+++ b/pype/plugins/maya/publish/validate_rendersettings.py
@@ -1,4 +1,7 @@
-import maya.cmds as cmds
+import os
+
+from maya import cmds, mel
+import pymel.core as pm
import pyblish.api
import pype.api
@@ -9,9 +12,9 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
"""Validates the global render settings
* File Name Prefix must be as followed:
- * vray: <Scene>/<Layer>/<Layer>
- * arnold: <Scene>/<RenderLayer>/<RenderPass>
- * default: <Scene>/<RenderLayer>/<RenderPass>
+ * vray: maya/<Scene>/<Layer>/<Layer>
+ * arnold: maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>
+ * default: maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>
* Frame Padding must be:
* default: 4
@@ -34,8 +37,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
actions = [pype.api.RepairAction]
DEFAULT_PADDING = 4
- RENDERER_PREFIX = {"vray": "<Scene>/<Layer>/<Layer>"}
- DEFAULT_PREFIX = "<Scene>/<RenderLayer>/<RenderPass>"
+ RENDERER_PREFIX = {"vray": "maya/<Scene>/<Layer>/<Layer>"}
+ DEFAULT_PREFIX = "maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
def process(self, instance):
@@ -66,8 +69,8 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
cls.log.error("Animation needs to be enabled. Use the same "
"frame for start and end to render single frame")
- fname_prefix = cls.RENDERER_PREFIX.get(renderer,
- cls.DEFAULT_PREFIX)
+ fname_prefix = cls.get_prefix(renderer)
+
if prefix != fname_prefix:
invalid = True
cls.log.error("Wrong file name prefix: %s (expected: %s)"
@@ -80,6 +83,21 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
return invalid
+ @classmethod
+ def get_prefix(cls, renderer):
+ prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX)
+ # maya.cmds and pymel.core return only default project directory and
+ # not the current one but only default.
+ output_path = os.path.join(
+ mel.eval("workspace -q -rd;"), pm.workspace.fileRules["images"]
+ )
+ # Workfile paths can be configured to have host name in file path.
+ # In this case we want to avoid duplicate folder names.
+ if "maya" in output_path.lower():
+ prefix = prefix.replace("maya/", "")
+
+ return prefix
+
@classmethod
def repair(cls, instance):
@@ -94,7 +112,7 @@ class ValidateRenderSettings(pyblish.api.InstancePlugin):
node = render_attrs["node"]
prefix_attr = render_attrs["prefix"]
- fname_prefix = cls.RENDERER_PREFIX.get(renderer, cls.DEFAULT_PREFIX)
+ fname_prefix = cls.get_prefix(renderer)
cmds.setAttr("{}.{}".format(node, prefix_attr),
fname_prefix, type="string")
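
`get_prefix()` above avoids a `maya/maya` doubling: when the workspace's image output already contains a `maya` folder, the hard-coded `maya/` level is stripped from the expected prefix. A pure-Python sketch with sample workspace values:

```python
def expected_prefix(prefix, workspace_root, images_rule):
    # where rendered images will land for this workspace
    output_path = "{0}/{1}".format(workspace_root.rstrip("/"), images_rule)
    # workfile templates may already place a "maya" folder in the path;
    # drop the hard-coded "maya/" level to avoid duplicate folder names
    if "maya" in output_path.lower():
        prefix = prefix.replace("maya/", "")
    return prefix

print(expected_prefix("maya/<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>",
                      "C:/proj/sh010/work/maya", "renders"))
# -> "<Scene>/<RenderLayer>/<RenderLayer>_<RenderPass>"
```
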
diff --git a/pype/plugins/maya/publish/validate_scene_set_workspace.py b/pype/plugins/maya/publish/validate_scene_set_workspace.py
index 778c7eae86..bda397cf2a 100644
--- a/pype/plugins/maya/publish/validate_scene_set_workspace.py
+++ b/pype/plugins/maya/publish/validate_scene_set_workspace.py
@@ -12,7 +12,7 @@ def is_subdir(path, root_dir):
root_dir = os.path.realpath(root_dir)
# If not on same drive
- if os.path.splitdrive(path)[0] != os.path.splitdrive(root_dir)[0]:
+ if os.path.splitdrive(path)[0].lower() != os.path.splitdrive(root_dir)[0].lower(): # noqa: E501
return False
# Get 'relative path' (can contain ../ which means going up)
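
The fix makes the drive-letter comparison case-insensitive, since Windows may report `C:` and `c:` for the same drive. A standalone sketch of just that comparison:

```python
import os

def same_drive(path, root_dir):
    # drive components must compare case-insensitively on Windows
    drive = os.path.splitdrive(os.path.realpath(path))[0]
    root_drive = os.path.splitdrive(os.path.realpath(root_dir))[0]
    return drive.lower() == root_drive.lower()

print(same_drive("C:/projects/shot", "c:/projects"))  # -> True
```
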
diff --git a/pype/plugins/maya/publish/validate_vrayproxy.py b/pype/plugins/maya/publish/validate_vrayproxy.py
index c5303c8ec3..3eceace76d 100644
--- a/pype/plugins/maya/publish/validate_vrayproxy.py
+++ b/pype/plugins/maya/publish/validate_vrayproxy.py
@@ -23,5 +23,5 @@ class ValidateVrayProxy(pyblish.api.InstancePlugin):
cls.log.error("'%s' is empty! This is a bug" % instance.name)
if data["animation"]:
- if data["endFrame"] < data["startFrame"]:
+ if data["frameEnd"] < data["frameStart"]:
cls.log.error("End frame is smaller than start frame")
diff --git a/pype/plugins/nuke/_load_unused/load_sequence.py b/pype/plugins/nuke/_load_unused/load_sequence.py
deleted file mode 100644
index 695dd0b981..0000000000
--- a/pype/plugins/nuke/_load_unused/load_sequence.py
+++ /dev/null
@@ -1,252 +0,0 @@
-import os
-import contextlib
-
-from avalon import api
-import avalon.io as io
-
-from avalon.nuke import log
-import nuke
-
-
-@contextlib.contextmanager
-def preserve_inputs(node, knobs):
- """Preserve the node's inputs after context"""
-
- values = {}
- for name in knobs:
- try:
- knob_value = node[name].vaule()
- values[name] = knob_value
- except ValueError:
- log.warning("missing knob {} in node {}"
- "{}".format(name, node['name'].value()))
-
- try:
- yield
- finally:
- for name, value in values.items():
- node[name].setValue(value)
-
-
-@contextlib.contextmanager
-def preserve_trim(node):
- """Preserve the relative trim of the Loader tool.
-
- This tries to preserve the loader's trim (trim in and trim out) after
- the context by reapplying the "amount" it trims on the clip's length at
- start and end.
-
- """
- # working script frame range
- script_start = nuke.root()["start_frame"].value()
-
- start_at_frame = None
- offset_frame = None
- if node['frame_mode'].value() == "start at":
- start_at_frame = node['frame'].value()
- if node['frame_mode'].value() is "offset":
- offset_frame = node['frame'].value()
-
- try:
- yield
- finally:
- if start_at_frame:
- node['frame_mode'].setValue("start at")
- node['frame'].setValue(str(script_start))
- log.info("start frame of reader was set to"
- "{}".format(script_start))
-
- if offset_frame:
- node['frame_mode'].setValue("offset")
- node['frame'].setValue(str((script_start + offset_frame)))
- log.info("start frame of reader was set to"
- "{}".format(script_start))
-
-
-def loader_shift(node, frame, relative=True):
- """Shift global in time by i preserving duration
-
- This moves the loader by i frames preserving global duration. When relative
- is False it will shift the global in to the start frame.
-
- Args:
- loader (tool): The fusion loader tool.
- frame (int): The amount of frames to move.
- relative (bool): When True the shift is relative, else the shift will
- change the global in to frame.
-
- Returns:
- int: The resulting relative frame change (how much it moved)
-
- """
- # working script frame range
- script_start = nuke.root()["start_frame"].value()
-
- if node['frame_mode'].value() == "start at":
- start_at_frame = node['frame'].value()
- if node['frame_mode'].value() is "offset":
- offset_frame = node['frame'].value()
-
- if relative:
- shift = frame
- else:
- if start_at_frame:
- shift = frame
- if offset_frame:
- shift = frame + offset_frame
-
- # Shifting global in will try to automatically compensate for the change
- # in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
- # input values to "just shift" the clip
- with preserve_inputs(node, knobs=["file",
- "first",
- "last",
- "originfirst",
- "originlast",
- "frame_mode",
- "frame"]):
-
- # GlobalIn cannot be set past GlobalOut or vice versa
- # so we must apply them in the order of the shift.
- if start_at_frame:
- node['frame_mode'].setValue("start at")
- node['frame'].setValue(str(script_start + shift))
- if offset_frame:
- node['frame_mode'].setValue("offset")
- node['frame'].setValue(str(shift))
-
- return int(shift)
-
-
-class LoadSequence(api.Loader):
- """Load image sequence into Nuke"""
-
- families = ["write"]
- representations = ["*"]
-
- label = "Load sequence"
- order = -10
- icon = "code-fork"
- color = "orange"
-
- def load(self, context, name, namespace, data):
-
- from avalon.nuke import (
- containerise,
- ls_img_sequence,
- viewer_update_and_undo_stop
- )
- log.info("here i am")
- # Fallback to asset name when namespace is None
- if namespace is None:
- namespace = context['asset']['name']
-
- # Use the first file for now
- # TODO: fix path fname
- file = ls_img_sequence(os.path.dirname(self.fname), one=True)
-
- # Create the Loader with the filename path set
- with viewer_update_and_undo_stop():
- # TODO: it might be universal read to img/geo/camera
- r = nuke.createNode(
- "Read",
- "name {}".format(self.name)) # TODO: does self.name exist?
- r["file"].setValue(file['path'])
- if len(file['frames']) is 1:
- first = file['frames'][0][0]
- last = file['frames'][0][1]
- r["originfirst"].setValue(first)
- r["first"].setValue(first)
- r["originlast"].setValue(last)
- r["last"].setValue(last)
- else:
- first = file['frames'][0][0]
- last = file['frames'][:-1][1]
- r["originfirst"].setValue(first)
- r["first"].setValue(first)
- r["originlast"].setValue(last)
- r["last"].setValue(last)
- log.warning("Missing frames in image sequence")
-
- # Set global in point to start frame (if in version.data)
- start = context["version"]["data"].get("startFrame", None)
- if start is not None:
- loader_shift(r, start, relative=False)
-
- containerise(r,
- name=name,
- namespace=namespace,
- context=context,
- loader=self.__class__.__name__)
-
- def switch(self, container, representation):
- self.update(container, representation)
-
- def update(self, container, representation):
- """Update the Loader's path
-
- Fusion automatically tries to reset some variables when changing
- the loader's path to a new file. These automatic changes are to its
- inputs:
-
- """
-
- from avalon.nuke import (
- viewer_update_and_undo_stop,
- ls_img_sequence,
- update_container
- )
- log.info("this i can see")
- node = container["_tool"]
- # TODO: prepare also for other readers img/geo/camera
- assert node.Class() == "Reader", "Must be Reader"
-
- root = api.get_representation_path(representation)
- file = ls_img_sequence(os.path.dirname(root), one=True)
-
- # Get start frame from version data
- version = io.find_one({"type": "version",
- "_id": representation["parent"]})
- start = version["data"].get("startFrame")
- if start is None:
- log.warning("Missing start frame for updated version"
- "assuming starts at frame 0 for: "
- "{} ({})".format(node['name'].value(), representation))
- start = 0
-
- with viewer_update_and_undo_stop():
-
- # Update the loader's path whilst preserving some values
- with preserve_trim(node):
- with preserve_inputs(node,
- knobs=["file",
- "first",
- "last",
- "originfirst",
- "originlast",
- "frame_mode",
- "frame"]):
- node["file"] = file["path"]
-
- # Set the global in to the start frame of the sequence
- global_in_changed = loader_shift(node, start, relative=False)
- if global_in_changed:
- # Log this change to the user
- log.debug("Changed '{}' global in:"
- " {:d}".format(node['name'].value(), start))
-
- # Update the imprinted representation
- update_container(
- node,
- {"representation": str(representation["_id"])}
- )
-
- def remove(self, container):
-
- from avalon.nuke import viewer_update_and_undo_stop
-
- node = container["_tool"]
- assert node.Class() == "Reader", "Must be Reader"
-
- with viewer_update_and_undo_stop():
- nuke.delete(node)
diff --git a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py b/pype/plugins/nuke/_publish_unused/publish_image_sequences.py
index e8b468e94a..34634dcc6b 100644
--- a/pype/plugins/nuke/_publish_unused/publish_image_sequences.py
+++ b/pype/plugins/nuke/_publish_unused/publish_image_sequences.py
@@ -53,8 +53,8 @@ class PublishImageSequence(pyblish.api.InstancePlugin):
# The instance has most of the information already stored
metadata = {
"regex": regex,
- "startFrame": instance.context.data["startFrame"],
- "endFrame": instance.context.data["endFrame"],
+ "frameStart": instance.context.data["frameStart"],
+ "frameEnd": instance.context.data["frameEnd"],
"families": ["imagesequence"],
}
diff --git a/pype/plugins/nuke/_publish_unused/submit_deadline.py b/pype/plugins/nuke/_publish_unused/submit_deadline.py
index b5476876e0..8b86189425 100644
--- a/pype/plugins/nuke/_publish_unused/submit_deadline.py
+++ b/pype/plugins/nuke/_publish_unused/submit_deadline.py
@@ -78,8 +78,8 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
- start=int(instance.data["startFrame"]),
- end=int(instance.data["endFrame"])
+ start=int(instance.data["frameStart"]),
+ end=int(instance.data["frameEnd"])
),
"Comment": comment,
diff --git a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py
index 4fbf1ac56c..dd66b4fb3a 100644
--- a/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py
+++ b/pype/plugins/nuke/_publish_unused/validate_nuke_settings.py
@@ -19,10 +19,10 @@ class ValidateSettingsNuke(pyblish.api.Validator):
asset = io.find_one({"name": os.environ['AVALON_ASSET']})
try:
avalon_resolution = asset["data"].get("resolution", '')
- avalon_pixel_aspect = asset["data"].get("pixel_aspect", '')
+ avalon_pixel_aspect = asset["data"].get("pixelAspect", '')
avalon_fps = asset["data"].get("fps", '')
- avalon_first = asset["data"].get("edit_in", '')
- avalon_last = asset["data"].get("edit_out", '')
+ avalon_first = asset["data"].get("frameStart", '')
+ avalon_last = asset["data"].get("frameEnd", '')
avalon_crop = asset["data"].get("crop", '')
except KeyError:
print(
diff --git a/pype/plugins/nuke/create/create_write b/pype/plugins/nuke/create/create_write
deleted file mode 100644
index dcb132875a..0000000000
--- a/pype/plugins/nuke/create/create_write
+++ /dev/null
@@ -1,17 +0,0 @@
-# type: render
-# if no render type node in script then first is having in name [master] for definition of main script renderer
-# colorspace setting from templates
-# dataflow setting from templates
-
-# type: mask_render
-# created with shuffle gizmo for RGB separation into davinci matte
-# colorspace setting from templates
-# dataflow setting from templates
-
-# type: prerender
-# backdrop with write and read
-# colorspace setting from templates
-# dataflow setting from templates
-
-# type: geo
-# dataflow setting from templates
diff --git a/pype/plugins/nuke/create/create_write.py b/pype/plugins/nuke/create/create_write.py
index ff1fde6638..c3da555259 100644
--- a/pype/plugins/nuke/create/create_write.py
+++ b/pype/plugins/nuke/create/create_write.py
@@ -1,11 +1,9 @@
from collections import OrderedDict
import avalon.api
import avalon.nuke
-from pype.nuke import (
- create_write_node
-)
+from pype.nuke.lib import create_write_node
from pype import api as pype
-# from pypeapp import Logger
+from pypeapp import config
import nuke
@@ -18,7 +16,6 @@ def subset_to_families(subset, family, families):
new_subset = families + subset_sufx
return "{}.{}".format(family, new_subset)
-
class CreateWriteRender(avalon.nuke.Creator):
# change this to template preset
preset = "render"
@@ -33,6 +30,11 @@ class CreateWriteRender(avalon.nuke.Creator):
def __init__(self, *args, **kwargs):
super(CreateWriteRender, self).__init__(*args, **kwargs)
+ self.presets = config.get_presets()['plugins']["nuke"]["create"].get(
+ self.__class__.__name__, {}
+ )
+
+ self.name = self.data["subset"]
data = OrderedDict()
@@ -44,7 +46,6 @@ class CreateWriteRender(avalon.nuke.Creator):
self.data = data
def process(self):
- self.name = self.data["subset"]
family = self.family
node = 'write'
@@ -58,9 +59,17 @@ class CreateWriteRender(avalon.nuke.Creator):
"avalon": self.data
}
- create_write_node(self.data["subset"], write_data)
+ if self.presets.get('fpath_template'):
+ self.log.info("Adding template path from preset")
+ write_data.update(
+ {"fpath_template": self.presets["fpath_template"]}
+ )
+ else:
+ self.log.info("Adding template path from plugin")
+ write_data.update({
+ "fpath_template": "{work}/renders/nuke/{subset}/{subset}.{frame}.{ext}"})
- return
+ return create_write_node(self.data["subset"], write_data)
class CreateWritePrerender(avalon.nuke.Creator):
@@ -77,6 +86,9 @@ class CreateWritePrerender(avalon.nuke.Creator):
def __init__(self, *args, **kwargs):
super(CreateWritePrerender, self).__init__(*args, **kwargs)
+ self.presets = config.get_presets()['plugins']["nuke"]["create"].get(
+ self.__class__.__name__, {}
+ )
data = OrderedDict()
@@ -100,56 +112,34 @@ class CreateWritePrerender(avalon.nuke.Creator):
"avalon": self.data
}
- create_write_node(self.data["subset"], write_data)
+ if self.presets.get('fpath_template'):
+ self.log.info("Adding template path from preset")
+ write_data.update(
+ {"fpath_template": self.presets["fpath_template"]}
+ )
+ else:
+ self.log.info("Adding template path from plugin")
+ write_data.update({
+ "fpath_template": "{work}/prerenders/{subset}/{subset}.{frame}.{ext}"})
+
+ # get group node
+ group_node = create_write_node(self.data["subset"], write_data)
+
+ # open group node
+ group_node.begin()
+ for n in nuke.allNodes():
+ # get write node
+ if n.Class() in "Write":
+ write_node = n
+ group_node.end()
+
+ # linking knobs to group property panel
+ linking_knobs = ["first", "last", "use_limit"]
+ for k in linking_knobs:
+ lnk = nuke.Link_Knob(k)
+ lnk.makeLink(write_node.name(), k)
+ lnk.setName(k.replace('_', ' ').capitalize())
+ lnk.clearFlag(nuke.STARTLINE)
+ group_node.addKnob(lnk)
return
-
-
-"""
-class CrateWriteStill(avalon.nuke.Creator):
- # change this to template preset
- preset = "still"
-
- name = "WriteStill"
- label = "Create Write Still"
- hosts = ["nuke"]
- family = "{}_write".format(preset)
- families = preset
- icon = "image"
-
- def __init__(self, *args, **kwargs):
- super(CrateWriteStill, self).__init__(*args, **kwargs)
-
- data = OrderedDict()
-
- data["family"] = self.family.split("_")[-1]
- data["families"] = self.families
-
- {data.update({k: v}) for k, v in self.data.items()
- if k not in data.keys()}
- self.data = data
-
- def process(self):
- self.name = self.data["subset"]
-
- node_name = self.data["subset"].replace(
- "_", "_f{}_".format(nuke.frame()))
- instance = nuke.toNode(self.data["subset"])
- self.data["subset"] = node_name
-
- family = self.family
- node = 'write'
-
- if not instance:
- write_data = {
- "frame_range": [nuke.frame(), nuke.frame()],
- "class": node,
- "preset": self.preset,
- "avalon": self.data
- }
-
- nuke.createNode("FrameHold", "first_frame {}".format(nuke.frame()))
- create_write_node(node_name, write_data)
-
- return
-"""
diff --git a/pype/plugins/nuke/inventory/select_containers.py b/pype/plugins/nuke/inventory/select_containers.py
index 339e3a4992..b420f53431 100644
--- a/pype/plugins/nuke/inventory/select_containers.py
+++ b/pype/plugins/nuke/inventory/select_containers.py
@@ -11,7 +11,7 @@ class SelectContainers(api.InventoryAction):
import avalon.nuke
- nodes = [i["_tool"] for i in containers]
+ nodes = [i["_node"] for i in containers]
with avalon.nuke.viewer_update_and_undo_stop():
# clear previous_selection
diff --git a/pype/plugins/nuke/inventory/set_tool_color.py b/pype/plugins/nuke/inventory/set_tool_color.py
index 725a3f3e74..7a81444c90 100644
--- a/pype/plugins/nuke/inventory/set_tool_color.py
+++ b/pype/plugins/nuke/inventory/set_tool_color.py
@@ -20,7 +20,7 @@
#
# # Get tool color
# first = containers[0]
-# tool = first["_tool"]
+# tool = first["_node"]
# color = tool.TileColor
#
# if color is not None:
@@ -40,7 +40,7 @@
# rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
#
# # Update tool
-# tool = container["_tool"]
+# tool = container["_node"]
# tool.TileColor = rgb_f_table
#
# result.append(container)
diff --git a/pype/plugins/nuke/load/actions.py b/pype/plugins/nuke/load/actions.py
index 917e7e71b0..a435633fd6 100644
--- a/pype/plugins/nuke/load/actions.py
+++ b/pype/plugins/nuke/load/actions.py
@@ -30,8 +30,8 @@ class SetFrameRangeLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
log.info("start: {}, end: {}".format(start, end))
if start is None or end is None:
@@ -64,8 +64,8 @@ class SetFrameRangeWithHandlesLoader(api.Loader):
version = context['version']
version_data = version.get("data", {})
- start = version_data.get("startFrame", None)
- end = version_data.get("endFrame", None)
+ start = version_data.get("frameStart", None)
+ end = version_data.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
diff --git a/pype/plugins/nuke/load/load_luts.py b/pype/plugins/nuke/load/load_luts.py
new file mode 100644
index 0000000000..7e1302fffe
--- /dev/null
+++ b/pype/plugins/nuke/load/load_luts.py
@@ -0,0 +1,317 @@
+from avalon import api, style, io
+import nuke
+import json
+from collections import OrderedDict
+
+
+class LoadLuts(api.Loader):
+ """Loading colorspace soft effect exported from nukestudio"""
+
+ representations = ["lutJson"]
+ families = ["lut"]
+
+ label = "Load Luts - nodes"
+ order = 0
+ icon = "cc"
+ color = style.colors.light
+
+ def load(self, context, name, namespace, data):
+ """
+ Loading function that attaches the soft effects to a particular read node
+
+ Arguments:
+ context (dict): context of version
+ name (str): name of the version
+ namespace (str): asset name
+ data (dict): compulsory attribute > not used
+
+ Returns:
+ nuke node: containerised nuke node object
+ """
+ # import dependencies
+ from avalon.nuke import containerise
+
+ # get main variables
+ version = context['version']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = namespace or context['asset']['name']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ # prepare data for imprinting
+ # add additional metadata from the version to imprint to Avalon knob
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # getting file path
+ file = self.fname.replace("\\", "/")
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ GN = nuke.createNode("Group")
+
+ GN["name"].setValue(object_name)
+
+ # adding content to the group node
+ with GN:
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 4:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to find parent read node
+ self.connect_read_node(GN, namespace, json_f["assignTo"])
+
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
+
+ return containerise(
+ node=GN,
+ name=name,
+ namespace=namespace,
+ context=context,
+ loader=self.__class__.__name__,
+ data=data_imprint)
+
+ def update(self, container, representation):
+ """Update the Loader's path
+
+ Nuke automatically tries to reset some variables when changing
+ the loader's path to a new file. These automatic changes are to its
+ inputs.
+
+ """
+
+ from avalon.nuke import (
+ update_container
+ )
+ # get main variables
+ # Get version from io
+ version = io.find_one({
+ "type": "version",
+ "_id": representation["parent"]
+ })
+ # get corresponding node
+ GN = nuke.toNode(container['objectName'])
+
+ file = api.get_representation_path(representation).replace("\\", "/")
+ name = container['name']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = container['namespace']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"representation": str(representation["_id"]),
+ "frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # Update the imprinted representation
+ update_container(
+ GN,
+ data_imprint
+ )
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ # adding content to the group node
+ with GN:
+ # first remove all nodes
+ [nuke.delete(n) for n in nuke.allNodes()]
+
+ # create input node
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 3:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ # create output node
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to find parent read node
+ self.connect_read_node(GN, namespace, json_f["assignTo"])
+
+ # get all versions in list
+ versions = io.find({
+ "type": "version",
+ "parent": version["parent"]
+ }).distinct('name')
+
+ max_version = max(versions)
+
+ # change color of node
+ if version.get("name") not in [max_version]:
+ GN["tile_color"].setValue(int("0xd84f20ff", 16))
+ else:
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("udated to version: {}".format(version.get("name")))
+
+ def connect_read_node(self, group_node, asset, subset):
+ """
+ Finds the matching read node and wires the group node between
+ it and its dependent nodes
+
+ Arguments:
+ group_node (nuke node): lut group node to connect
+ asset (str): asset name
+ subset (str): subset name
+
+ Returns:
+ None
+ """
+ search_name = "{0}_{1}".format(asset, subset)
+ node = [n for n in nuke.allNodes() if search_name in n["name"].value()]
+ if len(node) > 0:
+ rn = node[0]
+ else:
+ rn = None
+
+ # Parent read node has been found
+ # solving connections
+ if rn:
+ dep_nodes = rn.dependent()
+
+ if len(dep_nodes) > 0:
+ for dn in dep_nodes:
+ dn.setInput(0, group_node)
+
+ group_node.setInput(0, rn)
+ group_node.autoplace()
+
+ def reorder_nodes(self, data):
+ new_order = OrderedDict()
+ trackNums = [v["trackIndex"] for k, v in data.items()]
+ subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
+
+ for trackIndex in range(
+ min(trackNums), max(trackNums) + 1):
+ for subTrackIndex in range(
+ min(subTrackNums), max(subTrackNums) + 1):
+ item = self.get_item(data, trackIndex, subTrackIndex)
+ if item:
+ new_order.update(item)
+ return new_order
+
+ def get_item(self, data, trackIndex, subTrackIndex):
+ return {key: val for key, val in data.items()
+ if subTrackIndex == val["subTrackIndex"]
+ if trackIndex == val["trackIndex"]}
+
+ def byteify(self, input):
+ """
+ Converts unicode strings to byte strings
+ It walks recursively through the whole dictionary
+
+ Arguments:
+ input (dict/str): input
+
+ Returns:
+ dict: with fixed values and keys
+
+ """
+
+ if isinstance(input, dict):
+ return {self.byteify(key): self.byteify(value)
+ for key, value in input.iteritems()}
+ elif isinstance(input, list):
+ return [self.byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from avalon.nuke import viewer_update_and_undo_stop
+ node = nuke.toNode(container['objectName'])
+ with viewer_update_and_undo_stop():
+ nuke.delete(node)
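
`reorder_nodes()` rebuilds the effects in NukeStudio stacking order, track by track and sub-track by sub-track. An equivalent standalone sketch using a sort key instead of nested range loops; the effect payload is invented:

```python
from collections import OrderedDict

def reorder_nodes(data):
    # same ordering as the nested range loops: by track, then sub-track
    return OrderedDict(sorted(
        data.items(),
        key=lambda item: (item[1]["trackIndex"], item[1]["subTrackIndex"])))

effects = {
    "Grade2": {"trackIndex": 1, "subTrackIndex": 0, "class": "Grade"},
    "Grade1": {"trackIndex": 0, "subTrackIndex": 0, "class": "Grade"},
    "OCIO1": {"trackIndex": 0, "subTrackIndex": 1, "class": "OCIOColorSpace"},
}
print(list(reorder_nodes(effects)))  # ['Grade1', 'OCIO1', 'Grade2']
```
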
diff --git a/pype/plugins/nuke/load/load_luts_ip.py b/pype/plugins/nuke/load/load_luts_ip.py
new file mode 100644
index 0000000000..b0a30d78e4
--- /dev/null
+++ b/pype/plugins/nuke/load/load_luts_ip.py
@@ -0,0 +1,330 @@
+from avalon import api, style, io
+import nuke
+import json
+from collections import OrderedDict
+from pype.nuke import lib
+
+class LoadLutsInputProcess(api.Loader):
+ """Loading colorspace soft effect exported from nukestudio"""
+
+ representations = ["lutJson"]
+ families = ["lut"]
+
+ label = "Load Luts - Input Process"
+ order = 0
+ icon = "eye"
+ color = style.colors.alert
+
+ def load(self, context, name, namespace, data):
+ """
+ Loading function that attaches the soft effects to the active viewer's input process
+
+ Arguments:
+ context (dict): context of version
+ name (str): name of the version
+ namespace (str): asset name
+ data (dict): compulsory attribute > not used
+
+ Returns:
+ nuke node: containerised nuke node object
+ """
+ # import dependencies
+ from avalon.nuke import containerise
+
+ # get main variables
+ version = context['version']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = namespace or context['asset']['name']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ # prepare data for imprinting
+ # add additional metadata from the version to imprint to Avalon knob
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # getting file path
+ file = self.fname.replace("\\", "/")
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ GN = nuke.createNode("Group")
+
+ GN["name"].setValue(object_name)
+
+ # adding content to the group node
+ with GN:
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 4:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to place it under Viewer1
+ if not self.connect_active_viewer(GN):
+ nuke.delete(GN)
+ return
+
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("Loaded lut setup: `{}`".format(GN["name"].value()))
+
+ return containerise(
+ node=GN,
+ name=name,
+ namespace=namespace,
+ context=context,
+ loader=self.__class__.__name__,
+ data=data_imprint)
+
+ def update(self, container, representation):
+ """Update the Loader's path
+
+ Nuke automatically tries to reset some variables when changing
+ the loader's path to a new file. These automatic changes are to its
+ inputs.
+
+ """
+
+ from avalon.nuke import (
+ update_container
+ )
+ # get main variables
+ # Get version from io
+ version = io.find_one({
+ "type": "version",
+ "_id": representation["parent"]
+ })
+ # get corresponding node
+ GN = nuke.toNode(container['objectName'])
+
+ file = api.get_representation_path(representation).replace("\\", "/")
+ name = container['name']
+ version_data = version.get("data", {})
+ vname = version.get("name", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
+ workfile_first_frame = int(nuke.root()["first_frame"].getValue())
+ namespace = container['namespace']
+ colorspace = version_data.get("colorspace", None)
+ object_name = "{}_{}".format(name, namespace)
+
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
+
+ data_imprint = {"representation": str(representation["_id"]),
+ "frameStart": first,
+ "frameEnd": last,
+ "version": vname,
+ "colorspaceInput": colorspace,
+ "objectName": object_name}
+
+ for k in add_keys:
+ data_imprint.update({k: version_data[k]})
+
+ # Update the imprinted representation
+ update_container(
+ GN,
+ data_imprint
+ )
+
+ # getting data from json file with unicode conversion
+ with open(file, "r") as f:
+ json_f = {self.byteify(key): self.byteify(value)
+ for key, value in json.load(f).iteritems()}
+
+ # get correct order of nodes by positions on track and subtrack
+ nodes_order = self.reorder_nodes(json_f["effects"])
+
+ # adding nodes to node graph
+ # just in case we are in group lets jump out of it
+ nuke.endGroup()
+
+ # adding content to the group node
+ with GN:
+ # first remove all nodes
+ [nuke.delete(n) for n in nuke.allNodes()]
+
+ # create input node
+ pre_node = nuke.createNode("Input")
+ pre_node["name"].setValue("rgb")
+
+ for ef_name, ef_val in nodes_order.items():
+ node = nuke.createNode(ef_val["class"])
+ for k, v in ef_val["node"].items():
+ if isinstance(v, list) and len(v) > 3:
+ node[k].setAnimated()
+ for i, value in enumerate(v):
+ if isinstance(value, list):
+ for ci, cv in enumerate(value):
+ node[k].setValueAt(
+ cv,
+ (workfile_first_frame + i),
+ ci)
+ else:
+ node[k].setValueAt(
+ value,
+ (workfile_first_frame + i))
+ else:
+ node[k].setValue(v)
+ node.setInput(0, pre_node)
+ pre_node = node
+
+ # create output node
+ output = nuke.createNode("Output")
+ output.setInput(0, pre_node)
+
+ # try to place it under Viewer1
+ if not self.connect_active_viewer(GN):
+ nuke.delete(GN)
+ return
+
+ # get all versions in list
+ versions = io.find({
+ "type": "version",
+ "parent": version["parent"]
+ }).distinct('name')
+
+ max_version = max(versions)
+
+ # change color of node
+ if version.get("name") not in [max_version]:
+ GN["tile_color"].setValue(int("0xd84f20ff", 16))
+ else:
+ GN["tile_color"].setValue(int("0x3469ffff", 16))
+
+ self.log.info("udated to version: {}".format(version.get("name")))
+
+ def connect_active_viewer(self, group_node):
+ """
+ Finds the active viewer, places the node under it and adds
+ the name of the group to the viewer's Input Process
+
+ Arguments:
+ group_node (nuke node): nuke group node object
+
+ """
+ group_node_name = group_node["name"].value()
+
+ viewer = [n for n in nuke.allNodes() if "Viewer1" in n["name"].value()]
+ if len(viewer) > 0:
+ viewer = viewer[0]
+ else:
+ self.log.error("Please create Viewer node before you run this action again")
+ return None
+
+ # get coordinates of Viewer1
+ xpos = viewer["xpos"].value()
+ ypos = viewer["ypos"].value()
+
+ ypos += 150
+
+ viewer["ypos"].setValue(ypos)
+
+ # set coordinates to group node
+ group_node["xpos"].setValue(xpos)
+ group_node["ypos"].setValue(ypos + 50)
+
+ # add group node name to Viewer Input Process
+ viewer["input_process_node"].setValue(group_node_name)
+
+ # put backdrop under
+ lib.create_backdrop(label="Input Process", layer=2, nodes=[viewer, group_node], color="0x7c7faaff")
+
+ return True
+
+ def reorder_nodes(self, data):
+ new_order = OrderedDict()
+ trackNums = [v["trackIndex"] for k, v in data.items()]
+ subTrackNums = [v["subTrackIndex"] for k, v in data.items()]
+
+ for trackIndex in range(
+ min(trackNums), max(trackNums) + 1):
+ for subTrackIndex in range(
+ min(subTrackNums), max(subTrackNums) + 1):
+ item = self.get_item(data, trackIndex, subTrackIndex)
+ if item:
+ new_order.update(item)
+ return new_order
+
+ def get_item(self, data, trackIndex, subTrackIndex):
+ return {key: val for key, val in data.items()
+ if subTrackIndex == val["subTrackIndex"]
+ if trackIndex == val["trackIndex"]}
+
+ def byteify(self, input):
+ """
+ Converts unicode strings to byte strings
+ It walks recursively through the whole dictionary
+
+ Arguments:
+ input (dict/str): input
+
+ Returns:
+ dict: with fixed values and keys
+
+ """
+
+ if isinstance(input, dict):
+ return {self.byteify(key): self.byteify(value)
+ for key, value in input.iteritems()}
+ elif isinstance(input, list):
+ return [self.byteify(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('utf-8')
+ else:
+ return input
+
+ def switch(self, container, representation):
+ self.update(container, representation)
+
+ def remove(self, container):
+ from avalon.nuke import viewer_update_and_undo_stop
+ node = nuke.toNode(container['objectName'])
+ with viewer_update_and_undo_stop():
+ nuke.delete(node)
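
The input-process variant parks the lut group just above the active viewer and registers it in the viewer's `input_process_node` knob, so it affects everything displayed through that viewer. A condensed, hedged sketch of that wiring; it only runs inside a Nuke session and assumes a viewer named like `Viewer1` exists:

```python
import nuke

def attach_input_process(group_node):
    viewers = [n for n in nuke.allNodes("Viewer") if "Viewer1" in n.name()]
    if not viewers:
        return False
    viewer = viewers[0]
    # push the viewer down and park the group just above it
    viewer["ypos"].setValue(viewer["ypos"].value() + 150)
    group_node["xpos"].setValue(viewer["xpos"].value())
    group_node["ypos"].setValue(viewer["ypos"].value() + 50)
    # registering the group as the Input Process applies it to everything
    # displayed through this viewer
    viewer["input_process_node"].setValue(group_node["name"].value())
    return True
```
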
diff --git a/pype/plugins/nuke/load/load_mov.py b/pype/plugins/nuke/load/load_mov.py
index f74c31d232..e6daaaff8a 100644
--- a/pype/plugins/nuke/load/load_mov.py
+++ b/pype/plugins/nuke/load/load_mov.py
@@ -75,8 +75,8 @@ def loader_shift(node, frame, relative=True):
class LoadMov(api.Loader):
"""Load mov file into Nuke"""
- families = ["write", "source", "plate", "render"]
- representations = ["mov", "preview", "review", "mp4"]
+ families = ["write", "source", "plate", "render", "review"]
+ representations = ["wipmov", "h264", "mov", "preview", "review", "mp4"]
label = "Load mov"
order = -10
@@ -92,8 +92,8 @@ class LoadMov(api.Loader):
version = context['version']
version_data = version.get("data", {})
- orig_first = version_data.get("startFrame", None)
- orig_last = version_data.get("endFrame", None)
+ orig_first = version_data.get("frameStart", None)
+ orig_last = version_data.get("frameEnd", None)
diff = orig_first - 1
# set first to 1
first = orig_first - diff
@@ -101,7 +101,8 @@ class LoadMov(api.Loader):
handles = version_data.get("handles", None)
handle_start = version_data.get("handleStart", None)
handle_end = version_data.get("handleEnd", None)
-
+ repr_cont = context["representation"]["context"]
+
# fix handle start and end if none are available
if not handle_start and not handle_end:
handle_start = handles
@@ -119,9 +120,11 @@ class LoadMov(api.Loader):
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
- read_name = "Read"
- read_name += '_' + context["representation"]["context"]["subset"]
- read_name += '_' + context["representation"]["name"]
+ read_name = "Read_{0}_{1}_{2}".format(
+ repr_cont["asset"],
+ repr_cont["subset"],
+ repr_cont["representation"])
+
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@@ -141,7 +144,7 @@ class LoadMov(api.Loader):
read_node["frame"].setValue(str(offset_frame))
# add additional metadata from the version to imprint to Avalon knob
add_keys = [
- "startFrame", "endFrame", "handles", "source", "author",
+ "frameStart", "frameEnd", "handles", "source", "author",
"fps", "version", "handleStart", "handleEnd"
]
@@ -182,7 +185,6 @@ class LoadMov(api.Loader):
"""
from avalon.nuke import (
- ls_img_sequence,
update_container
)
@@ -190,8 +192,7 @@ class LoadMov(api.Loader):
# TODO: prepare also for other Read img/geo/camera
assert node.Class() == "Read", "Must be Read"
- root = api.get_representation_path(representation)
- file = ls_img_sequence(os.path.dirname(root), one=True)
+ file = api.get_representation_path(representation)
# Get start frame from version data
version = io.find_one({
@@ -209,8 +210,8 @@ class LoadMov(api.Loader):
version_data = version.get("data", {})
- orig_first = version_data.get("startFrame", None)
- orig_last = version_data.get("endFrame", None)
+ orig_first = version_data.get("frameStart", None)
+ orig_last = version_data.get("frameEnd", None)
diff = orig_first - 1
# set first to 1
first = orig_first - diff
@@ -238,7 +239,7 @@ class LoadMov(api.Loader):
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file["path"])
- log.info("__ node['file']: {}".format(node["file"]))
+ log.info("__ node['file']: {}".format(node["file"].value()))
# Set the global in to the start frame of the sequence
loader_shift(node, first, relative=True)
@@ -252,8 +253,8 @@ class LoadMov(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
- "startFrame": version_data.get("startFrame"),
- "endFrame": version_data.get("endFrame"),
+ "frameStart": version_data.get("frameStart"),
+ "frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"source": version_data.get("source"),
"handles": version_data.get("handles"),
diff --git a/pype/plugins/nuke/load/load_script_precomp.py b/pype/plugins/nuke/load/load_script_precomp.py
index bc63150206..e84e23a890 100644
--- a/pype/plugins/nuke/load/load_script_precomp.py
+++ b/pype/plugins/nuke/load/load_script_precomp.py
@@ -1,22 +1,18 @@
from avalon import api, style, io
-from pype.nuke.lib import get_avalon_knob_data
+from avalon.nuke import get_avalon_knob_data
import nuke
-import os
-from pype.api import Logger
-log = Logger().get_logger(__name__, "nuke")
-
class LinkAsGroup(api.Loader):
"""Copy the published file to be pasted at the desired location"""
representations = ["nk"]
- families = ["*"]
+ families = ["workfile"]
label = "Load Precomp"
- order = 10
+ order = 0
icon = "file"
- color = style.colors.dark
+ color = style.colors.alert
def load(self, context, name, namespace, data):
@@ -27,8 +23,8 @@ class LinkAsGroup(api.Loader):
version_data = version.get("data", {})
vname = version.get("name", None)
- first = version_data.get("startFrame", None)
- last = version_data.get("endFrame", None)
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
# Fallback to asset name when namespace is None
if namespace is None:
@@ -41,17 +37,14 @@ class LinkAsGroup(api.Loader):
self.log.info("versionData: {}\n".format(context["version"]["data"]))
- # Set global in point to start frame (if in version.data)
- start = context["version"]["data"].get("startFrame", None)
- self.log.info("start: {}\n".format(start))
-
# add additional metadata from the version to imprint to Avalon knob
- add_keys = ["startFrame", "endFrame", "handle_start", "handle_end", "source", "author", "fps"]
+ add_keys = ["frameStart", "frameEnd", "handleStart", "handleEnd",
+ "source", "author", "fps"]
data_imprint = {
- "start_frame": start,
- "fstart": first,
- "fend": last,
+ "startingFrame": first,
+ "frameStart": first,
+ "frameEnd": last,
"version": vname
}
for k in add_keys:
@@ -70,7 +63,6 @@ class LinkAsGroup(api.Loader):
colorspace = context["version"]["data"].get("colorspace", None)
self.log.info("colorspace: {}\n".format(colorspace))
-
# ['version', 'file', 'reading', 'output', 'useOutput']
P["name"].setValue("{}_{}".format(name, namespace))
@@ -79,7 +71,7 @@ class LinkAsGroup(api.Loader):
with P:
# iterate trough all nodes in group node and find pype writes
writes = [n.name() for n in nuke.allNodes()
- if n.Class() == "Write"
+ if n.Class() == "Group"
if get_avalon_knob_data(n)]
# create panel for selecting output
@@ -87,7 +79,7 @@ class LinkAsGroup(api.Loader):
panel_label = "Select write node for output"
p = nuke.Panel("Select Write Node")
p.addEnumerationPulldown(
- panel_label, panel_choices)
+ panel_label, panel_choices)
p.show()
P["output"].setValue(p.value(panel_label))
@@ -119,7 +111,7 @@ class LinkAsGroup(api.Loader):
node = nuke.toNode(container['objectName'])
- root = api.get_representation_path(representation).replace("\\","/")
+ root = api.get_representation_path(representation).replace("\\", "/")
# Get start frame from version data
version = io.find_one({
@@ -138,7 +130,7 @@ class LinkAsGroup(api.Loader):
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
- "endFrame": version["data"].get("endFrame"),
+ "frameEnd": version["data"].get("frameEnd"),
"version": version.get("name"),
"colorspace": version["data"].get("colorspace"),
"source": version["data"].get("source"),
@@ -162,8 +154,7 @@ class LinkAsGroup(api.Loader):
else:
node["tile_color"].setValue(int("0xff0ff0ff", 16))
- log.info("udated to version: {}".format(version.get("name")))
-
+ self.log.info("udated to version: {}".format(version.get("name")))
def remove(self, container):
from avalon.nuke import viewer_update_and_undo_stop
diff --git a/pype/plugins/nuke/load/load_sequence.py b/pype/plugins/nuke/load/load_sequence.py
index 9dd83de064..2946857e09 100644
--- a/pype/plugins/nuke/load/load_sequence.py
+++ b/pype/plugins/nuke/load/load_sequence.py
@@ -76,7 +76,7 @@ class LoadSequence(api.Loader):
"""Load image sequence into Nuke"""
families = ["write", "source", "plate", "render"]
- representations = ["exr", "dpx"]
+ representations = ["exr", "dpx", "jpg", "jpeg"]
label = "Load sequence"
order = -10
@@ -92,29 +92,31 @@ class LoadSequence(api.Loader):
version = context['version']
version_data = version.get("data", {})
- first = version_data.get("startFrame", None)
- last = version_data.get("endFrame", None)
- handles = version_data.get("handles", None)
- handle_start = version_data.get("handleStart", None)
- handle_end = version_data.get("handleEnd", None)
+ log.info("version_data: {}\n".format(version_data))
- # fix handle start and end if none are available
- if not handle_start and not handle_end:
- handle_start = handles
- handle_end = handles
+ self.first_frame = int(nuke.root()["first_frame"].getValue())
+ self.handle_start = version_data.get("handleStart", 0)
+ self.handle_end = version_data.get("handleEnd", 0)
- # create handles offset
- first -= handle_start
- last += handle_end
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
# Fallback to asset name when namespace is None
if namespace is None:
namespace = context['asset']['name']
+ first -= self.handle_start
+ last += self.handle_end
+
file = self.fname.replace("\\", "/")
log.info("file: {}\n".format(self.fname))
- read_name = "Read_" + context["representation"]["context"]["subset"]
+ repr_cont = context["representation"]["context"]
+ read_name = "Read_{0}_{1}_{2}".format(
+ repr_cont["asset"],
+ repr_cont["subset"],
+ repr_cont["representation"])
# Create the Loader with the filename path set
with viewer_update_and_undo_stop():
@@ -130,13 +132,13 @@ class LoadSequence(api.Loader):
r["colorspace"].setValue(str(colorspace))
loader_shift(r, first, relative=True)
- r["origfirst"].setValue(first)
- r["first"].setValue(first)
- r["origlast"].setValue(last)
- r["last"].setValue(last)
+ r["origfirst"].setValue(int(first))
+ r["first"].setValue(int(first))
+ r["origlast"].setValue(int(last))
+ r["last"].setValue(int(last))
# add additional metadata from the version to imprint to Avalon knob
- add_keys = ["startFrame", "endFrame", "handles",
+ add_keys = ["frameStart", "frameEnd",
"source", "colorspace", "author", "fps", "version",
"handleStart", "handleEnd"]
@@ -145,12 +147,18 @@ class LoadSequence(api.Loader):
if k is 'version':
data_imprint.update({k: context["version"]['name']})
else:
- data_imprint.update({k: context["version"]['data'].get(k, str(None))})
+ data_imprint.update(
+ {k: context["version"]['data'].get(k, str(None))})
data_imprint.update({"objectName": read_name})
r["tile_color"].setValue(int("0x4ecd25ff", 16))
+ if version_data.get("retime", None):
+ speed = version_data.get("speed", 1)
+ time_warp_nodes = version_data.get("timewarps", [])
+ self.make_retimes(r, speed, time_warp_nodes)
+
return containerise(r,
name=name,
namespace=namespace,
@@ -158,6 +166,34 @@ class LoadSequence(api.Loader):
loader=self.__class__.__name__,
data=data_imprint)
+ def make_retimes(self, node, speed, time_warp_nodes):
+ '''Create all retime and timewarp nodes with copied animation'''
+ if speed != 1:
+ rtn = nuke.createNode(
+ "Retime",
+ "speed {}".format(speed))
+ rtn["before"].setValue("continue")
+ rtn["after"].setValue("continue")
+ rtn["input.first_lock"].setValue(True)
+ rtn["input.first"].setValue(
+ self.handle_start + self.first_frame
+ )
+
+ if time_warp_nodes != []:
+ for timewarp in time_warp_nodes:
+ twn = nuke.createNode(timewarp["Class"],
+ "name {}".format(timewarp["name"]))
+ if isinstance(timewarp["lookup"], list):
+ # if array for animation
+ twn["lookup"].setAnimated()
+ for i, value in enumerate(timewarp["lookup"]):
+ twn["lookup"].setValueAt(
+ (self.first_frame + i) + value,
+ (self.first_frame + i))
+ else:
+ # if static value `int`
+ twn["lookup"].setValue(timewarp["lookup"])
+
def switch(self, container, representation):
self.update(container, representation)
@@ -179,8 +215,8 @@ class LoadSequence(api.Loader):
# TODO: prepare also for other Read img/geo/camera
assert node.Class() == "Read", "Must be Read"
- root = api.get_representation_path(representation)
- file = ls_img_sequence(os.path.dirname(root), one=True)
+ path = api.get_representation_path(representation)
+ file = ls_img_sequence(path)
# Get start frame from version data
version = io.find_one({
@@ -198,11 +234,12 @@ class LoadSequence(api.Loader):
version_data = version.get("data", {})
- first = version_data.get("startFrame", None)
- last = version_data.get("endFrame", None)
- handles = version_data.get("handles", 0)
- handle_start = version_data.get("handleStart", 0)
- handle_end = version_data.get("handleEnd", 0)
+ self.first_frame = int(nuke.root()["first_frame"].getValue())
+ self.handle_start = version_data.get("handleStart", 0)
+ self.handle_end = version_data.get("handleEnd", 0)
+
+ first = version_data.get("frameStart", None)
+ last = version_data.get("frameEnd", None)
if first is None:
log.warning("Missing start frame for updated version"
@@ -210,36 +247,29 @@ class LoadSequence(api.Loader):
"{} ({})".format(node['name'].value(), representation))
first = 0
- # fix handle start and end if none are available
- if not handle_start and not handle_end:
- handle_start = handles
- handle_end = handles
-
- # create handles offset
- first -= handle_start
- last += handle_end
+ first -= self.handle_start
+ last += self.handle_end
# Update the loader's path whilst preserving some values
with preserve_trim(node):
node["file"].setValue(file["path"])
- log.info("__ node['file']: {}".format(node["file"]))
+ log.info("__ node['file']: {}".format(node["file"].value()))
# Set the global in to the start frame of the sequence
loader_shift(node, first, relative=True)
- node["origfirst"].setValue(first)
- node["first"].setValue(first)
- node["origlast"].setValue(last)
- node["last"].setValue(last)
+ node["origfirst"].setValue(int(first))
+ node["first"].setValue(int(first))
+ node["origlast"].setValue(int(last))
+ node["last"].setValue(int(last))
updated_dict = {}
updated_dict.update({
"representation": str(representation["_id"]),
- "startFrame": version_data.get("startFrame"),
- "endFrame": version_data.get("endFrame"),
+ "frameStart": version_data.get("frameStart"),
+ "frameEnd": version_data.get("frameEnd"),
"version": version.get("name"),
"colorspace": version_data.get("colorspace"),
"source": version_data.get("source"),
- "handles": version_data.get("handles"),
"handleStart": version_data.get("handleStart"),
"handleEnd": version_data.get("handleEnd"),
"fps": version_data.get("fps"),
@@ -253,6 +283,11 @@ class LoadSequence(api.Loader):
else:
node["tile_color"].setValue(int("0x4ecd25ff", 16))
+ if version_data.get("retime", None):
+ speed = version_data.get("speed", 1)
+ time_warp_nodes = version_data.get("timewarps", [])
+ self.make_retimes(node, speed, time_warp_nodes)
+
# Update the imprinted representation
update_container(
node,
diff --git a/pype/plugins/nuke/publish/collect_active_viewer.py b/pype/plugins/nuke/publish/collect_active_viewer.py
index 3bcc1367f3..5dc17d8768 100644
--- a/pype/plugins/nuke/publish/collect_active_viewer.py
+++ b/pype/plugins/nuke/publish/collect_active_viewer.py
@@ -12,3 +12,4 @@ class CollectActiveViewer(pyblish.api.ContextPlugin):
def process(self, context):
context.data["ViewerProcess"] = nuke.ViewerProcess.node()
+ context.data["ActiveViewer"] = nuke.activeViewer()
diff --git a/pype/plugins/nuke/publish/collect_asset_info.py b/pype/plugins/nuke/publish/collect_asset_info.py
index ae49c6e86f..76b93ef3d0 100644
--- a/pype/plugins/nuke/publish/collect_asset_info.py
+++ b/pype/plugins/nuke/publish/collect_asset_info.py
@@ -1,4 +1,3 @@
-import nuke
from avalon import api, io
import pyblish.api
@@ -19,3 +18,6 @@ class CollectAssetInfo(pyblish.api.ContextPlugin):
self.log.info("asset_data: {}".format(asset_data))
context.data['handles'] = int(asset_data["data"].get("handles", 0))
+ context.data["handleStart"] = int(asset_data["data"].get(
+ "handleStart", 0))
+ context.data["handleEnd"] = int(asset_data["data"].get("handleEnd", 0))
diff --git a/pype/plugins/nuke/publish/collect_framerate.py b/pype/plugins/nuke/publish/collect_framerate.py
index 980ec22872..88a449e745 100644
--- a/pype/plugins/nuke/publish/collect_framerate.py
+++ b/pype/plugins/nuke/publish/collect_framerate.py
@@ -14,5 +14,4 @@ class CollectFramerate(pyblish.api.ContextPlugin):
]
def process(self, context):
- context.data["framerate"] = nuke.root()["fps"].getValue()
context.data["fps"] = nuke.root()["fps"].getValue()
diff --git a/pype/plugins/nuke/publish/collect_instances.py b/pype/plugins/nuke/publish/collect_instances.py
index cca5a861ff..7f2e1566c3 100644
--- a/pype/plugins/nuke/publish/collect_instances.py
+++ b/pype/plugins/nuke/publish/collect_instances.py
@@ -3,7 +3,7 @@ import os
import nuke
import pyblish.api
from avalon import io, api
-from pype.nuke.lib import get_avalon_knob_data
+from avalon.nuke import get_avalon_knob_data
@pyblish.api.log
@@ -18,22 +18,26 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
asset_data = io.find_one({"type": "asset",
"name": api.Session["AVALON_ASSET"]})
- # add handles into context
- context.data['handles'] = context.data['handles']
self.log.debug("asset_data: {}".format(asset_data["data"]))
instances = []
# creating instances per write node
- for node in nuke.allNodes():
+ self.log.debug("nuke.allNodes(): {}".format(nuke.allNodes()))
+ for node in nuke.allNodes():
try:
if node["disable"].value():
continue
- except Exception:
+ except Exception as E:
+ self.log.warning(E)
continue
# get data from avalon knob
+ self.log.debug("node[name]: {}".format(node['name'].value()))
avalon_knob_data = get_avalon_knob_data(node)
+
+ self.log.debug("avalon_knob_data: {}".format(avalon_knob_data))
+
if not avalon_knob_data:
continue
@@ -45,7 +49,14 @@ class CollectNukeInstances(pyblish.api.ContextPlugin):
# Create instance
instance = context.create_instance(subset)
- instance.add(node)
+ instance.append(node)
+
+ # Add all nodes in group instances.
+ if node.Class() == "Group":
+ node.begin()
+ for i in nuke.allNodes():
+ instance.append(i)
+ node.end()
family = avalon_knob_data["families"]
if node["render"].value():
diff --git a/pype/plugins/nuke/publish/collect_legacy_read.py b/pype/plugins/nuke/publish/collect_legacy_read.py
new file mode 100644
index 0000000000..6b6ce57245
--- /dev/null
+++ b/pype/plugins/nuke/publish/collect_legacy_read.py
@@ -0,0 +1,30 @@
+import toml
+
+import nuke
+
+import pyblish.api
+
+
+class CollectReadLegacy(pyblish.api.ContextPlugin):
+ """Collect legacy read nodes."""
+
+ order = pyblish.api.CollectorOrder
+ label = "Collect Read Legacy"
+ hosts = ["nuke", "nukeassist"]
+
+ def process(self, context):
+
+ for node in nuke.allNodes():
+ if node.Class() != "Read":
+ continue
+
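+ # skip read nodes without the avalon metadata knob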
+ if "avalon" not in node.knobs().keys():
+ continue
+
+ if not toml.loads(node["avalon"].value()):
+ continue
+
+ instance = context.create_instance(
+ node.name(), family="read.legacy"
+ )
+ instance.append(node)
diff --git a/pype/plugins/nuke/publish/collect_reads.py b/pype/plugins/nuke/publish/collect_reads.py
index 1bba6198d2..352ae129f4 100644
--- a/pype/plugins/nuke/publish/collect_reads.py
+++ b/pype/plugins/nuke/publish/collect_reads.py
@@ -99,8 +99,8 @@ class CollectNukeReads(pyblish.api.ContextPlugin):
"stagingDir": source_dir,
"ext": ext,
"label": label,
- "startFrame": first_frame,
- "endFrame": last_frame,
+ "frameStart": first_frame,
+ "frameEnd": last_frame,
"colorspace": node["colorspace"].value(),
"handles": int(asset_data["data"].get("handles", 0)),
"step": 1,
diff --git a/pype/plugins/nuke/publish/collect_workfile.py b/pype/plugins/nuke/publish/collect_workfile.py
index 2ec6464ace..1e0ecb33f6 100644
--- a/pype/plugins/nuke/publish/collect_workfile.py
+++ b/pype/plugins/nuke/publish/collect_workfile.py
@@ -38,8 +38,8 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
first_frame = int(root["first_frame"].getValue())
last_frame = int(root["last_frame"].getValue())
- handle_start = int(knob_data.get("handle_start", 0))
- handle_end = int(knob_data.get("handle_end", 0))
+ handle_start = int(knob_data.get("handleStart", 0))
+ handle_end = int(knob_data.get("handleEnd", 0))
# Get format
format = root['format'].value()
@@ -54,17 +54,17 @@ class CollectWorkfile(pyblish.api.ContextPlugin):
script_data = {
"asset": os.getenv("AVALON_ASSET", None),
"version": version,
- "startFrame": first_frame + handle_start,
- "endFrame": last_frame - handle_end,
- "resolution_width": resolution_width,
- "resolution_height": resolution_height,
- "pixel_aspect": pixel_aspect,
+ "frameStart": first_frame + handle_start,
+ "frameEnd": last_frame - handle_end,
+ "resolutionWidth": resolution_width,
+ "resolutionHeight": resolution_height,
+ "pixelAspect": pixel_aspect,
# backward compatibility
"handles": handle_start,
- "handle_start": handle_start,
- "handle_end": handle_end,
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
"step": 1,
"fps": root['fps'].value(),
}
diff --git a/pype/plugins/nuke/publish/collect_writes.py b/pype/plugins/nuke/publish/collect_writes.py
index f98a3a0f7d..29ae6cb929 100644
--- a/pype/plugins/nuke/publish/collect_writes.py
+++ b/pype/plugins/nuke/publish/collect_writes.py
@@ -15,12 +15,12 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
def process(self, instance):
- # if not instance.data["publish"]:
- # continue
+ node = None
+ for x in instance:
+ if x.Class() == "Write":
+ node = x
- node = instance[0]
-
- if node.Class() != "Write":
+ if node is None:
return
self.log.debug("checking instance: {}".format(instance))
@@ -34,7 +34,9 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
output_type = "mov"
# Get frame range
- handles = instance.context.data.get('handles', 0)
+ handles = instance.context.data['handles']
+ handle_start = instance.context.data["handleStart"]
+ handle_end = instance.context.data["handleEnd"]
first_frame = int(nuke.root()["first_frame"].getValue())
last_frame = int(nuke.root()["last_frame"].getValue())
@@ -85,16 +87,37 @@ class CollectNukeWrites(pyblish.api.InstancePlugin):
if 'render.local' in instance.data['families']:
instance.data['families'].append('ftrack')
+ # Add version data to instance
+ version_data = {
+ "handles": handle_start,
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
+ "frameStart": first_frame,
+ "frameEnd": last_frame,
+ "version": int(version),
+ "colorspace": node["colorspace"].value(),
+ "families": [instance.data["family"]],
+ "subset": instance.data["subset"],
+ "fps": instance.context.data["fps"]
+ }
+
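+ # chunk size for Deadline is taken from the write group node, defaulting to 1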
+ group_node = [x for x in instance if x.Class() == "Group"][0]
+ deadlineChunkSize = 1
+ if "deadlineChunkSize" in group_node.knobs():
+ deadlineChunkSize = group_node["deadlineChunkSize"].value()
+
instance.data.update({
+ "versionData": version_data,
"path": path,
"outputDir": output_dir,
"ext": ext,
"label": label,
"handles": handles,
- "startFrame": first_frame,
- "endFrame": last_frame,
+ "frameStart": first_frame,
+ "frameEnd": last_frame,
"outputType": output_type,
"colorspace": node["colorspace"].value(),
+ "deadlineChunkSize": deadlineChunkSize
})
self.log.debug("instance.data: {}".format(instance.data))
diff --git a/pype/plugins/nuke/publish/extract_ouput_node.py b/pype/plugins/nuke/publish/extract_ouput_node.py
new file mode 100644
index 0000000000..4d7533f010
--- /dev/null
+++ b/pype/plugins/nuke/publish/extract_ouput_node.py
@@ -0,0 +1,42 @@
+import nuke
+import pyblish.api
+from avalon.nuke import maintained_selection
+
+class CreateOutputNode(pyblish.api.ContextPlugin):
+ """Adding output node for each ouput write node
+ So when latly user will want to Load .nk as LifeGroup or Precomp
+ Nuke will not complain about missing Output node
+ """
+ label = 'Output Node Create'
+ order = pyblish.api.ExtractorOrder + 0.4
+ families = ["workfile"]
+ hosts = ['nuke']
+
+ def process(self, context):
+ # capture selection state
+ with maintained_selection():
+ # find the node feeding the active viewer input
+ self.log.info(context.data["ActiveViewer"])
+
+ active_viewer = context.data["ActiveViewer"]
+ active_input = active_viewer.activeInput()
+ active_node = active_viewer.node()
+
+ last_viewer_node = active_node.input(active_input)
+
+ name = last_viewer_node.name()
+ self.log.info("Node name: {}".format(name))
+
+ # select only instance render node
+ last_viewer_node['selected'].setValue(True)
+ output_node = nuke.createNode("Output")
+
+ # deselect all and select the original selection
+ output_node['selected'].setValue(False)
+
+ # save script
+ nuke.scriptSave()
+
+ # add node to instance node list
+ context.data["outputNode"] = output_node
diff --git a/pype/plugins/nuke/publish/extract_post_json.py b/pype/plugins/nuke/publish/extract_post_json.py
deleted file mode 100644
index fe42781d52..0000000000
--- a/pype/plugins/nuke/publish/extract_post_json.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import os
-import json
-import datetime
-import time
-
-import clique
-from pprint import pformat
-import pyblish.api
-
-
-class ExtractJSON(pyblish.api.ContextPlugin):
- """ Extract all instances to a serialized json file. """
-
- order = pyblish.api.IntegratorOrder + 1
- label = "Extract to JSON"
- families = ["write"]
-
- def process(self, context):
- workspace = os.path.join(
- os.path.dirname(context.data["currentFile"]), "workspace",
- "instances")
-
- if not os.path.exists(workspace):
- os.makedirs(workspace)
-
- context_data = context.data.copy()
- unwrapped_instance = []
- for i in context_data["instances"]:
- unwrapped_instance.append(i.data)
-
- context_data["instances"] = unwrapped_instance
-
- timestamp = datetime.datetime.fromtimestamp(
- time.time()).strftime("%Y%m%d-%H%M%S")
- filename = timestamp + "_instances.json"
-
- with open(os.path.join(workspace, filename), "w") as outfile:
- outfile.write(pformat(context_data, depth=20))
-
- def serialize(self, data):
- """
- Convert all nested content to serialized objects
-
- Args:
- data (dict): nested data
-
- Returns:
- dict: nested data
- """
-
- def encoding_obj(value):
- try:
- value = str(value).replace("\\", "/")
- # value = getattr(value, '__dict__', str(value))
- except Exception:
- pass
- return value
-
- for key, value in dict(data).items():
- if key in ["records", "instances", "results"]:
- # escape all record objects
- data[key] = None
- continue
-
- if hasattr(value, '__module__'):
- # only deals with module objects
- if "plugins" in value.__module__:
- # only dealing with plugin objects
- data[key] = str(value.__module__)
- else:
- if ".lib." in value.__module__:
- # will allow only anatomy dict
- data[key] = self.serialize(value)
- else:
- data[key] = None
- continue
- continue
-
- if isinstance(value, dict):
- # loops if dictionary
- data[key] = self.serialize(value)
-
- if isinstance(value, (list or tuple)):
- # loops if list or tuple
- for i, item in enumerate(value):
- value[i] = self.serialize(item)
- data[key] = value
-
- data[key] = encoding_obj(value)
-
- return data
diff --git a/pype/plugins/nuke/publish/extract_render_local.py b/pype/plugins/nuke/publish/extract_render_local.py
index 2b185720a6..29de347288 100644
--- a/pype/plugins/nuke/publish/extract_render_local.py
+++ b/pype/plugins/nuke/publish/extract_render_local.py
@@ -20,18 +20,26 @@ class NukeRenderLocal(pype.api.Extractor):
families = ["render.local"]
def process(self, instance):
- node = instance[0]
+ node = None
+ for x in instance:
+ if x.Class() == "Write":
+ node = x
+
+ if node is None:
+ return
self.log.debug("instance collected: {}".format(instance.data))
- first_frame = instance.data.get("startFrame", None)
- last_frame = instance.data.get("endFrame", None)
+ first_frame = instance.data.get("frameStart", None)
+ last_frame = instance.data.get("frameEnd", None)
node_subset_name = instance.data.get("name", None)
self.log.info("Starting render")
self.log.info("Start frame: {}".format(first_frame))
self.log.info("End frame: {}".format(last_frame))
+ # Ensure output directory exists.
+ directory = os.path.dirname(node["file"].value())
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+
# Render frames
nuke.execute(
node_subset_name,
diff --git a/pype/plugins/nuke/publish/extract_review_data.py b/pype/plugins/nuke/publish/extract_review_data.py
index f5a017321e..40c3e37434 100644
--- a/pype/plugins/nuke/publish/extract_review_data.py
+++ b/pype/plugins/nuke/publish/extract_review_data.py
@@ -67,8 +67,8 @@ class ExtractReviewData(pype.api.Extractor):
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
- first_frame = instance.data.get("startFrame", None)
- last_frame = instance.data.get("endFrame", None)
+ first_frame = instance.data.get("frameStart", None)
+ last_frame = instance.data.get("frameEnd", None)
node = previous_node = nuke.createNode("Read")
@@ -149,8 +149,8 @@ class ExtractReviewData(pype.api.Extractor):
'ext': representation,
'files': file,
"stagingDir": stagingDir,
- "startFrame": first_frame,
- "endFrame": last_frame,
+ "frameStart": first_frame,
+ "frameEnd": last_frame,
"anatomy_template": "render",
"tags": tags
}
diff --git a/pype/plugins/nuke/publish/remove_ouput_node.py b/pype/plugins/nuke/publish/remove_ouput_node.py
new file mode 100644
index 0000000000..12361595fe
--- /dev/null
+++ b/pype/plugins/nuke/publish/remove_ouput_node.py
@@ -0,0 +1,22 @@
+import nuke
+import pyblish.api
+
+
+class RemoveOutputNode(pyblish.api.ContextPlugin):
+ """Removing output node for each ouput write node
+
+ """
+ label = 'Output Node Remove'
+ order = pyblish.api.IntegratorOrder + 0.4
+ families = ["workfile"]
+ hosts = ['nuke']
+
+ def process(self, context):
+ try:
+ output_node = context.data["outputNode"]
+ name = output_node["name"].value()
+ self.log.info("Removing output node: '{}'".format(name))
+
+ nuke.delete(output_node)
+ except Exception:
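+ # no output node was stored on the context; nothing to remove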
+ return
diff --git a/pype/plugins/nuke/publish/submit_nuke_deadline.py b/pype/plugins/nuke/publish/submit_nuke_deadline.py
index 3d854f66e9..ef971f3a37 100644
--- a/pype/plugins/nuke/publish/submit_nuke_deadline.py
+++ b/pype/plugins/nuke/publish/submit_nuke_deadline.py
@@ -27,9 +27,13 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
def process(self, instance):
- # root = nuke.root()
- # node_subset_name = instance.data.get("name", None)
- node = instance[0]
+ node = None
+ for x in instance:
+ if x.Class() == "Write":
+ node = x
+
+ if node is None:
+ return
DEADLINE_REST_URL = os.environ.get("DEADLINE_REST_URL",
"http://localhost:8082")
@@ -77,9 +81,10 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
"Plugin": "Nuke",
"Frames": "{start}-{end}".format(
- start=int(instance.data["startFrame"]),
- end=int(instance.data["endFrame"])
+ start=int(instance.data["frameStart"]),
+ end=int(instance.data["frameEnd"])
),
+ "ChunkSize": instance.data["deadlineChunkSize"],
"Comment": comment,
@@ -100,6 +105,9 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
# Resolve relative references
"ProjectPath": workspace,
+
+ # Only the specific write node is rendered.
+ "WriteNode": instance[0].name()
},
# Mandatory for Deadline, may be empty
@@ -196,7 +204,7 @@ class NukeSubmitDeadline(pyblish.api.InstancePlugin):
def preflight_check(self, instance):
"""Ensure the startFrame, endFrame and byFrameStep are integers"""
- for key in ("startFrame", "endFrame"):
+ for key in ("frameStart", "frameEnd"):
value = instance.data[key]
if int(value) == value:
diff --git a/pype/plugins/nuke/publish/validate_read_legacy.py b/pype/plugins/nuke/publish/validate_read_legacy.py
new file mode 100644
index 0000000000..22a9b3678e
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_read_legacy.py
@@ -0,0 +1,83 @@
+import os
+import toml
+
+import nuke
+
+import pyblish.api
+from avalon import api
+from bson.objectid import ObjectId
+
+
+class RepairReadLegacyAction(pyblish.api.Action):
+
+ label = "Repair"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+
+ # Get the errored instances
+ failed = []
+ for result in context.data["results"]:
+ if (result["error"] is not None and result["instance"] is not None
+ and result["instance"] not in failed):
+ failed.append(result["instance"])
+
+ # Apply pyblish.logic to get the instances for the plug-in
+ instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+ for instance in instances:
+
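+ # capture the legacy read's metadata, position and connections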
+ data = toml.loads(instance[0]["avalon"].value())
+ data["name"] = instance[0].name()
+ data["xpos"] = instance[0].xpos()
+ data["ypos"] = instance[0].ypos()
+ data["extension"] = os.path.splitext(
+ instance[0]["file"].value()
+ )[1][1:]
+
+ data["connections"] = []
+ for d in instance[0].dependent():
+ for i in range(d.inputs()):
+ if d.input(i) == instance[0]:
+ data["connections"].append([i, d])
+
+ nuke.delete(instance[0])
+
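+ # choose a loader based on the source file extension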
+ loader_name = "LoadSequence"
+ if data["extension"] == "mov":
+ loader_name = "LoadMov"
+
+ loader_plugin = None
+ for Loader in api.discover(api.Loader):
+ if Loader.__name__ != loader_name:
+ continue
+
+ loader_plugin = Loader
+
+ api.load(
+ Loader=loader_plugin,
+ representation=ObjectId(data["representation"])
+ )
+
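+ # restore the original connections and node position on the loaded node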
+ node = nuke.toNode(data["name"])
+ for connection in data["connections"]:
+ connection[1].setInput(connection[0], node)
+
+ node.setXYpos(data["xpos"], data["ypos"])
+
+
+class ValidateReadLegacy(pyblish.api.InstancePlugin):
+ """Validate legacy read instance[0]s."""
+
+ order = pyblish.api.ValidatorOrder
+ optional = True
+ families = ["read.legacy"]
+ label = "Read Legacy"
+ hosts = ["nuke"]
+ actions = [RepairReadLegacyAction]
+
+ def process(self, instance):
+
+ msg = "Clean up legacy read node \"{}\"".format(instance)
+ assert False, msg
diff --git a/pype/plugins/nuke/publish/validate_rendered_frames.py b/pype/plugins/nuke/publish/validate_rendered_frames.py
index 93eb84f304..85cbe7b2c0 100644
--- a/pype/plugins/nuke/publish/validate_rendered_frames.py
+++ b/pype/plugins/nuke/publish/validate_rendered_frames.py
@@ -11,12 +11,12 @@ class RepairCollectionAction(pyblish.api.Action):
icon = "wrench"
def process(self, context, plugin):
- self.log.info(context[0])
+ self.log.info(context[0][0])
files_remove = [os.path.join(context[0].data["outputDir"], f)
for r in context[0].data.get("representations", [])
for f in r.get("files", [])
]
- self.log.info(files_remove)
+ self.log.info("Files to be removed: {}".format(files_remove))
for f in files_remove:
os.remove(f)
self.log.debug("removing file: {}".format(f))
@@ -51,7 +51,7 @@ class ValidateRenderedFrames(pyblish.api.InstancePlugin):
collection = collections[0]
frame_length = int(
- instance.data["endFrame"] - instance.data["startFrame"] + 1
+ instance.data["frameEnd"] - instance.data["frameStart"] + 1
)
if frame_length != 1:
diff --git a/pype/plugins/nuke/publish/validate_script.py b/pype/plugins/nuke/publish/validate_script.py
index efb0537246..837e30dab7 100644
--- a/pype/plugins/nuke/publish/validate_script.py
+++ b/pype/plugins/nuke/publish/validate_script.py
@@ -1,5 +1,6 @@
import pyblish.api
from avalon import io
+from pype import lib
@pyblish.api.log
@@ -15,20 +16,34 @@ class ValidateScript(pyblish.api.InstancePlugin):
ctx_data = instance.context.data
asset_name = ctx_data["asset"]
- asset = io.find_one({
- "type": "asset",
- "name": asset_name
- })
+ # asset = io.find_one({
+ # "type": "asset",
+ # "name": asset_name
+ # })
+
+ asset = lib.get_asset(asset_name)
asset_data = asset["data"]
# These attributes will be checked
attributes = [
- "fps", "fstart", "fend",
- "resolution_width", "resolution_height", "handle_start", "handle_end"
+ "fps",
+ "frameStart",
+ "frameEnd",
+ "resolutionWidth",
+ "resolutionHeight",
+ "handleStart",
+ "handleEnd"
]
# Value of these attributes can be found on parents
- hierarchical_attributes = ["fps", "resolution_width", "resolution_height", "pixel_aspect", "handle_start", "handle_end"]
+ hierarchical_attributes = [
+ "fps",
+ "resolutionWidth",
+ "resolutionHeight",
+ "pixelAspect",
+ "handleStart",
+ "handleEnd"
+ ]
missing_attributes = []
asset_attributes = {}
@@ -60,32 +75,33 @@ class ValidateScript(pyblish.api.InstancePlugin):
# Get handles from database, Default is 0 (if not found)
handle_start = 0
handle_end = 0
- if "handle_start" in asset_attributes:
- handle_start = asset_attributes["handle_start"]
- if "handle_end" in asset_attributes:
- handle_end = asset_attributes["handle_end"]
+ if "handleStart" in asset_attributes:
+ handle_start = asset_attributes["handleStart"]
+ if "handleEnd" in asset_attributes:
+ handle_end = asset_attributes["handleEnd"]
# Set frame range with handles
- # asset_attributes["fstart"] -= handle_start
- # asset_attributes["fend"] += handle_end
+ # asset_attributes["frameStart"] -= handle_start
+ # asset_attributes["frameEnd"] += handle_end
# Get values from nukescript
script_attributes = {
- "handle_start": ctx_data["handle_start"],
- "handle_end": ctx_data["handle_end"],
+ "handleStart": ctx_data["handleStart"],
+ "handleEnd": ctx_data["handleEnd"],
"fps": ctx_data["fps"],
- "fstart": ctx_data["startFrame"],
- "fend": ctx_data["endFrame"],
- "resolution_width": ctx_data["resolution_width"],
- "resolution_height": ctx_data["resolution_height"],
- "pixel_aspect": ctx_data["pixel_aspect"]
+ "frameStart": ctx_data["frameStart"],
+ "frameEnd": ctx_data["frameEnd"],
+ "resolutionWidth": ctx_data["resolutionWidth"],
+ "resolutionHeight": ctx_data["resolutionHeight"],
+ "pixelAspect": ctx_data["pixelAspect"]
}
# Compare asset's values Nukescript X Database
not_matching = []
for attr in attributes:
- self.log.debug("asset vs script attribute: {0}, {1}".format(
- asset_attributes[attr], script_attributes[attr]))
+ self.log.debug("asset vs script attribute \"{}\": {}, {}".format(
+ attr, asset_attributes[attr], script_attributes[attr])
+ )
if asset_attributes[attr] != script_attributes[attr]:
not_matching.append(attr)
@@ -94,7 +110,7 @@ class ValidateScript(pyblish.api.InstancePlugin):
msg = "Attributes '{}' are not set correctly"
# Alert user that handles are set if Frame start/end not match
if (
- (("fstart" in not_matching) or ("fend" in not_matching)) and
+ (("frameStart" in not_matching) or ("frameEnd" in not_matching)) and
((handle_start > 0) or (handle_end > 0))
):
msg += " (`handle_start` are set to {})".format(handle_start)
diff --git a/pype/plugins/nuke/publish/validate_write_bounding_box.py b/pype/plugins/nuke/publish/validate_write_bounding_box.py
new file mode 100644
index 0000000000..cedeea6d9f
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_write_bounding_box.py
@@ -0,0 +1,106 @@
+import nuke
+
+import pyblish.api
+
+
+class RepairNukeBoundingBoxAction(pyblish.api.Action):
+
+ label = "Repair"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+
+ # Get the errored instances
+ failed = []
+ for result in context.data["results"]:
+ if (result["error"] is not None and result["instance"] is not None
+ and result["instance"] not in failed):
+ failed.append(result["instance"])
+
+ # Apply pyblish.logic to get the instances for the plug-in
+ instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+ for instance in instances:
+ crop = instance[0].dependencies()[0]
+ if crop.Class() != "Crop":
+ crop = nuke.nodes.Crop(inputs=[instance[0].input(0)])
+
+ xpos = instance[0].xpos()
+ ypos = instance[0].ypos() - 26
+
+ dependent_ypos = instance[0].dependencies()[0].ypos()
+ if (instance[0].ypos() - dependent_ypos) <= 51:
+ xpos += 110
+
+ crop.setXYpos(xpos, ypos)
+
+ instance[0].setInput(0, crop)
+
+ crop["box"].setValue(
+ (
+ 0.0,
+ 0.0,
+ instance[0].input(0).width(),
+ instance[0].input(0).height()
+ )
+ )
+
+
+class ValidateNukeWriteBoundingBox(pyblish.api.InstancePlugin):
+ """Validates write bounding box.
+
+ Ffmpeg does not support bounding boxes outside of the image
+ resolution, so a crop is needed. All frames need to be validated, as each
+ rendered exr can break the ffmpeg transcode.
+ """
+
+ order = pyblish.api.ValidatorOrder
+ optional = True
+ families = ["write"]
+ label = "Write Bounding Box"
+ hosts = ["nuke"]
+ actions = [RepairNukeBoundingBoxAction]
+
+ def process(self, instance):
+
+ # Skip bounding box check if a crop node exists.
+ if instance[0].dependencies()[0].Class() == "Crop":
+ return
+
+ msg = "Bounding box is outside the format."
+ assert self.check_bounding_box(instance), msg
+
+ def check_bounding_box(self, instance):
+ node = instance[0]
+
+ first_frame = instance.data["frameStart"]
+ last_frame = instance.data["frameEnd"]
+
+ format_width = node.format().width()
+ format_height = node.format().height()
+
+ # The trick is that we need to execute() some node every time we go to
+ # a next frame, to update the context.
+ # So we create a CurveTool that we can execute() on every frame.
+ temporary_node = nuke.nodes.CurveTool()
+ bbox_check = True
+ for frame in range(first_frame, last_frame + 1):
+ # Workaround to update the tree
+ nuke.execute(temporary_node, frame, frame)
+
+ x = node.bbox().x()
+ y = node.bbox().y()
+ w = node.bbox().w()
+ h = node.bbox().h()
+
+ if x < 0 or (x + w) > format_width:
+ bbox_check = False
+ break
+
+ if y < 0 or (y + h) > format_height:
+ bbox_check = False
+ break
+
+ nuke.delete(temporary_node)
+ return bbox_check
diff --git a/pype/plugins/nuke/publish/validate_write_deadline_tab.py b/pype/plugins/nuke/publish/validate_write_deadline_tab.py
new file mode 100644
index 0000000000..0c222a164a
--- /dev/null
+++ b/pype/plugins/nuke/publish/validate_write_deadline_tab.py
@@ -0,0 +1,42 @@
+import pyblish.api
+import pype.nuke.lib
+
+
+class RepairNukeWriteDeadlineTab(pyblish.api.Action):
+
+ label = "Repair"
+ icon = "wrench"
+ on = "failed"
+
+ def process(self, context, plugin):
+
+ # Get the errored instances
+ failed = []
+ for result in context.data["results"]:
+ if (result["error"] is not None and result["instance"] is not None
+ and result["instance"] not in failed):
+ failed.append(result["instance"])
+
+ # Apply pyblish.logic to get the instances for the plug-in
+ instances = pyblish.api.instances_by_plugin(failed, plugin)
+
+ for instance in instances:
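+ # re-create the Deadline tab on the instance's write group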
+ group_node = [x for x in instance if x.Class() == "Group"][0]
+ pype.nuke.lib.add_deadline_tab(group_node)
+
+
+class ValidateNukeWriteDeadlineTab(pyblish.api.InstancePlugin):
+ """Ensure Deadline tab is present and current."""
+
+ order = pyblish.api.ValidatorOrder
+ label = "Deadline Tab"
+ hosts = ["nuke"]
+ optional = True
+ families = ["write"]
+ actions = [RepairNukeWriteDeadlineTab]
+
+ def process(self, instance):
+ group_node = [x for x in instance if x.Class() == "Group"][0]
+
+ msg = "Deadline tab missing on \"{}\"".format(group_node.name())
+ assert "Deadline" in group_node.knobs(), msg
diff --git a/pype/plugins/nuke/publish/validate_write_nodes.py b/pype/plugins/nuke/publish/validate_write_nodes.py
index 105c133ebe..564c912a7a 100644
--- a/pype/plugins/nuke/publish/validate_write_nodes.py
+++ b/pype/plugins/nuke/publish/validate_write_nodes.py
@@ -14,7 +14,7 @@ class RepairNukeWriteNodeAction(pyblish.api.Action):
instances = pype.utils.filter_instances(context, plugin)
for instance in instances:
- node = instance[0]
+ node = instance[1]
correct_data = nukelib.get_write_node_template_attr(node)
for k, v in correct_data.items():
node[k].setValue(v)
@@ -33,7 +33,7 @@ class ValidateNukeWriteNode(pyblish.api.InstancePlugin):
def process(self, instance):
- node = instance[0]
+ node = instance[1]
correct_data = nukelib.get_write_node_template_attr(node)
check = []
diff --git a/pype/plugins/nukestudio/_unused/collect.py b/pype/plugins/nukestudio/_unused/collect.py
deleted file mode 100644
index 4e20202fe0..0000000000
--- a/pype/plugins/nukestudio/_unused/collect.py
+++ /dev/null
@@ -1,191 +0,0 @@
-from pyblish import api
-
-class CollectFramerate(api.ContextPlugin):
- """Collect framerate from selected sequence."""
-
- order = api.CollectorOrder
- label = "Collect Framerate"
- hosts = ["nukestudio"]
-
- def process(self, context):
- for item in context.data.get("selection", []):
- context.data["framerate"] = item.sequence().framerate().toFloat()
- return
-
-
-class CollectTrackItems(api.ContextPlugin):
- """Collect all tasks from submission."""
-
- order = api.CollectorOrder
- label = "Collect Track Items"
- hosts = ["nukestudio"]
-
- def process(self, context):
- import os
-
- submission = context.data.get("submission", None)
- data = {}
-
- # Set handles
- handles = 0
- if submission:
- for task in submission.getLeafTasks():
-
- if task._cutHandles:
- handles = task._cutHandles
- self.log.info("__ handles: '{}'".format(handles))
-
- # Skip audio track items
- media_type = "core.Hiero.Python.TrackItem.MediaType.kAudio"
- if str(task._item.mediaType()) == media_type:
- continue
-
- item = task._item
- if item.name() not in data:
- data[item.name()] = {"item": item, "tasks": [task]}
- else:
- data[item.name()]["tasks"].append(task)
-
- data[item.name()]["startFrame"] = task.outputRange()[0]
- data[item.name()]["endFrame"] = task.outputRange()[1]
- else:
- for item in context.data.get("selection", []):
- # Skip audio track items
- # Try/Except is to handle items types, like EffectTrackItem
- try:
- media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
- if str(item.mediaType()) != media_type:
- continue
- except:
- continue
-
- data[item.name()] = {
- "item": item,
- "tasks": [],
- "startFrame": item.timelineIn(),
- "endFrame": item.timelineOut()
- }
-
- for key, value in data.items():
-
- context.create_instance(
- name=key,
- subset="trackItem",
- asset=value["item"].name(),
- item=value["item"],
- family="trackItem",
- tasks=value["tasks"],
- startFrame=value["startFrame"] + handles,
- endFrame=value["endFrame"] - handles,
- handles=handles
- )
- context.create_instance(
- name=key + "_review",
- subset="reviewItem",
- asset=value["item"].name(),
- item=value["item"],
- family="trackItem_review",
- families=["output"],
- handles=handles,
- output_path=os.path.abspath(
- os.path.join(
- context.data["activeProject"].path(),
- "..",
- "workspace",
- key + ".mov"
- )
- )
- )
-
-
-class CollectTasks(api.ContextPlugin):
- """Collect all tasks from submission."""
-
- order = api.CollectorOrder + 0.01
- label = "Collect Tasks"
- hosts = ["nukestudio"]
-
- def process(self, context):
- import os
- import re
-
- import hiero.exporters as he
- import clique
-
- for parent in context:
- if "trackItem" != parent.data["family"]:
- continue
-
- for task in parent.data["tasks"]:
- asset_type = None
-
- hiero_cls = he.FnSymLinkExporter.SymLinkExporter
- if isinstance(task, hiero_cls):
- asset_type = "img"
- movie_formats = [".mov", ".R3D"]
- ext = os.path.splitext(task.resolvedExportPath())[1]
- if ext in movie_formats:
- asset_type = "mov"
-
- hiero_cls = he.FnTranscodeExporter.TranscodeExporter
- if isinstance(task, hiero_cls):
- asset_type = "img"
- if task.resolvedExportPath().endswith(".mov"):
- asset_type = "mov"
-
- hiero_cls = he.FnNukeShotExporter.NukeShotExporter
- if isinstance(task, hiero_cls):
- asset_type = "scene"
-
- hiero_cls = he.FnAudioExportTask.AudioExportTask
- if isinstance(task, hiero_cls):
- asset_type = "audio"
-
- # Skip all non supported export types
- if not asset_type:
- continue
-
- resolved_path = task.resolvedExportPath()
-
- # Formatting the basename to not include frame padding or
- # extension.
- name = os.path.splitext(os.path.basename(resolved_path))[0]
- name = name.replace(".", "")
- name = name.replace("#", "")
- name = re.sub(r"%.*d", "", name)
- instance = context.create_instance(name=name, parent=parent)
-
- instance.data["task"] = task
- instance.data["item"] = parent.data["item"]
-
- instance.data["family"] = "trackItem.task"
- instance.data["families"] = [asset_type, "local", "task"]
-
- label = "{1}/{0} - {2} - local".format(
- name, parent, asset_type
- )
- instance.data["label"] = label
-
- instance.data["handles"] = parent.data["handles"]
-
- # Add collection or output
- if asset_type == "img":
- collection = None
-
- if "#" in resolved_path:
- head = resolved_path.split("#")[0]
- padding = resolved_path.count("#")
- tail = resolved_path.split("#")[-1]
-
- collection = clique.Collection(
- head=head, padding=padding, tail=tail
- )
-
- if "%" in resolved_path:
- collection = clique.parse(
- resolved_path, pattern="{head}{padding}{tail}"
- )
-
- instance.data["collection"] = collection
- else:
- instance.data["output_path"] = resolved_path
diff --git a/pype/plugins/nukestudio/publish/collect_metadata.py b/pype/plugins/nukestudio/_unused/collect_metadata.py
similarity index 100%
rename from pype/plugins/nukestudio/publish/collect_metadata.py
rename to pype/plugins/nukestudio/_unused/collect_metadata.py
diff --git a/pype/plugins/nukestudio/_unused/collect_plates.py b/pype/plugins/nukestudio/_unused/collect_plates.py
deleted file mode 100644
index 6dbd27dc59..0000000000
--- a/pype/plugins/nukestudio/_unused/collect_plates.py
+++ /dev/null
@@ -1,215 +0,0 @@
-from pyblish import api
-import pype
-
-
-class CollectPlates(api.InstancePlugin):
- """Collect plates"""
-
- order = api.CollectorOrder + 0.49
- label = "Collect Plates"
- hosts = ["nukestudio"]
- families = ["plate"]
-
- def process(self, instance):
- import os
-
- # add to representations
- if not instance.data.get("representations"):
- instance.data["representations"] = list()
-
- version_data = dict()
- context = instance.context
- anatomy = context.data.get("anatomy", None)
- padding = int(anatomy.templates['render']['padding'])
-
- name = instance.data["subset"]
- asset = instance.data["asset"]
- track = instance.data["track"]
- family = instance.data["family"]
- families = instance.data["families"]
- version = instance.data["version"]
- source_path = instance.data["sourcePath"]
- source_file = os.path.basename(source_path)
-
- # staging dir creation
- staging_dir = os.path.dirname(
- source_path)
-
- item = instance.data["item"]
-
- # get handles
- handles = int(instance.data["handles"])
- handle_start = int(instance.data["handleStart"])
- handle_end = int(instance.data["handleEnd"])
-
- # get source frames
- source_in = int(instance.data["sourceIn"])
- source_out = int(instance.data["sourceOut"])
-
- # get source frames
- frame_start = int(instance.data["startFrame"])
- frame_end = int(instance.data["endFrame"])
-
- # get source frames
- source_in_h = int(instance.data["sourceInH"])
- source_out_h = int(instance.data["sourceOutH"])
-
- # get timeline frames
- timeline_in = int(instance.data["timelineIn"])
- timeline_out = int(instance.data["timelineOut"])
-
- # frame-ranges with handles
- timeline_frame_start = int(instance.data["timelineInHandles"])
- timeline_frame_end = int(instance.data["timelineOutHandles"])
-
- # get colorspace
- colorspace = item.sourceMediaColourTransform()
-
- # get sequence from context, and fps
- fps = float(str(instance.data["fps"]))
-
- # test output
- self.log.debug("__ handles: {}".format(handles))
- self.log.debug("__ handle_start: {}".format(handle_start))
- self.log.debug("__ handle_end: {}".format(handle_end))
- self.log.debug("__ frame_start: {}".format(frame_start))
- self.log.debug("__ frame_end: {}".format(frame_end))
- self.log.debug("__ f duration: {}".format(frame_end - frame_start + 1))
- self.log.debug("__ source_in: {}".format(source_in))
- self.log.debug("__ source_out: {}".format(source_out))
- self.log.debug("__ s duration: {}".format(source_out - source_in + 1))
- self.log.debug("__ source_in_h: {}".format(source_in_h))
- self.log.debug("__ source_out_h: {}".format(source_out_h))
- self.log.debug("__ sh duration: {}".format(source_out_h - source_in_h + 1))
- self.log.debug("__ timeline_in: {}".format(timeline_in))
- self.log.debug("__ timeline_out: {}".format(timeline_out))
- self.log.debug("__ t duration: {}".format(timeline_out - timeline_in + 1))
- self.log.debug("__ timeline_frame_start: {}".format(
- timeline_frame_start))
- self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
- self.log.debug("__ colorspace: {}".format(colorspace))
- self.log.debug("__ track: {}".format(track))
- self.log.debug("__ fps: {}".format(fps))
- self.log.debug("__ source_file: {}".format(source_file))
- self.log.debug("__ staging_dir: {}".format(staging_dir))
-
- self.log.debug("__ before family: {}".format(family))
- self.log.debug("__ before families: {}".format(families))
- #
- # this is just workaround because 'clip' family is filtered
- instance.data["family"] = families[-1]
- instance.data["families"].append(family)
-
- # add to data of representation
- version_data.update({
- "handles": handles,
- "handleStart": handle_start,
- "handleEnd": handle_end,
- "sourceIn": source_in,
- "sourceOut": source_out,
- "startFrame": frame_start,
- "endFrame": frame_end,
- "timelineIn": timeline_in,
- "timelineOut": timeline_out,
- "timelineInHandles": timeline_frame_start,
- "timelineOutHandles": timeline_frame_end,
- "fps": fps,
- "colorspace": colorspace,
- "families": [f for f in families if 'ftrack' not in f],
- "asset": asset,
- "subset": name,
- "track": track,
- "version": int(version)
- })
- instance.data["versionData"] = version_data
-
- try:
- basename, ext = os.path.splitext(source_file)
- head, padding = os.path.splitext(basename)
- ext = ext[1:]
- padding = padding[1:]
- # head, padding, ext = source_file.split('.')
- source_first_frame = int(padding)
- padding = len(padding)
- file = "{head}.%0{padding}d.{ext}".format(
- head=head,
- padding=padding,
- ext=ext
- )
- start_frame = source_first_frame
- end_frame = source_first_frame + source_out
- files = [file % i for i in range(
- (source_first_frame + source_in_h),
- ((source_first_frame + source_out_h) + 1), 1)]
- except Exception as e:
- self.log.debug("Exception in file: {}".format(e))
- head, ext = os.path.splitext(source_file)
- ext = ext[1:]
- files = source_file
- start_frame = source_in_h
- end_frame = source_out_h
-
-
- mov_file = head + ".mov"
- mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
- if os.path.exists(mov_path):
- # adding mov into the representations
- self.log.debug("__ mov_path: {}".format(mov_path))
- plates_mov_representation = {
- 'files': mov_file,
- 'stagingDir': staging_dir,
- 'startFrame': 0,
- 'endFrame': source_out - source_in + 1,
- 'step': 1,
- 'frameRate': fps,
- 'preview': True,
- 'thumbnail': False,
- 'name': "preview",
- 'ext': "mov",
- }
- instance.data["representations"].append(
- plates_mov_representation)
-
- thumb_file = head + ".png"
- thumb_path = os.path.join(staging_dir, thumb_file)
- self.log.debug("__ thumb_path: {}".format(thumb_path))
- thumbnail = item.thumbnail(source_in).save(
- thumb_path,
- format='png'
- )
- self.log.debug("__ thumbnail: {}".format(thumbnail))
-
- thumb_representation = {
- 'files': thumb_file,
- 'stagingDir': staging_dir,
- 'name': "thumbnail",
- 'thumbnail': True,
- 'ext': "png"
- }
- instance.data["representations"].append(
- thumb_representation)
-
- # adding representation for plates
- plates_representation = {
- 'files': files,
- 'stagingDir': staging_dir,
- 'name': ext,
- 'ext': ext,
- 'startFrame': start_frame,
- 'endFrame': end_frame,
- }
- instance.data["representations"].append(plates_representation)
-
- # testing families
- family = instance.data["family"]
- families = instance.data["families"]
-
- # test prints version_data
- self.log.debug("__ version_data: {}".format(version_data))
- self.log.debug("__ plates_representation: {}".format(
- plates_representation))
- self.log.debug("__ after family: {}".format(family))
- self.log.debug("__ after families: {}".format(families))
-
- # # this will do FnNsFrameServer
- # FnNsFrameServer.renderFrames(*_args)
diff --git a/pype/plugins/nukestudio/publish/collect_workfile_version.py b/pype/plugins/nukestudio/_unused/collect_workfile_version.py
similarity index 85%
rename from pype/plugins/nukestudio/publish/collect_workfile_version.py
rename to pype/plugins/nukestudio/_unused/collect_workfile_version.py
index 3904c22f52..733fbfc44a 100644
--- a/pype/plugins/nukestudio/publish/collect_workfile_version.py
+++ b/pype/plugins/nukestudio/_unused/collect_workfile_version.py
@@ -11,5 +11,5 @@ class CollectWorkfileVersion(pyblish.api.ContextPlugin):
project = context.data('activeProject')
path = project.path()
- context.data["version"] = pype.get_version_from_path(path)
+ context.data["version"] = int(pype.get_version_from_path(path))
self.log.info("version: {}".format(context.data["version"]))
diff --git a/pype/plugins/nukestudio/_unused/extract_plate.py b/pype/plugins/nukestudio/_unused/extract_plate.py
deleted file mode 100644
index acd7f056ba..0000000000
--- a/pype/plugins/nukestudio/_unused/extract_plate.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import os
-import subprocess
-
-from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
-
-import pype.api
-from pype.vendor import ffmpeg
-
-
-class ExtractPlate(pype.api.Extractor):
- """Extract plate cut to the timeline.
-
- Only supporting mov plates for now. Image sequences already get cut down to
- timeline range.
-
- """
-
- label = "Plate"
- hosts = ["nukestudio"]
- families = ["plate"]
- optional = True
-
- def process(self, instance):
- if not instance.data["sourcePath"].endswith(".mov"):
- self.log.debug(
- "Skipping {} because its not a \"*.mov\" "
- "format.".format(instance)
- )
- return
-
- staging_dir = self.staging_dir(instance)
- filename = "{0}_without_sound".format(instance.name) + ".mov"
- output_path = os.path.join(staging_dir, filename)
- input_path = instance.data["sourcePath"]
-
- self.log.info("Outputting movie to %s" % output_path)
-
- # Cut plate to timeline.
- item = instance.data["item"]
- start_frame = item.mapTimelineToSource(
- item.timelineIn() - (
- instance.data["handleStart"]
- )
- )
- end_frame = item.mapTimelineToSource(
- item.timelineOut() + (
- instance.data["handleEnd"]
- )
- )
- framerate = item.sequence().framerate().toFloat()
- output_options = {
- "vcodec": "copy",
- "ss": start_frame / framerate,
- "frames": int(end_frame - start_frame) + 1
- }
-
- try:
- (
- ffmpeg
- .input(input_path)
- .output(output_path, **output_options)
- .run(overwrite_output=True,
- capture_stdout=True,
- capture_stderr=True)
- )
- except ffmpeg.Error as e:
- ffmpeg_error = "ffmpeg error: {}".format(e.stderr)
- self.log.error(ffmpeg_error)
- raise RuntimeError(ffmpeg_error)
-
- # Extract audio.
- filename = "{0}".format(instance.name) + ".wav"
- audio_path = os.path.join(staging_dir, filename)
- writeSequenceAudioWithHandles(
- audio_path,
- item.sequence(),
- item.timelineIn(),
- item.timelineOut(),
- 0,
- 0
- )
-
- input_path = output_path
- filename = "{0}_with_sound".format(instance.name) + ".mov"
- output_path = os.path.join(staging_dir, filename)
-
- args = [
- "ffmpeg",
- "-i", input_path,
- "-i", audio_path,
- "-vcodec", "copy",
- output_path
- ]
-
- self.log.debug(subprocess.list2cmdline(args))
- p = subprocess.Popen(
- args,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- stdin=subprocess.PIPE,
- cwd=os.path.dirname(args[-1])
- )
-
- output = p.communicate()[0]
-
- if p.returncode != 0:
- raise ValueError(output)
-
- self.log.debug(output)
-
- # Adding representation.
- ext = os.path.splitext(output_path)[1][1:]
- representation = {
- "files": os.path.basename(output_path),
- "staging_dir": staging_dir,
- "startFrame": 0,
- "endFrame": end_frame - start_frame,
- "step": 1,
- "frameRate": framerate,
- "thumbnail": False,
- "name": ext,
- "ext": ext
- }
- instance.data["representations"] = [representation]
- self.log.debug("Adding representation: {}".format(representation))
-
- # Adding thumbnail representation.
- path = instance.data["sourcePath"].replace(".mov", ".png")
- if not os.path.exists(path):
- item.thumbnail(start_frame).save(path, format="png")
-
- representation = {
- "files": os.path.basename(path),
- "stagingDir": os.path.dirname(path),
- "name": "thumbnail",
- "thumbnail": True,
- "ext": "png"
- }
- instance.data["representations"].append(representation)
- self.log.debug("Adding representation: {}".format(representation))
diff --git a/pype/plugins/nukestudio/_unused/extract_plates.py b/pype/plugins/nukestudio/_unused/extract_plates.py
deleted file mode 100644
index 6bf6019adb..0000000000
--- a/pype/plugins/nukestudio/_unused/extract_plates.py
+++ /dev/null
@@ -1,238 +0,0 @@
-from pyblish import api
-import pype
-
-
-class ExtractPlates(pype.api.Extractor):
- """Extracts plates"""
-
- order = api.ExtractorOrder
- label = "Extract Plates"
- hosts = ["nukestudio"]
- families = ["encode"]
-
- def process(self, instance):
- import os
- import hiero.core
- # from hiero.ui.nuke_bridge import FnNsFrameServer
-
- # add to representations
- if not instance.data.get("representations"):
- instance.data["representations"] = list()
-
- version_data = dict()
- context = instance.context
- anatomy = context.data.get("anatomy", None)
- padding = int(anatomy.templates['render']['padding'])
-
- name = instance.data["subset"]
- asset = instance.data["asset"]
- track = instance.data["track"]
- family = instance.data["family"]
- families = instance.data["families"]
- attrs = instance.data["attributes"]
- version = instance.data["version"]
-
- # staging dir creation
- self.log.debug("creating staging dir")
- self.staging_dir(instance)
-
- staging_dir = instance.data['stagingDir']
-
- Nuke_writer = hiero.core.nuke.ScriptWriter()
-
- item = instance.data["item"]
-
- # get handles
- handles = int(instance.data["handles"])
- handle_start = int(instance.data["handleStart"])
- handle_end = int(instance.data["handleEnd"])
-
- # get timeline frames
- timeline_in = int(instance.data["timelineIn"])
- timeline_out = int(instance.data["timelineOut"])
-
- # frame-ranges with handles
- timeline_frame_start = int(instance.data["timelineInHandles"])
- timeline_frame_end = int(instance.data["timelineOutHandles"])
-
- # creating comp frame range
- frame_start = int(instance.data["startFrame"])
- frame_end = int(instance.data["endFrame"])
-
- # get colorspace
- colorspace = instance.context.data["colorspace"]
-
- # get sequence from context, and fps
- fps = int(instance.data["fps"])
-
- # test output
- self.log.debug("__ handles: {}".format(handles))
- self.log.debug("__ handle_start: {}".format(handle_start))
- self.log.debug("__ handle_end: {}".format(handle_end))
- self.log.debug("__ timeline_in: {}".format(timeline_in))
- self.log.debug("__ timeline_out: {}".format(timeline_out))
- self.log.debug("__ timeline_frame_start: {}".format(
- timeline_frame_start))
- self.log.debug("__ timeline_frame_end: {}".format(timeline_frame_end))
- self.log.debug("__ frame_start: {}".format(frame_start))
- self.log.debug("__ frame_end: {}".format(frame_end))
- self.log.debug("__ colorspace: {}".format(colorspace))
- self.log.debug("__ track: {}".format(track))
- self.log.debug("__ fps: {}".format(fps))
-
- # Generate Nuke script
- write_name = "Write_out"
-
- # root node
- root_node = hiero.core.nuke.RootNode(
- frame_start,
- frame_end,
- fps=fps
- )
-
- root_node.addProjectSettings(colorspace)
-
- # create write node and link it to root node
- Nuke_writer.addNode(root_node)
- '''TrackItem.addToNukeScript(script=, firstFrame=None, additionalNodes=[], additionalNodesCallback=None, includeRetimes=False, retimeMethod=None, startHandle=None, endHandle=None, colourTransform=None, offset=0, nodeLabel=None, includeAnnotations=False, includeEffects=True, outputToSequenceFormat=False)'''
- item.addToNukeScript(
- script=Nuke_writer,
- firstFrame=frame_start,
- includeRetimes=attrs["includeRetimes"],
- retimeMethod=attrs["retimeMethod"],
- startHandle=handle_start,
- endHandle=handle_end,
- includeEffects=attrs["includeEffects"],
- includeAnnotations=attrs["includeAnnotations"]
- )
-
- write_knobs = attrs["nodes"]["write"]["attributes"]
-
- # TODO: take template from anatomy
- nukescript_file = "{asset}_{name}_v{version}.{ext}".format(
- asset=asset,
- name=name,
- version=version,
- ext="nk"
- )
- nukescript_path = os.path.join(
- staging_dir, nukescript_file
- )
-
- # TODO: take template from anatomy
- output_file = "{asset}_{name}_v{version}.%0{padding}d.{ext}".format(
- asset=asset,
- name=name,
- version=version,
- padding=padding,
- ext=write_knobs["file_type"]
- )
- output_path = os.path.join(
- staging_dir, output_file
- )
-
- write_node = hiero.core.nuke.WriteNode(output_path.replace("\\", "/"))
- write_node.setKnob("name", write_name)
- write_node.setKnob("file_type", write_knobs["file_type"])
- for knob, value in write_knobs.items():
- write_node.setKnob(knob, value)
-
- Nuke_writer.addNode(write_node)
-
- Nuke_writer.writeToDisk(nukescript_path)
-
- # test prints
- self.log.debug("__ output_file: {}".format(output_file))
- self.log.debug("__ output_path: {}".format(output_path))
- self.log.debug("__ nukescript_file: {}".format(nukescript_file))
- self.log.debug("__ nukescript_path: {}".format(nukescript_path))
- self.log.debug("__ write_knobs: {}".format(write_knobs))
- self.log.debug("__ write_name: {}".format(write_name))
- self.log.debug("__ Nuke_writer: {}".format(Nuke_writer))
-
- # create rendering arguments for FnNsFrameServer
- _args = [
- nukescript_path,
- "{}-{}".format(frame_start, frame_end),
- write_name,
- ["main"]
- ]
-
- # add to data of representation
- version_data.update({
- "handles": handles,
- "handleStart": handle_start,
- "handleEnd": handle_end,
- "timelineIn": timeline_in,
- "timelineOut": timeline_out,
- "timelineInHandles": timeline_frame_start,
- "timelineOutHandles": timeline_frame_end,
- "compFrameIn": frame_start,
- "compFrameOut": frame_end,
- "fps": fps,
- "colorspace": write_knobs["colorspace"],
- "nukeScriptFileName": nukescript_file,
- "nukeWriteFileName": output_file,
- "nukeWriteName": write_name,
- "FnNsFrameServer_renderFrames_args": str(_args),
- "family": family,
- "families": families,
- "asset": asset,
- "subset": name,
- "track": track,
- "version": int(version)
- })
- instance.data["versionData"] = version_data
-
- # adding representation for nukescript
- nk_representation = {
- 'files': nukescript_file,
- 'stagingDir': staging_dir,
- 'name': "nk",
- 'ext': "nk",
- }
- instance.data["representations"].append(nk_representation)
-
- # adding representation for plates
- plates_representation = {
- 'files': [output_file % i for i in range(
- frame_start, (frame_end + 1), 1)],
- 'stagingDir': staging_dir,
- 'name': write_knobs["file_type"],
- 'ext': write_knobs["file_type"],
- }
- instance.data["representations"].append(plates_representation)
-
- # adding checking file to context for ExtractPlateCheck(context) plugin
- context.data["platesCheck"] = os.path.join(
- staging_dir, output_file % frame_end
- )
-
- if not context.data.get("frameServerRenderQueue"):
- context.data["frameServerRenderQueue"] = list()
-
- # add to render queue list
- context.data["frameServerRenderQueue"].append(_args)
-
- # test prints
- self.log.debug("__ before family: {}".format(family))
- self.log.debug("__ before families: {}".format(families))
-
- # this is just workaround because 'clip' family is filtered
- instance.data["family"] = families[-1]
- instance.data["families"].append(family)
-
- # testing families
- family = instance.data["family"]
- families = instance.data["families"]
-
- # test prints version_data
- self.log.debug("__ version_data: {}".format(version_data))
- self.log.debug("__ nk_representation: {}".format(nk_representation))
- self.log.debug("__ plates_representation: {}".format(
- plates_representation))
- self.log.debug("__ after family: {}".format(family))
- self.log.debug("__ after families: {}".format(families))
-
- # # this will do FnNsFrameServer
- # FnNsFrameServer.renderFrames(*_args)
diff --git a/pype/plugins/nukestudio/_unused/extract_review.py b/pype/plugins/nukestudio/_unused/extract_review.py
deleted file mode 100644
index 13ef3484aa..0000000000
--- a/pype/plugins/nukestudio/_unused/extract_review.py
+++ /dev/null
@@ -1,138 +0,0 @@
-import os
-import subprocess
-
-from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
-
-import pype.api
-
-
-class ExtractReview(pype.api.Extractor):
- """Extract Quicktime with optimized codec for reviewing."""
-
- label = "Review"
- hosts = ["nukestudio"]
- families = ["review"]
- optional = True
-
- def process(self, instance):
- staging_dir = self.staging_dir(instance)
- filename = "{0}_without_sound".format(instance.name) + ".mov"
- output_path = os.path.join(staging_dir, filename)
- input_path = instance.data["sourcePath"]
- item = instance.data["item"]
-
- # Has to be yuv420p for compatibility with older players and smooth
- # playback. This does come with a sacrifice of more visible banding
- # issues.
- start_frame = item.mapTimelineToSource(item.timelineIn())
- end_frame = item.mapTimelineToSource(item.timelineOut())
- args = [
- "ffmpeg",
- "-ss", str(start_frame / item.sequence().framerate().toFloat()),
- "-i", input_path,
- "-pix_fmt", "yuv420p",
- "-crf", "18",
- "-timecode", "00:00:00:01",
- "-vf", "scale=trunc(iw/2)*2:trunc(ih/2)*2",
- "-frames", str(int(end_frame - start_frame) + 1),
- output_path
- ]
-
- self.log.debug(subprocess.list2cmdline(args))
- p = subprocess.Popen(
- args,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- stdin=subprocess.PIPE,
- cwd=os.path.dirname(args[-1])
- )
-
- output = p.communicate()[0]
-
- if p.returncode != 0:
- raise ValueError(output)
-
- self.log.debug(output)
-
- # Extract audio.
- filename = "{0}".format(instance.name) + ".wav"
- audio_path = os.path.join(staging_dir, filename)
- writeSequenceAudioWithHandles(
- audio_path,
- item.sequence(),
- item.timelineIn(),
- item.timelineOut(),
- 0,
- 0
- )
-
- input_path = output_path
- filename = "{0}_with_sound".format(instance.name) + ".mov"
- output_path = os.path.join(staging_dir, filename)
-
- args = [
- "ffmpeg",
- "-i", input_path,
- "-i", audio_path,
- "-vcodec", "copy",
- output_path
- ]
-
- self.log.debug(subprocess.list2cmdline(args))
- p = subprocess.Popen(
- args,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- stdin=subprocess.PIPE,
- cwd=os.path.dirname(args[-1])
- )
-
- output = p.communicate()[0]
-
- if p.returncode != 0:
- raise ValueError(output)
-
- self.log.debug(output)
-
- # Adding movie representation.
- start_frame = int(
- instance.data["sourceIn"] - (
- instance.data["handleStart"]
- )
- )
- end_frame = int(
- instance.data["sourceOut"] + (
- instance.data["handleEnd"]
- )
- )
- representation = {
- "files": os.path.basename(output_path),
- "staging_dir": staging_dir,
- "startFrame": 0,
- "endFrame": end_frame - start_frame,
- "step": 1,
- "frameRate": (
- instance.context.data["activeSequence"].framerate().toFloat()
- ),
- "preview": True,
- "thumbnail": False,
- "name": "preview",
- "ext": "mov",
- }
- instance.data["representations"] = [representation]
- self.log.debug("Adding representation: {}".format(representation))
-
- # Adding thumbnail representation.
- path = instance.data["sourcePath"].replace(".mov", ".png")
- if not os.path.exists(path):
- item.thumbnail(start_frame).save(path, format="png")
-
- representation = {
- "files": os.path.basename(path),
- "stagingDir": os.path.dirname(path),
- "name": "thumbnail",
- "thumbnail": True,
- "ext": "png"
- }
- instance.data["representations"].append(representation)
- self.log.debug("Adding representation: {}".format(representation))
diff --git a/pype/plugins/nukestudio/publish/collect_calculate_retime.py b/pype/plugins/nukestudio/publish/collect_calculate_retime.py
new file mode 100644
index 0000000000..a97b43a4ce
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_calculate_retime.py
@@ -0,0 +1,121 @@
+from pyblish import api
+import hiero
+import math
+
+
+class CollectCalculateRetime(api.InstancePlugin):
+ """Calculate Retiming of selected track items."""
+
+ order = api.CollectorOrder + 0.02
+ label = "Collect Calculate Retiming"
+ hosts = ["nukestudio"]
+ families = ['retime']
+
+ def process(self, instance):
+ margin_in = instance.data["retimeMarginIn"]
+ margin_out = instance.data["retimeMarginOut"]
+ self.log.debug("margin_in: '{0}', margin_out: '{1}'".format(margin_in, margin_out))
+
+ handle_start = instance.data["handleStart"]
+ handle_end = instance.data["handleEnd"]
+
+ track_item = instance.data["item"]
+
+ # define basic clip frame range variables
+ timeline_in = int(track_item.timelineIn())
+ timeline_out = int(track_item.timelineOut())
+ source_in = int(track_item.sourceIn())
+ source_out = int(track_item.sourceOut())
+ speed = track_item.playbackSpeed()
+ self.log.debug("_BEFORE: \n timeline_in: `{0}`,\n timeline_out: `{1}`,\
+ \n source_in: `{2}`,\n source_out: `{3}`,\n speed: `{4}`,\n handle_start: `{5}`,\n handle_end: `{6}`".format(
+ timeline_in,
+ timeline_out,
+ source_in,
+ source_out,
+ speed,
+ handle_start,
+ handle_end
+ ))
+
+        # loop within subtrack items
+ source_in_change = 0
+ source_out_change = 0
+ for s_track_item in track_item.linkedItems():
+ if isinstance(s_track_item, hiero.core.EffectTrackItem) \
+ and "TimeWarp" in s_track_item.node().Class():
+
+ # adding timewarp attribute to instance
+ if not instance.data.get("timeWarpNodes", None):
+ instance.data["timeWarpNodes"] = list()
+
+ # ignore item if not enabled
+ if s_track_item.isEnabled():
+ node = s_track_item.node()
+ name = node["name"].value()
+ look_up = node["lookup"].value()
+ animated = node["lookup"].isAnimated()
+ if animated:
+                    look_up = [
+                        node["lookup"].getValueAt(i) - i
+                        for i in range(
+                            (timeline_in - handle_start),
+                            (timeline_out + handle_end) + 1)
+                    ]
+                    # calculate difference
+ diff_in = (node["lookup"].getValueAt(
+ timeline_in)) - timeline_in
+ diff_out = (node["lookup"].getValueAt(
+ timeline_out)) - timeline_out
+
+ # calculate source
+ source_in_change += diff_in
+ source_out_change += diff_out
+
+ # calculate speed
+ speed_in = (node["lookup"].getValueAt(timeline_in) / (
+ float(timeline_in) * .01)) * .01
+ speed_out = (node["lookup"].getValueAt(timeline_out) / (
+ float(timeline_out) * .01)) * .01
+
+ # calculate handles
+ handle_start = int(
+ math.ceil(
+ (handle_start * speed_in * 1000) / 1000.0)
+ )
+
+ handle_end = int(
+ math.ceil(
+ (handle_end * speed_out * 1000) / 1000.0)
+ )
+ self.log.debug(
+ ("diff_in, diff_out", diff_in, diff_out))
+                    self.log.debug(
+                        ("source_in_change, source_out_change",
+                         source_in_change, source_out_change))
+
+ instance.data["timeWarpNodes"].append({"Class": "TimeWarp",
+ "name": name,
+ "lookup": look_up})
+
+ self.log.debug((source_in_change, source_out_change))
+ # recalculate handles by the speed
+ handle_start *= speed
+ handle_end *= speed
+ self.log.debug("speed: handle_start: '{0}', handle_end: '{1}'".format(handle_start, handle_end))
+
+ source_in += int(source_in_change)
+ source_out += int(source_out_change * speed)
+        handle_start += margin_in
+        handle_end += margin_out
+ self.log.debug("margin: handle_start: '{0}', handle_end: '{1}'".format(handle_start, handle_end))
+
+ # add all data to Instance
+ instance.data["sourceIn"] = source_in
+ instance.data["sourceOut"] = source_out
+ instance.data["sourceInH"] = int(source_in - math.ceil(
+ (handle_start * 1000) / 1000.0))
+ instance.data["sourceOutH"] = int(source_out + math.ceil(
+ (handle_end * 1000) / 1000.0))
+ instance.data["speed"] = speed
+
+ self.log.debug("timeWarpNodes: {}".format(instance.data["timeWarpNodes"]))
+ self.log.debug("sourceIn: {}".format(instance.data["sourceIn"]))
+ self.log.debug("sourceOut: {}".format(instance.data["sourceOut"]))
+ self.log.debug("speed: {}".format(instance.data["speed"]))
diff --git a/pype/plugins/nukestudio/publish/collect_clips.py b/pype/plugins/nukestudio/publish/collect_clips.py
index a91558ae2c..7a400909fd 100644
--- a/pype/plugins/nukestudio/publish/collect_clips.py
+++ b/pype/plugins/nukestudio/publish/collect_clips.py
@@ -20,84 +20,114 @@ class CollectClips(api.ContextPlugin):
projectdata = context.data["projectData"]
version = context.data.get("version", "001")
- instances_data = []
- for item in context.data.get("selection", []):
- # Skip audio track items
- # Try/Except is to handle items types, like EffectTrackItem
- try:
- media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
- if str(item.mediaType()) != media_type:
+ sequence = context.data.get("activeSequence")
+ selection = context.data.get("selection")
+
+ track_effects = dict()
+
+ # collect all trackItems as instances
+ for track_index, video_track in enumerate(sequence.videoTracks()):
+ items = video_track.items()
+ sub_items = video_track.subTrackItems()
+
+ for item in items:
+            # skip items that are not selected or are disabled
+ if item not in selection or not item.isEnabled():
continue
- except:
+
+ # Skip audio track items
+ # Try/Except is to handle items types, like EffectTrackItem
+ try:
+ media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
+ if str(item.mediaType()) != media_type:
+ continue
+ except Exception:
+ continue
+
+ asset = item.name()
+ track = item.parent()
+ source = item.source().mediaSource()
+ source_path = source.firstpath()
+ effects = [f for f in item.linkedItems() if f.isEnabled()]
+
+            # If source is *.nk it's a comp effect and we need to fetch the
+ # write node output. This should be improved by parsing the script
+ # rather than opening it.
+ if source_path.endswith(".nk"):
+ nuke.scriptOpen(source_path)
+                # There should only be one.
+ write_node = nuke.allNodes(filter="Write")[0]
+ path = nuke.filename(write_node)
+
+ if "%" in path:
+ # Get start frame from Nuke script and use the item source
+ # in/out, because you can have multiple shots covered with
+ # one nuke script.
+ start_frame = int(nuke.root()["first_frame"].getValue())
+ if write_node["use_limit"].getValue():
+ start_frame = int(write_node["first"].getValue())
+
+ path = path % (start_frame + item.sourceIn())
+
+ source_path = path
+ self.log.debug(
+ "Fetched source path \"{}\" from \"{}\" in "
+ "\"{}\".".format(
+ source_path, write_node.name(), source.firstpath()
+ )
+ )
+
+ try:
+ head, padding, ext = os.path.basename(source_path).split(".")
+ source_first_frame = int(padding)
+ except Exception:
+ source_first_frame = 0
+
+ data = {"name": "{0}_{1}".format(track.name(), item.name()),
+ "item": item,
+ "source": source,
+ "sourcePath": source_path,
+ "track": track.name(),
+ "trackIndex": track_index,
+ "sourceFirst": source_first_frame,
+ "effects": effects,
+ "sourceIn": int(item.sourceIn()),
+ "sourceOut": int(item.sourceOut()),
+ "clipIn": int(item.timelineIn()),
+ "clipOut": int(item.timelineOut()),
+ "asset": asset,
+ "family": "clip",
+ "families": [],
+ "handles": 0,
+ "handleStart": projectdata.get("handles", 0),
+ "handleEnd": projectdata.get("handles", 0),
+ "version": int(version)}
+
+ instance = context.create_instance(**data)
+
+ self.log.info("Created instance: {}".format(instance))
+ self.log.debug(">> effects: {}".format(instance.data["effects"]))
+
+ context.data["assetsShared"][asset] = dict()
+
+ # from now we are collecting only subtrackitems on
+ # track with no video items
+ if len(items) > 0:
continue
- track = item.parent()
- source = item.source().mediaSource()
- source_path = source.firstpath()
+ # create list in track key
+ # get all subTrackItems and add it to context
+ track_effects[track_index] = list()
- # If source is *.nk its a comp effect and we need to fetch the
- # write node output. This should be improved by parsing the script
- # rather than opening it.
- if source_path.endswith(".nk"):
- nuke.scriptOpen(source_path)
- # There should noly be one.
- write_node = nuke.allNodes(filter="Write")[0]
- path = nuke.filename(write_node)
+ # collect all subtrack items
+ for sitem in sub_items:
+            # unwrap from tuple >> it is always a tuple with one item
+            sitem = sitem[0]
+            # skip if not enabled
+ if not sitem.isEnabled():
+ continue
- if "%" in path:
- # Get start frame from Nuke script and use the item source
- # in/out, because you can have multiple shots covered with
- # one nuke script.
- start_frame = int(nuke.root()["first_frame"].getValue())
- if write_node["use_limit"].getValue():
- start_frame = int(write_node["first"].getValue())
+ track_effects[track_index].append(sitem)
- path = path % (start_frame + item.sourceIn())
-
- source_path = path
- self.log.debug(
- "Fetched source path \"{}\" from \"{}\" in "
- "\"{}\".".format(
- source_path, write_node.name(), source.firstpath()
- )
- )
-
- try:
- head, padding, ext = os.path.basename(source_path).split(".")
- source_first_frame = int(padding)
- except:
- source_first_frame = 0
-
- instances_data.append(
- {
- "name": "{0}_{1}".format(track.name(), item.name()),
- "item": item,
- "source": source,
- "sourcePath": source_path,
- "track": track.name(),
- "sourceFirst": source_first_frame,
- "sourceIn": int(item.sourceIn()),
- "sourceOut": int(item.sourceOut()),
- "startFrame": int(item.timelineIn()),
- "endFrame": int(item.timelineOut()),
- "fps": float(item.sequence().framerate().toFloat())
- }
- )
-
- for data in instances_data:
- data.update(
- {
- "asset": data["item"].name(),
- "family": "clip",
- "families": [],
- "handles": 0,
- "handleStart": projectdata.get("handles", 0),
- "handleEnd": projectdata.get("handles", 0),
- "version": version
- }
- )
- instance = context.create_instance(**data)
- self.log.debug(
- "Created instance with data: {}".format(instance.data)
- )
- context.data["assetsShared"][data["asset"]] = dict()
+ context.data["trackEffects"] = track_effects
+ self.log.debug(">> sub_track_items: `{}`".format(track_effects))
diff --git a/pype/plugins/nukestudio/publish/collect_effects.py b/pype/plugins/nukestudio/publish/collect_effects.py
new file mode 100644
index 0000000000..11693ab1fe
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_effects.py
@@ -0,0 +1,96 @@
+import pyblish.api
+import re
+
+
+class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
+ """Collect video tracks effects into context."""
+
+ order = pyblish.api.CollectorOrder + 0.1015
+ label = "Collect Soft Lut Effects"
+ families = ["clip"]
+
+ def process(self, instance):
+
+ self.log.debug("Finding soft effect for subset: `{}`".format(instance.data.get("subset")))
+
+ # taking active sequence
+ subset = instance.data["subset"]
+ track_effects = instance.context.data.get("trackEffects", {})
+ track_index = instance.data["trackIndex"]
+ effects = instance.data["effects"]
+
+ # creating context attribute
+ self.effects = {"assignTo": subset, "effects": dict()}
+
+ for sitem in effects:
+ self.add_effect(instance, track_index, sitem)
+
+ for t_index, sitems in track_effects.items():
+ for sitem in sitems:
+ if not t_index > track_index:
+ continue
+ self.log.debug(">> sitem: `{}`".format(sitem))
+ self.add_effect(instance, t_index, sitem)
+
+ if self.effects["effects"]:
+ instance.data["effectTrackItems"] = self.effects
+
+ if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
+ instance.data["families"] += ["lut"]
+ self.log.debug("effects.keys: {}".format(instance.data.get("effectTrackItems", {}).keys()))
+ self.log.debug("effects: {}".format(instance.data.get("effectTrackItems", {})))
+
+ def add_effect(self, instance, track_index, item):
+ track = item.parentTrack().name()
+ # node serialization
+ node = item.node()
+ node_serialized = self.node_serialisation(instance, node)
+
+ # collect timelineIn/Out
+ effect_t_in = int(item.timelineIn())
+ effect_t_out = int(item.timelineOut())
+
+ node_name = item.name()
+ node_class = re.sub(r"\d+", "", node_name)
+
+ self.effects["effects"].update({node_name: {
+ "class": node_class,
+ "timelineIn": effect_t_in,
+ "timelineOut": effect_t_out,
+ "subTrackIndex": item.subTrackIndex(),
+ "trackIndex": track_index,
+ "track": track,
+ "node": node_serialized
+ }})
+
+ def node_serialisation(self, instance, node):
+ node_serialized = {}
+ timeline_in_h = instance.data["clipInH"]
+ timeline_out_h = instance.data["clipOutH"]
+
+ # adding ignoring knob keys
+ _ignoring_keys = ['invert_mask', 'help', 'mask',
+ 'xpos', 'ypos', 'layer', 'process_mask', 'channel',
+ 'channels', 'maskChannelMask', 'maskChannelInput',
+ 'note_font', 'note_font_size', 'unpremult',
+ 'postage_stamp_frame', 'maskChannel', 'export_cc',
+ 'select_cccid', 'mix', 'version']
+
+        # loop through all knobs and collect any that are not ignored
+        # and have a value
+ for knob in node.knobs().keys():
+ # skip nodes in ignore keys
+ if knob in _ignoring_keys:
+ continue
+
+ # get animation if node is animated
+ if node[knob].isAnimated():
+ # grab animation including handles
+ knob_anim = [node[knob].getValueAt(i)
+ for i in range(timeline_in_h, timeline_out_h + 1)]
+
+ node_serialized[knob] = knob_anim
+ else:
+ node_serialized[knob] = node[knob].value()
+
+ return node_serialized
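
The serialization above keeps only plain Python values, so the collected effects can later be dumped to json. An illustrative (not exhaustive) shape of the resulting `effectTrackItems` data, with hypothetical node names and values:

```python
# Illustrative shape of instance.data["effectTrackItems"]:
effect_track_items = {
    "assignTo": "plateMain",
    "effects": {
        "Grade1": {
            "class": "Grade",      # node name with trailing digits stripped
            "timelineIn": 1001,
            "timelineOut": 1020,
            "subTrackIndex": 0,
            "trackIndex": 1,
            "track": "lutTrack",
            "node": {
                # animated knob: one sample per frame, handles included
                "white": [1.0, 1.01, 1.02],
                # static knob: stored as a plain value
                "black": 0.0,
            },
        },
    },
}
```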
diff --git a/pype/plugins/nukestudio/publish/collect_frame_ranges.py b/pype/plugins/nukestudio/publish/collect_frame_ranges.py
new file mode 100644
index 0000000000..38224f683d
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_frame_ranges.py
@@ -0,0 +1,49 @@
+import pyblish.api
+
+
+class CollectClipFrameRanges(pyblish.api.InstancePlugin):
+ """Collect all frame range data: source(In,Out), timeline(In,Out), edit_(in, out), f(start, end)"""
+
+ order = pyblish.api.CollectorOrder + 0.101
+ label = "Collect Frame Ranges"
+ hosts = ["nukestudio"]
+
+ def process(self, instance):
+
+ data = dict()
+
+ # Timeline data.
+ handle_start = instance.data["handleStart"]
+ handle_end = instance.data["handleEnd"]
+
+ source_in_h = instance.data("sourceInH",
+ instance.data("sourceIn") - handle_start)
+ source_out_h = instance.data("sourceOutH",
+ instance.data("sourceOut") + handle_end)
+
+ timeline_in = instance.data["clipIn"]
+ timeline_out = instance.data["clipOut"]
+
+ timeline_in_h = timeline_in - handle_start
+ timeline_out_h = timeline_out + handle_end
+
+ # set frame start with tag or take it from timeline
+ frame_start = instance.data.get("startingFrame")
+
+ if not frame_start:
+ frame_start = timeline_in
+
+ frame_end = frame_start + (timeline_out - timeline_in)
+
+ data.update(
+ {
+ "sourceInH": source_in_h,
+ "sourceOutH": source_out_h,
+ "frameStart": frame_start,
+ "frameEnd": frame_end,
+ "clipInH": timeline_in_h,
+ "clipOutH": timeline_out_h
+ }
+ )
+ self.log.debug("__ data: {}".format(data))
+ instance.data.update(data)
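
A worked example of the arithmetic above, assuming 10-frame handles, no retime-provided `sourceInH`/`sourceOutH`, and no `startingFrame` tag (numbers are illustrative):

```python
handle_start = handle_end = 10
source_in, source_out = 100, 150
clip_in, clip_out = 86400, 86450

source_in_h = source_in - handle_start            # 90
source_out_h = source_out + handle_end            # 160
clip_in_h = clip_in - handle_start                # 86390
clip_out_h = clip_out + handle_end                # 86460
frame_start = clip_in                             # no startingFrame tag
frame_end = frame_start + (clip_out - clip_in)    # 86450
```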
diff --git a/pype/plugins/nukestudio/publish/collect_framerate.py b/pype/plugins/nukestudio/publish/collect_framerate.py
index 56b76b5011..a0fd4df599 100644
--- a/pype/plugins/nukestudio/publish/collect_framerate.py
+++ b/pype/plugins/nukestudio/publish/collect_framerate.py
@@ -9,4 +9,4 @@ class CollectFramerate(api.ContextPlugin):
def process(self, context):
sequence = context.data["activeSequence"]
- context.data["framerate"] = sequence.framerate().toFloat()
+ context.data["fps"] = sequence.framerate().toFloat()
diff --git a/pype/plugins/nukestudio/publish/collect_handles.py b/pype/plugins/nukestudio/publish/collect_handles.py
index 03652989b8..8da83e715b 100644
--- a/pype/plugins/nukestudio/publish/collect_handles.py
+++ b/pype/plugins/nukestudio/publish/collect_handles.py
@@ -1,4 +1,3 @@
-import json
from pyblish import api
@@ -32,13 +31,18 @@ class CollectClipHandles(api.ContextPlugin):
if instance.data.get("main"):
name = instance.data["asset"]
if assets_shared.get(name):
- self.log.debug("Adding to shared assets: `{}`".format(
- instance.data["name"]))
- assets_shared[name].update({
- "handles": handles,
- "handleStart": handle_start,
- "handleEnd": handle_end
- })
+ asset_shared = assets_shared.get(name)
+ else:
+                asset_shared = assets_shared[name] = dict()
+
+ self.log.debug("Adding to shared assets: `{}`".format(
+ instance.data["name"]))
+ asset_shared.update({
+ "handles": handles,
+ "handleStart": handle_start,
+ "handleEnd": handle_end
+ })
+
for instance in filtered_instances:
if not instance.data.get("main") and not instance.data.get("handleTag"):
@@ -51,6 +55,8 @@ class CollectClipHandles(api.ContextPlugin):
"handleStart", 0
)
instance.data["handleEnd"] = s_asset_data.get("handleEnd", 0)
+
+ # debug printing
self.log.debug("_ s_asset_data: `{}`".format(
s_asset_data))
self.log.debug("_ instance.data[handles]: `{}`".format(
diff --git a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
index bbae365fa6..5f29837d80 100644
--- a/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
+++ b/pype/plugins/nukestudio/publish/collect_hierarchy_context.py
@@ -34,9 +34,14 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
def process(self, context):
for instance in context[:]:
+ assets_shared = context.data.get("assetsShared")
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data.get("asset")
+ sequence = context.data['activeSequence']
+ width = int(sequence.format().width())
+ height = int(sequence.format().height())
+ pixel_aspect = sequence.format().pixelAspect()
# build data for inner nukestudio project property
data = {
@@ -54,7 +59,7 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
self.log.debug("__ tags: {}".format(tags))
if not tags:
- return
+ continue
# loop trough all tags
for t in tags:
@@ -139,19 +144,36 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"Clip: `{}`".format(asset)
)
- assetsShared = {
- asset: {
- "asset": instance.data["asset"],
- "hierarchy": hierarchy,
- "parents": parents,
- "tasks": instance.data['tasks']
- }}
- self.log.debug("__ assetsShared: {}".format(assetsShared))
# add formated hierarchy path into instance data
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
- context.data["assetsShared"].update(
- assetsShared)
+
+ # adding to asset shared dict
+ self.log.debug("__ assets_shared: {}".format(assets_shared))
+ if assets_shared.get(asset):
+ self.log.debug("Adding to shared assets: `{}`".format(
+ asset))
+ asset_shared = assets_shared.get(asset)
+ else:
+                asset_shared = assets_shared[asset] = dict()
+
+ asset_shared.update({
+ "asset": asset,
+ "hierarchy": hierarchy,
+ "parents": parents,
+ "width": width,
+ "height": height,
+ "pixelAspect": pixel_aspect,
+ "tasks": instance.data["tasks"]
+ })
+
+ # adding frame start if any on instance
+ start_frame = instance.data.get("startingFrame")
+ if start_frame:
+ asset_shared.update({
+ "startingFrame": start_frame
+ })
+
class CollectHierarchyContext(pyblish.api.ContextPlugin):
@@ -176,6 +198,7 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
def process(self, context):
instances = context[:]
+
# create hierarchyContext attr if context has none
temp_context = {}
@@ -200,6 +223,19 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
+ instance.data["width"] = s_asset_data["width"]
+ instance.data["height"] = s_asset_data["height"]
+ instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
+
+ # adding frame start if any on instance
+ start_frame = s_asset_data.get("startingFrame")
+ if start_frame:
+ instance.data["frameStart"] = start_frame
+ instance.data["frameEnd"] = start_frame + (
+ instance.data["clipOut"] -
+ instance.data["clipIn"])
+
self.log.debug(
"__ instance.data[parents]: {}".format(
@@ -226,37 +262,23 @@ class CollectHierarchyContext(pyblish.api.ContextPlugin):
# get custom attributes of the shot
if instance.data.get("main"):
- start_frame = instance.data.get("frameStart", 0)
-
in_info['custom_attributes'] = {
- 'handles': int(instance.data.get('handles')),
- 'handle_start': handle_start,
- 'handle_end': handle_end,
- 'fstart': int(instance.data["startFrame"]),
- 'fend': int(instance.data["endFrame"]),
- 'fps': instance.data["fps"],
- "edit_in": int(instance.data["startFrame"]),
- "edit_out": int(instance.data["endFrame"])
+ 'handles': int(instance.data.get('handles', 0)),
+ "handleStart": handle_start,
+ "handleEnd": handle_end,
+ "frameStart": instance.data["frameStart"],
+ "frameEnd": instance.data["frameEnd"],
+ "clipIn": instance.data["clipIn"],
+ "clipOut": instance.data["clipOut"],
+ 'fps': instance.context.data["fps"]
}
- if start_frame is not 0:
- in_info['custom_attributes'].update({
- 'fstart': start_frame,
- 'fend': start_frame + (
- instance.data["endFrame"] - instance.data["startFrame"])
- })
+
# adding SourceResolution if Tag was present
- s_res = instance.data.get("sourceResolution")
- if s_res and instance.data.get("main"):
- item = instance.data["item"]
- self.log.debug("TrackItem: `{0}`".format(
- item))
- width = int(item.source().mediaSource().width())
- height = int(item.source().mediaSource().height())
- self.log.info("Source Width and Height are: `{0} x {1}`".format(
- width, height))
+ if instance.data.get("main"):
in_info['custom_attributes'].update({
- "resolution_width": width,
- "resolution_height": height
+ "resolutionWidth": instance.data["width"],
+ "resolutionHeight": instance.data["height"],
+ "pixelAspect": instance.data["pixelAspect"]
})
in_info['tasks'] = instance.data['tasks']
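
The custom attributes now use the renamed keys from this release (`frameStart`/`frameEnd`, `resolutionWidth`/`resolutionHeight`, `pixelAspect`). An illustrative payload for a single shot, with hypothetical values:

```python
in_info = {
    "custom_attributes": {
        "handles": 0,
        "handleStart": 10,
        "handleEnd": 10,
        "frameStart": 1001,
        "frameEnd": 1051,
        "clipIn": 86400,
        "clipOut": 86450,
        "fps": 25.0,
        "resolutionWidth": 1920,
        "resolutionHeight": 1080,
        "pixelAspect": 1.0,
    },
    "tasks": ["Compositing"],
}
```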
diff --git a/pype/plugins/nukestudio/publish/collect_leader_clip.py b/pype/plugins/nukestudio/publish/collect_leader_clip.py
new file mode 100644
index 0000000000..62ef420316
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_leader_clip.py
@@ -0,0 +1,24 @@
+from pyblish import api
+
+
+class CollectLeaderClip(api.InstancePlugin):
+ """Collect Leader clip from selected track items. Clip with hierarchy Tag is defining sharable data attributes between other clips with `subset` tags. So `handle_start/end`, `frame_start`, etc"""
+
+ order = api.CollectorOrder + 0.0111
+ label = "Collect Leader Clip"
+ hosts = ["nukestudio"]
+ families = ['clip']
+
+ def process(self, instance):
+ # gets tags
+ tags = instance.data["tags"]
+
+ for t in tags:
+ t_metadata = dict(t["metadata"])
+ t_type = t_metadata.get("tag.label", "")
+ self.log.info("`hierarhy`: `{}`".format(t_type))
+ # gets only task family tags and collect labels
+ if "hierarchy" in t_type.lower():
+ if not instance.data.get("main"):
+ instance.data["main"] = True
+ self.log.info("`Leader Clip` found in instance.name: `{}`".format(instance.data["name"]))
diff --git a/pype/plugins/nukestudio/publish/collect_plates.py b/pype/plugins/nukestudio/publish/collect_plates.py
index abd02bfa78..7f6f4138cb 100644
--- a/pype/plugins/nukestudio/publish/collect_plates.py
+++ b/pype/plugins/nukestudio/publish/collect_plates.py
@@ -54,65 +54,31 @@ class CollectPlates(api.InstancePlugin):
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
- data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[1]
+ data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
+ 1]
)
- # # Timeline data.
- # handle_start = int(instance.data["handleStart"] + data["handles"])
- # handle_end = int(instance.data["handleEnd"] + data["handles"])
- # Timeline data.
- handle_start = int(instance.data["handleStart"])
- handle_end = int(instance.data["handleEnd"])
-
- source_in_h = data["sourceIn"] - handle_start
- source_out_h = data["sourceOut"] + handle_end
-
- timeline_in = int(data["item"].timelineIn())
- timeline_out = int(data["item"].timelineOut())
-
- timeline_frame_start = timeline_in - handle_start
- timeline_frame_end = timeline_out + handle_end
-
- frame_start = instance.data.get("frameStart", 1)
- frame_end = frame_start + (data["sourceOut"] - data["sourceIn"])
-
- data.update(
- {
- "sourceFirst": data["sourceFirst"],
- "sourceIn": data["sourceIn"],
- "sourceOut": data["sourceOut"],
- "sourceInH": source_in_h,
- "sourceOutH": source_out_h,
- "frameStart": frame_start,
- "startFrame": frame_start,
- "endFrame": frame_end,
- "timelineIn": timeline_in,
- "timelineOut": timeline_out,
- "timelineInHandles": timeline_frame_start,
- "timelineOutHandles": timeline_frame_end,
- "handleStart": handle_start,
- "handleEnd": handle_end
- }
- )
+ if "review" in instance.data["families"]:
+ data["label"] += " - review"
# adding SourceResolution if Tag was present
if instance.data.get("sourceResolution") and instance.data.get("main"):
item = instance.data["item"]
width = int(item.source().mediaSource().width())
height = int(item.source().mediaSource().height())
- self.log.info("Source Width and Height are: `{0} x {1}`".format(
- width, height))
+            pixel_aspect = float(item.source().mediaSource().pixelAspect())
+
+            self.log.info(
+                "Source resolution: `{0} x {1}`, pixel aspect: `{2}`".format(
+                    width, height, pixel_aspect))
data.update({
"width": width,
- "height": height
+ "height": height,
+ "pixelAspect": pixel_aspect
})
self.log.debug("Creating instance with name: {}".format(data["name"]))
instance.context.create_instance(**data)
- # # remove original instance
- # instance.context.remove(instance)
-
class CollectPlatesData(api.InstancePlugin):
"""Collect plates"""
@@ -124,6 +90,12 @@ class CollectPlatesData(api.InstancePlugin):
def process(self, instance):
import os
+ if "review" in instance.data.get("track", ""):
+ self.log.debug(
+ "Skipping \"{}\" because its `review` track "
+ "\"plate\"".format(instance)
+ )
+ return
# add to representations
if not instance.data.get("representations"):
@@ -135,9 +107,6 @@ class CollectPlatesData(api.InstancePlugin):
padding = int(anatomy.templates['render']['padding'])
name = instance.data["subset"]
- asset = instance.data["asset"]
- track = instance.data["track"]
- version = instance.data["version"]
source_path = instance.data["sourcePath"]
source_file = os.path.basename(source_path)
@@ -154,58 +123,24 @@ class CollectPlatesData(api.InstancePlugin):
item = instance.data["item"]
- # get handles
- handle_start = int(instance.data["handleStart"])
- handle_end = int(instance.data["handleEnd"])
+ transfer_data = [
+ "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
+ "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
+ "clipInH", "clipOutH", "asset", "track", "version", "width", "height", "pixelAspect"
+ ]
- # get source frames
- source_in = int(instance.data["sourceIn"])
- source_out = int(instance.data["sourceOut"])
-
- # get source frames
- frame_start = int(instance.data["startFrame"])
- frame_end = int(instance.data["endFrame"])
-
- # get source frames
- source_in_h = int(instance.data["sourceInH"])
- source_out_h = int(instance.data["sourceOutH"])
-
- # get timeline frames
- timeline_in = int(instance.data["timelineIn"])
- timeline_out = int(instance.data["timelineOut"])
-
- # frame-ranges with handles
- timeline_frame_start = int(instance.data["timelineInHandles"])
- timeline_frame_end = int(instance.data["timelineOutHandles"])
-
- # get colorspace
- colorspace = item.sourceMediaColourTransform()
-
- # get sequence from context, and fps
- fps = instance.data["fps"]
+ # pass data to version
+ version_data.update({k: instance.data[k] for k in transfer_data})
# add to data of representation
version_data.update({
- "handles": handle_start,
- "handleStart": handle_start,
- "handleEnd": handle_end,
- "sourceIn": source_in,
- "sourceOut": source_out,
- "startFrame": frame_start,
- "endFrame": frame_end,
- "timelineIn": timeline_in,
- "timelineOut": timeline_out,
- "timelineInHandles": timeline_frame_start,
- "timelineOutHandles": timeline_frame_end,
- "fps": fps,
- "colorspace": colorspace,
+ "handles": version_data['handleStart'],
+ "colorspace": item.sourceMediaColourTransform(),
+ "colorspaceScript": instance.context.data["colorspace"],
"families": [f for f in families if 'ftrack' not in f],
- "asset": asset,
"subset": name,
- "track": track,
- "version": int(version)
+ "fps": instance.context.data["fps"]
})
- instance.data["versionData"] = version_data
try:
basename, ext = os.path.splitext(source_file)
@@ -220,19 +155,20 @@ class CollectPlatesData(api.InstancePlugin):
padding=padding,
ext=ext
)
- self.log.debug("__ source_in_h: {}".format(source_in_h))
- self.log.debug("__ source_out_h: {}".format(source_out_h))
- start_frame = source_first_frame + source_in_h
- duration = source_out_h - source_in_h
+
+ start_frame = source_first_frame + instance.data["sourceInH"]
+ duration = instance.data["sourceOutH"] - instance.data["sourceInH"]
end_frame = start_frame + duration
+ self.log.debug("start_frame: `{}`".format(start_frame))
+ self.log.debug("end_frame: `{}`".format(end_frame))
files = [file % i for i in range(start_frame, (end_frame + 1), 1)]
except Exception as e:
- self.log.debug("Exception in file: {}".format(e))
+ self.log.warning("Exception in file: {}".format(e))
head, ext = os.path.splitext(source_file)
ext = ext[1:]
files = source_file
- start_frame = source_in_h
- end_frame = source_out_h
+ start_frame = instance.data["sourceInH"]
+ end_frame = instance.data["sourceOutH"]
mov_file = head + ".mov"
mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
@@ -242,10 +178,10 @@ class CollectPlatesData(api.InstancePlugin):
plates_mov_representation = {
'files': mov_file,
'stagingDir': staging_dir,
- 'startFrame': 0,
- 'endFrame': source_out - source_in + 1,
+ "frameStart": 0,
+ "frameEnd": instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
'step': 1,
- 'frameRate': fps,
+ 'fps': instance.context.data["fps"],
'preview': True,
'thumbnail': False,
'name': "preview",
@@ -258,8 +194,8 @@ class CollectPlatesData(api.InstancePlugin):
thumb_file = head + ".png"
thumb_path = os.path.join(staging_dir, thumb_file)
- self.log.debug("__ thumb_path: {}".format(thumb_path))
- thumbnail = item.thumbnail(source_in).save(
+
+ thumbnail = item.thumbnail(instance.data["sourceIn"]).save(
thumb_path,
format='png'
)
@@ -276,16 +212,41 @@ class CollectPlatesData(api.InstancePlugin):
thumb_representation)
# adding representation for plates
+ frame_start = instance.data["frameStart"] - \
+ instance.data["handleStart"]
+ frame_end = instance.data["frameEnd"] + instance.data["handleEnd"]
+
+ # exception for retimes
+ if instance.data.get("retime"):
+ source_in_h = instance.data["sourceInH"]
+ source_in = instance.data["sourceIn"]
+ source_handle_start = source_in_h - source_in
+ frame_start = instance.data["frameStart"] + source_handle_start
+ duration = instance.data["sourceOutH"] - instance.data["sourceInH"]
+ frame_end = frame_start + duration
+
plates_representation = {
'files': files,
'stagingDir': staging_dir,
'name': ext,
'ext': ext,
- 'startFrame': frame_start - handle_start,
- 'endFrame': frame_end + handle_end,
+ "frameStart": frame_start,
+ "frameEnd": frame_end,
}
instance.data["representations"].append(plates_representation)
+ # deal with retimed clip
+ if instance.data.get("retime"):
+ version_data.update({
+ "retime": True,
+ "speed": instance.data.get("speed", 1),
+ "timewarps": instance.data.get("timeWarpNodes", []),
+ "frameStart": frame_start,
+ "frameEnd": frame_end,
+ })
+
+ instance.data["versionData"] = version_data
+
# testing families
family = instance.data["family"]
families = instance.data["families"]
diff --git a/pype/plugins/nukestudio/publish/collect_remove_clip_instances.py b/pype/plugins/nukestudio/publish/collect_remove_clip_instances.py
new file mode 100644
index 0000000000..d41dc50ab1
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_remove_clip_instances.py
@@ -0,0 +1,17 @@
+from pyblish import api
+
+
+class CollectClipSubsets(api.InstancePlugin):
+    """Remove the original clip instances once their subsets are collected."""
+
+ order = api.CollectorOrder + 0.103
+ label = "Collect Remove Clip Instaces"
+ hosts = ["nukestudio"]
+ families = ['clip']
+
+ def process(self, instance):
+ context = instance.context
+
+ # removing original instance
+ self.log.info("Removing instance.name: `{}`".format(instance.data["name"]))
+
+ context.remove(instance)
diff --git a/pype/plugins/nukestudio/publish/collect_reviews.py b/pype/plugins/nukestudio/publish/collect_reviews.py
index 7b18c605a7..9fab0f0741 100644
--- a/pype/plugins/nukestudio/publish/collect_reviews.py
+++ b/pype/plugins/nukestudio/publish/collect_reviews.py
@@ -13,7 +13,7 @@ class CollectReviews(api.InstancePlugin):
"""
# Run just before CollectSubsets
- order = api.CollectorOrder + 0.1025
+ order = api.CollectorOrder + 0.1022
label = "Collect Reviews"
hosts = ["nukestudio"]
families = ["clip"]
@@ -41,30 +41,22 @@ class CollectReviews(api.InstancePlugin):
)
return
+ # add to representations
+ if not instance.data.get("representations"):
+ instance.data["representations"] = list()
+
if track in instance.data["track"]:
- self.log.debug("Track item on the track: {}".format(
- instance.data["track"]))
- # Collect data.
- subset = ""
- data = {}
- for key, value in instance.data.iteritems():
- data[key] = value
+ self.log.debug("Review will work on `subset`: {}".format(
+ instance.data["subset"]))
- data["family"] = family.lower()
- data["ftrackFamily"] = "img"
- data["families"] = ["ftrack"]
+ # change families
+ instance.data["family"] = "plate"
+ instance.data["families"] = ["review", "ftrack"]
- data["subset"] = family.lower() + subset.title()
- data["name"] = data["subset"] + "_" + data["asset"]
+ self.version_data(instance)
+ self.create_thumbnail(instance)
- data["label"] = "{} - {}".format(
- data['asset'], data["subset"]
- )
-
- data["source"] = data["sourcePath"]
-
- # self.log.debug("Creating instance with data: {}".format(data))
- instance.context.create_instance(**data)
+ rev_inst = instance
else:
self.log.debug("Track item on plateMain")
@@ -80,35 +72,89 @@ class CollectReviews(api.InstancePlugin):
"TrackItem from track name `{}` has to be also selected".format(
track)
)
-
- # add to representations
- if not instance.data.get("representations"):
- instance.data["representations"] = list()
-
- self.log.debug("Instance review: {}".format(rev_inst.data["name"]))
-
- # getting file path parameters
- file_path = rev_inst.data.get("sourcePath")
- file_dir = os.path.dirname(file_path)
- file = os.path.basename(file_path)
- ext = os.path.splitext(file)[-1][1:]
-
- # adding annotation to lablel
- instance.data["label"] += " + review (.{})".format(ext)
instance.data["families"].append("review")
- # adding representation for review mov
- representation = {
- "files": file,
- "stagingDir": file_dir,
- "startFrame": rev_inst.data.get("sourceIn"),
- "endFrame": rev_inst.data.get("sourceOut"),
- "step": 1,
- "frameRate": rev_inst.data.get("fps"),
- "preview": True,
- "thumbnail": False,
- "name": "preview",
- "ext": ext
- }
- instance.data["representations"].append(representation)
- self.log.debug("Added representation: {}".format(representation))
+ file_path = rev_inst.data.get("sourcePath")
+ file_dir = os.path.dirname(file_path)
+ file = os.path.basename(file_path)
+ ext = os.path.splitext(file)[-1][1:]
+
+ # change label
+ instance.data["label"] = "{0} - {1} - ({2}) - review".format(
+ instance.data['asset'], instance.data["subset"], ext
+ )
+
+ self.log.debug("Instance review: {}".format(rev_inst.data["name"]))
+
+ # adding representation for review mov
+ representation = {
+ "files": file,
+ "stagingDir": file_dir,
+ "frameStart": rev_inst.data.get("sourceIn"),
+ "frameEnd": rev_inst.data.get("sourceOut"),
+ "step": 1,
+ "fps": rev_inst.data.get("fps"),
+ "preview": True,
+ "thumbnail": False,
+ "name": "preview",
+ "ext": ext
+ }
+ instance.data["representations"].append(representation)
+
+ self.log.debug("Added representation: {}".format(representation))
+
+ def create_thumbnail(self, instance):
+ item = instance.data["item"]
+ source_in = instance.data["sourceIn"]
+
+ source_path = instance.data["sourcePath"]
+ source_file = os.path.basename(source_path)
+ head, ext = os.path.splitext(source_file)
+
+ # staging dir creation
+ staging_dir = os.path.dirname(
+ source_path)
+
+ thumb_file = head + ".png"
+ thumb_path = os.path.join(staging_dir, thumb_file)
+ self.log.debug("__ thumb_path: {}".format(thumb_path))
+ self.log.debug("__ source_in: {}".format(source_in))
+ thumbnail = item.thumbnail(source_in).save(
+ thumb_path,
+ format='png'
+ )
+ self.log.debug("__ thumbnail: {}".format(thumbnail))
+
+ thumb_representation = {
+ 'files': thumb_file,
+ 'stagingDir': staging_dir,
+ 'name': "thumbnail",
+ 'thumbnail': True,
+ 'ext': "png"
+ }
+ instance.data["representations"].append(
+ thumb_representation)
+
+ def version_data(self, instance):
+ item = instance.data["item"]
+
+ transfer_data = [
+ "handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart", "frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track", "version"
+ ]
+
+ version_data = dict()
+ # pass data to version
+ version_data.update({k: instance.data[k] for k in transfer_data})
+
+ # add to data of representation
+ version_data.update({
+ "handles": version_data['handleStart'],
+ "colorspace": item.sourceMediaColourTransform(),
+ "families": instance.data["families"],
+ "subset": instance.data["subset"],
+ "fps": instance.context.data["fps"]
+ })
+ instance.data["versionData"] = version_data
+
+ instance.data["source"] = instance.data["sourcePath"]
diff --git a/pype/plugins/nukestudio/publish/collect_selection.py b/pype/plugins/nukestudio/publish/collect_selection.py
index e87f9d03ec..28a529d560 100644
--- a/pype/plugins/nukestudio/publish/collect_selection.py
+++ b/pype/plugins/nukestudio/publish/collect_selection.py
@@ -14,12 +14,4 @@ class CollectSelection(pyblish.api.ContextPlugin):
self.log.debug("selection: {}".format(selection))
- if not selection:
- self.log.debug(
- "Nothing is selected. Collecting all items from sequence "
- "\"{}\"".format(hiero.ui.activeSequence())
- )
- for track in hiero.ui.activeSequence().items():
- selection.extend(track.items())
-
context.data["selection"] = selection
diff --git a/pype/plugins/nukestudio/publish/collect_shots.py b/pype/plugins/nukestudio/publish/collect_shots.py
index 26a4c7fb6b..c1fcf05b89 100644
--- a/pype/plugins/nukestudio/publish/collect_shots.py
+++ b/pype/plugins/nukestudio/publish/collect_shots.py
@@ -1,74 +1,48 @@
from pyblish import api
-class CollectShots(api.ContextPlugin):
+class CollectShots(api.InstancePlugin):
"""Collect Shot from Clip."""
# Run just before CollectClipSubsets
- order = api.CollectorOrder + 0.1025
+ order = api.CollectorOrder + 0.1021
label = "Collect Shots"
hosts = ["nukestudio"]
families = ["clip"]
- def process(self, context):
- for instance in context[:]:
- # Exclude non-tagged instances.
- tagged = False
- for tag in instance.data["tags"]:
- if tag["name"].lower() == "hierarchy":
- tagged = True
+ def process(self, instance):
+ # Exclude non-tagged instances.
+ tagged = False
+ for tag in instance.data["tags"]:
+ if tag["name"].lower() == "hierarchy":
+ tagged = True
- if not tagged:
- self.log.debug(
- "Skipping \"{}\" because its not tagged with "
- "\"Hierarchy\"".format(instance)
- )
- continue
-
- # Collect data.
- data = {}
- for key, value in instance.data.iteritems():
- data[key] = value
-
- data["family"] = "shot"
- data["families"] = []
- data["frameStart"] = instance.data.get("frameStart", 1)
-
- data["subset"] = data["family"] + "Main"
-
- data["name"] = data["subset"] + "_" + data["asset"]
-
- data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
- data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
+ if not tagged:
+ self.log.debug(
+ "Skipping \"{}\" because its not tagged with "
+ "\"Hierarchy\"".format(instance)
)
+ return
- # Get handles.
- data["handleStart"] = instance.data["handleStart"]
- data["handleEnd"] = instance.data["handleEnd"]
+ # Collect data.
+ data = {}
+ for key, value in instance.data.iteritems():
+ data[key] = value
- # Frame-ranges with handles.
- data["sourceInH"] = data["sourceIn"] - data["handleStart"]
- data["sourceOutH"] = data["sourceOut"] + data["handleEnd"]
+ data["family"] = "shot"
+ data["families"] = []
- # Get timeline frames.
- data["timelineIn"] = int(data["item"].timelineIn())
- data["timelineOut"] = int(data["item"].timelineOut())
+ data["subset"] = data["family"] + "Main"
- # Frame-ranges with handles.
- data["timelineInHandles"] = data["timelineIn"]
- data["timelineInHandles"] -= data["handleStart"]
- data["timelineOutHandles"] = data["timelineOut"]
- data["timelineOutHandles"] += data["handleEnd"]
+ data["name"] = data["subset"] + "_" + data["asset"]
- # Creating comp frame range.
- data["endFrame"] = (
- data["frameStart"] + (data["sourceOut"] - data["sourceIn"])
- )
+ data["label"] = data["asset"] + " - " + data["subset"] + " - tasks: {} - assetbuilds: {}".format(
+ data["tasks"], [x["name"] for x in data.get("assetbuilds", [])]
+ )
- # Get fps.
- sequence = instance.context.data["activeSequence"]
- data["fps"] = sequence.framerate()
-
- # Create instance.
- self.log.debug("Creating instance with: {}".format(data["name"]))
- instance.context.create_instance(**data)
+ # Create instance.
+ self.log.debug("Creating instance with: {}".format(data["name"]))
+ instance.context.create_instance(**data)
diff --git a/pype/plugins/nukestudio/publish/collect_subsets.py b/pype/plugins/nukestudio/publish/collect_subsets.py
deleted file mode 100644
index 95476b4db7..0000000000
--- a/pype/plugins/nukestudio/publish/collect_subsets.py
+++ /dev/null
@@ -1,208 +0,0 @@
-from pyblish import api
-from copy import deepcopy
-
-
-class CollectClipSubsets(api.InstancePlugin):
- """Collect Subsets from selected Clips, Tags, Preset."""
-
- order = api.CollectorOrder + 0.103
- label = "Collect Subsets"
- hosts = ["nukestudio"]
- families = ['clip']
-
- def process(self, instance):
- context = instance.context
-
- asset_name = instance.data["asset"]
-
- # get all subsets from tags and match them with nks_presets >
- # > looks to rules for tasks, subsets, representations
- subsets_collection = self.get_subsets_from_presets(instance)
-
- # iterate trough subsets and create instances
- for subset, attrs in subsets_collection.items():
- self.log.info((subset, attrs))
- # create families
- item = instance.data["item"]
- family = instance.data["family"]
- families = attrs["families"] + [str(subset)]
- task = attrs["task"]
- subset = "{0}{1}".format(
- subset,
- instance.data.get("subsetType") or "Default")
- instance_name = "{0}_{1}_{2}".format(asset_name, task, subset)
- self.log.info("Creating instance with name: {}".format(
- instance_name))
-
- # get handles
- handles = int(instance.data["handles"])
- handle_start = int(instance.data["handleStart"] + handles)
- handle_end = int(instance.data["handleEnd"] + handles)
-
- # get source frames
- source_first = int(instance.data["sourceFirst"])
- source_in = int(instance.data["sourceIn"])
- source_out = int(instance.data["sourceOut"])
-
- # frame-ranges with handles
- source_in_h = source_in - handle_start
- source_out_h = source_out + handle_end
-
- # get timeline frames
- timeline_in = int(item.timelineIn())
- timeline_out = int(item.timelineOut())
-
- # frame-ranges with handles
- timeline_frame_start = timeline_in - handle_start
- timeline_frame_end = timeline_out + handle_end
-
- # creating comp frame range
- frame_start = instance.data["frameStart"]
- frame_end = frame_start + (source_out - source_in)
-
- # get sequence from context, and fps
- sequence = context.data["activeSequence"]
- fps = sequence.framerate()
-
- context.create_instance(
- name=instance_name,
- subset=subset,
- asset=asset_name,
- track=instance.data.get("track"),
- item=item,
- task=task,
- sourcePath=instance.data.get("sourcePath"),
- family=family,
- families=families,
- sourceFirst=source_first,
- sourceIn=source_in,
- sourceOut=source_out,
- sourceInH=source_in_h,
- sourceOutH=source_out_h,
- frameStart=frame_start,
- startFrame=frame_start,
- endFrame=frame_end,
- timelineIn=timeline_in,
- timelineOut=timeline_out,
- timelineInHandles=timeline_frame_start,
- timelineOutHandles=timeline_frame_end,
- fps=fps,
- handles=instance.data["handles"],
- handleStart=handle_start,
- handleEnd=handle_end,
- attributes=attrs,
- version=instance.data["version"],
- hierarchy=instance.data.get("hierarchy", None),
- parents=instance.data.get("parents", None),
- publish=True
- )
-
- # removing original instance
- context.remove(instance)
-
- def get_subsets_from_presets(self, instance):
-
- family = instance.data["family"]
- # get presets and tags
- tag_tasks = instance.data["tasks"]
- presets = instance.context.data['presets']
- nks_presets = presets[instance.context.data['host']]
- family_default_preset = nks_presets["asset_default"].get(family)
-
- if family_default_preset:
- frame_start = family_default_preset.get("fstart", 1)
- instance.data["frameStart"] = int(frame_start)
-
- # get specific presets
- pr_host_tasks = deepcopy(
- nks_presets["rules_tasks"]).get("hostTasks", None)
-
- subsets_collect = dict()
- # iterate tags and collect subset properities from presets
- for task in tag_tasks:
- self.log.info("__ task: {}".format(task))
- try:
- # get host for task
- host = None
- host = [h for h, tasks in pr_host_tasks.items()
- if task in tasks][0]
- except IndexError:
- pass
-
- try:
- # get subsets for task
- subsets = None
- #subsets = pr_host_subsets[host]
- except KeyError:
- pass
-
- if not subsets:
- continue
-
- # get subsets for task
- for sub in subsets:
- # get specific presets
- pr_subsets = deepcopy(nks_presets["rules_subsets"])
- pr_representations = deepcopy(
- nks_presets["rules_representations"])
-
- # initialise collection dictionary
- subs_data = dict()
-
- # gets subset properities
- subs_data[sub] = None
- subs_data[sub] = pr_subsets.get(sub, None)
-
- # gets representation if in keys
- if subs_data[sub] and (
- "representation" in subs_data[sub].keys()
- ):
- repr_name = subs_data[sub]["representation"]
-
- # owerwrite representation key with values from preset
- subs_data[sub]["representation"] = pr_representations[
- repr_name
- ]
- subs_data[sub]["representation"]["name"] = repr_name
-
- # gets nodes and presets data if in keys
- # gets nodes if any
- if subs_data[sub] and (
- "nodes" in subs_data[sub].keys()
- ):
- # iterate trough each node
- for k in subs_data[sub]["nodes"]:
- pr_node = k
- pr_family = subs_data[sub]["nodes"][k]["family"]
-
- # create attribute dict for later filling
- subs_data[sub]["nodes"][k]["attributes"] = dict()
-
- # iterate presets for the node
- for p, path in subs_data[sub]["nodes"][k][
- "presets"].items():
-
- # adds node type and family for preset path
- nPath = path + [pr_node, pr_family]
-
- # create basic iternode to be wolked trough until
- # found presets at the end
- iternode = presets[p]
- for part in nPath:
- iternode = iternode[part]
-
- iternode = {k: v for k, v in iternode.items()
- if not k.startswith("_")}
- # adds found preset to attributes of the node
- subs_data[sub]["nodes"][k][
- "attributes"].update(iternode)
-
- # removes preset key
- subs_data[sub]["nodes"][k].pop("presets")
-
- # add all into dictionary
- self.log.info("__ subs_data[sub]: {}".format(subs_data[sub]))
- subs_data[sub]["task"] = task.lower()
- subsets_collect.update(subs_data)
-
- return subsets_collect
diff --git a/pype/plugins/nukestudio/publish/collect_tag_framestart.py b/pype/plugins/nukestudio/publish/collect_tag_framestart.py
index 244a86e9f4..1342d996ab 100644
--- a/pype/plugins/nukestudio/publish/collect_tag_framestart.py
+++ b/pype/plugins/nukestudio/publish/collect_tag_framestart.py
@@ -1,5 +1,5 @@
from pyblish import api
-
+import os
class CollectClipTagFrameStart(api.InstancePlugin):
"""Collect FrameStart from Tags of selected track items."""
@@ -19,5 +19,22 @@ class CollectClipTagFrameStart(api.InstancePlugin):
# gets only task family tags and collect labels
if "frameStart" in t_family:
- t_number = t_metadata.get("tag.number", "")
- instance.data["frameStart"] = int(t_number)
+ t_value = t_metadata.get("tag.value", None)
+
+ # backward compatibility
+ t_number = t_metadata.get("tag.number", None)
+ start_frame = t_number or t_value
+
+ try:
+ start_frame = int(start_frame)
+        except (ValueError, TypeError):
+            if t_value and "source" in t_value:
+ source_first = instance.data["sourceFirst"]
+ source_in = instance.data["sourceIn"]
+ handle_start = instance.data["handleStart"]
+ start_frame = (source_first + source_in) - handle_start
+
+ instance.data["startingFrame"] = start_frame
+ self.log.info("Start frame on `{0}` set to `{1}`".format(
+ instance, start_frame
+ ))
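
A frameStart tag value is either a number or the literal string `source`. A sketch of the same resolution as a standalone function, using the `(ValueError, TypeError)` guard from above (numbers are illustrative):

```python
def resolve_start_frame(tag_value, source_first, source_in, handle_start):
    """Numeric tag values win; "source" maps the start frame to the
    plate's own first frame minus the start handle."""
    try:
        return int(tag_value)
    except (ValueError, TypeError):
        # also reached when the tag carries no value at all (None)
        if tag_value and "source" in tag_value:
            return (source_first + source_in) - handle_start
        return None


assert resolve_start_frame("1001", 0, 0, 0) == 1001
assert resolve_start_frame("source", 997, 4, 10) == 991
```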
diff --git a/pype/plugins/nukestudio/publish/collect_tag_main.py b/pype/plugins/nukestudio/publish/collect_tag_main.py
deleted file mode 100644
index 36d9b95554..0000000000
--- a/pype/plugins/nukestudio/publish/collect_tag_main.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from pyblish import api
-
-
-class CollectClipTagTypes(api.InstancePlugin):
- """Collect Types from Tags of selected track items."""
-
- order = api.CollectorOrder + 0.012
- label = "Collect main flag"
- hosts = ["nukestudio"]
- families = ['clip']
-
- def process(self, instance):
- # gets tags
- tags = instance.data["tags"]
-
- for t in tags:
- t_metadata = dict(t["metadata"])
- t_family = t_metadata.get("tag.family", "")
-
- # gets only task family tags and collect labels
- if "plate" in t_family:
- t_subset = t_metadata.get("tag.subset", "")
- subset_name = "{0}{1}".format(
- t_family,
- t_subset.capitalize())
-
- if "plateMain" in subset_name:
- if not instance.data.get("main"):
- instance.data["main"] = True
- self.log.info("`plateMain` found in instance.name: `{}`".format(
- instance.data["name"]))
- return
diff --git a/pype/plugins/nukestudio/publish/collect_tag_retime.py b/pype/plugins/nukestudio/publish/collect_tag_retime.py
new file mode 100644
index 0000000000..32e49e1b2a
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_tag_retime.py
@@ -0,0 +1,32 @@
+from pyblish import api
+
+
+class CollectTagRetime(api.InstancePlugin):
+ """Collect Retiming from Tags of selected track items."""
+
+ order = api.CollectorOrder + 0.014
+ label = "Collect Retiming Tag"
+ hosts = ["nukestudio"]
+ families = ['clip']
+
+ def process(self, instance):
+ # gets tags
+ tags = instance.data["tags"]
+
+ for t in tags:
+ t_metadata = dict(t["metadata"])
+ t_family = t_metadata.get("tag.family", "")
+
+            # collect only tags of the retiming family
+ if "retiming" in t_family:
+ margin_in = t_metadata.get("tag.marginIn", "")
+ margin_out = t_metadata.get("tag.marginOut", "")
+
+ instance.data["retimeMarginIn"] = int(margin_in)
+ instance.data["retimeMarginOut"] = int(margin_out)
+ instance.data["retime"] = True
+
+ self.log.info("retimeMarginIn: `{}`".format(margin_in))
+ self.log.info("retimeMarginOut: `{}`".format(margin_out))
+
+ instance.data["families"] += ["retime"]
diff --git a/pype/plugins/nukestudio/publish/collect_tag_subsets.py b/pype/plugins/nukestudio/publish/collect_tag_subsets.py
new file mode 100644
index 0000000000..0d42000896
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/collect_tag_subsets.py
@@ -0,0 +1,28 @@
+from pyblish import api
+
+
+class CollectClipSubsetsTags(api.InstancePlugin):
+ """Collect Subsets from Tags of selected track items."""
+
+ order = api.CollectorOrder + 0.012
+ label = "Collect Tags Subsets"
+ hosts = ["nukestudio"]
+ families = ['clip']
+
+ def process(self, instance):
+ # gets tags
+ tags = instance.data["tags"]
+
+ for t in tags:
+ t_metadata = dict(t["metadata"])
+ t_family = t_metadata.get("tag.family", None)
+ t_subset = t_metadata.get("tag.subset", None)
+
+            # collect only tags that define both family and subset
+ if t_subset and t_family:
+ subset_name = "{0}{1}".format(
+ t_family,
+ t_subset.capitalize())
+ instance.data['subset'] = subset_name
+
+ self.log.info("`subset`: {0} found in `instance.name`: `{1}`".format(subset_name, instance.data["name"]))
diff --git a/pype/plugins/nukestudio/publish/extract_effects.py b/pype/plugins/nukestudio/publish/extract_effects.py
new file mode 100644
index 0000000000..5e2d95d943
--- /dev/null
+++ b/pype/plugins/nukestudio/publish/extract_effects.py
@@ -0,0 +1,231 @@
+import os
+import json
+import re
+import pyblish.api
+import tempfile
+from avalon import io, api
+
+
+class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
+ """Collect video tracks effects into context."""
+
+ order = pyblish.api.ExtractorOrder
+ label = "Export Soft Lut Effects"
+ families = ["lut"]
+
+ def process(self, instance):
+ item = instance.data["item"]
+ effects = instance.data.get("effectTrackItems")
+
+ instance.data["families"] = [f for f in instance.data.get("families", []) if f not in ["lut"]]
+
+ self.log.debug("___ instance.data[families]: `{}`".format(instance.data["families"]))
+
+ # skip any without effects
+ if not effects:
+ return
+
+ subset = instance.data.get("subset")
+ subset_split = re.findall(r'[A-Z][^A-Z]*', subset)
+
+ if len(subset_split) > 0:
+ root_name = subset.replace(subset_split[0], "")
+ subset_split.insert(0, root_name.capitalize())
+
+ subset_split.insert(0, "lut")
+
+ self.log.debug("creating staging dir")
+ # staging_dir = self.staging_dir(instance)
+
+ # TODO: only provisory will be replace by function
+ staging_dir = instance.data.get('stagingDir', None)
+
+ if not staging_dir:
+ staging_dir = os.path.normpath(
+ tempfile.mkdtemp(prefix="pyblish_tmp_")
+ )
+ instance.data['stagingDir'] = staging_dir
+
+ self.log.debug("creating staging dir: `{}`".format(staging_dir))
+
+ transfers = list()
+ if "transfers" not in instance.data:
+ instance.data["transfers"] = list()
+
+ name = "".join(subset_split)
+ ext = "json"
+ file = name + "." + ext
+
+ # create new instance and inherit data
+ data = {}
+ for key, value in instance.data.iteritems():
+ data[key] = value
+
+ # change names
+ data["subset"] = name
+ data["family"] = "lut"
+ data["families"] = []
+ data["name"] = data["subset"] + "_" + data["asset"]
+ data["label"] = "{} - {} - ({})".format(
+ data['asset'], data["subset"], os.path.splitext(file)[1]
+ )
+ data["source"] = data["sourcePath"]
+
+ # create new instance
+ instance = instance.context.create_instance(**data)
+
+ dst_dir = self.resource_destination_dir(instance)
+
+ # change paths in effects to files
+ for k, effect in effects["effects"].items():
+ trn = self.copy_linked_files(effect, dst_dir)
+ if trn:
+ transfers.append((trn[0], trn[1]))
+
+ instance.data["transfers"].extend(transfers)
+ self.log.debug("_ transfers: `{}`".format(
+ instance.data["transfers"]))
+
+ # create representations
+ instance.data["representations"] = list()
+
+ transfer_data = [
+ "handleStart", "handleEnd", "sourceIn", "sourceOut",
+ "frameStart", "frameEnd", "sourceInH", "sourceOutH",
+ "clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
+ "version"
+ ]
+
+ # pass data to version
+ version_data = dict()
+ version_data.update({k: instance.data[k] for k in transfer_data})
+
+ # add to data of representation
+ version_data.update({
+ "handles": version_data['handleStart'],
+ "colorspace": item.sourceMediaColourTransform(),
+ "colorspaceScript": instance.context.data["colorspace"],
+ "families": ["plate", "lut"],
+ "subset": name,
+ "fps": instance.context.data["fps"]
+ })
+ instance.data["versionData"] = version_data
+
+ representation = {
+ 'files': file,
+ 'stagingDir': staging_dir,
+ 'name': "lut" + ext.title(),
+ 'ext': ext
+ }
+ instance.data["representations"].append(representation)
+
+ self.log.debug("_ representations: `{}`".format(
+ instance.data["representations"]))
+
+ self.log.debug("_ version_data: `{}`".format(
+ instance.data["versionData"]))
+
+ with open(os.path.join(staging_dir, file), "w") as outfile:
+ outfile.write(json.dumps(effects, indent=4, sort_keys=True))
+
+ return
+
+ def copy_linked_files(self, effect, dst_dir):
+ for k, v in effect["node"].items():
+ if k in "file" and v is not '':
+ base_name = os.path.basename(v)
+ dst = os.path.join(dst_dir, base_name).replace("\\", "/")
+
+ # add it to the json
+ effect["node"][k] = dst
+ return (v, dst)
+
+ def resource_destination_dir(self, instance):
+ anatomy = instance.context.data['anatomy']
+ self.create_destination_template(instance, anatomy)
+
+ return os.path.join(
+ instance.data["assumedDestination"],
+ "resources"
+ )
+
+ def create_destination_template(self, instance, anatomy):
+        """Create a filepath based on the currently available data.
+
+        Example template:
+            {root}/{project}/{silo}/{asset}/publish/{subset}/v{version:0>3}/
+            {subset}.{representation}
+
+        Args:
+            instance: the instance to publish
+            anatomy: project Anatomy providing the publish templates
+
+        Returns:
+            None: the destination is stored in
+                instance.data["assumedDestination"]
+        """
+
+ # get all the stuff from the database
+ subset_name = instance.data["subset"]
+ self.log.info(subset_name)
+ asset_name = instance.data["asset"]
+ project_name = api.Session["AVALON_PROJECT"]
+ a_template = anatomy.templates
+
+ project = io.find_one({"type": "project",
+ "name": project_name},
+ projection={"config": True, "data": True})
+
+ template = a_template['publish']['path']
+ # anatomy = instance.context.data['anatomy']
+
+ asset = io.find_one({"type": "asset",
+ "name": asset_name,
+ "parent": project["_id"]})
+
+ assert asset, ("No asset found by the name '{}' "
+ "in project '{}'".format(asset_name, project_name))
+ silo = asset['silo']
+
+ subset = io.find_one({"type": "subset",
+ "name": subset_name,
+ "parent": asset["_id"]})
+
+ # assume there is no version yet, we start at `1`
+ version = None
+ version_number = 1
+ if subset is not None:
+ version = io.find_one({"type": "version",
+ "parent": subset["_id"]},
+ sort=[("name", -1)])
+
+            # if there is a subset there ought to be a version
+ if version is not None:
+ version_number += version["name"]
+
+ if instance.data.get('version'):
+ version_number = int(instance.data.get('version'))
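+        # e.g. if the latest existing version is 3, version_number
+        # becomes 4, unless the instance carries an explicit override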
+
+ padding = int(a_template['render']['padding'])
+
+ hierarchy = asset['data']['parents']
+ if hierarchy:
+ # hierarchy = os.path.sep.join(hierarchy)
+ hierarchy = "/".join(hierarchy)
+
+ template_data = {"root": api.Session["AVALON_PROJECTS"],
+ "project": {"name": project_name,
+ "code": project['data']['code']},
+ "silo": silo,
+ "family": instance.data['family'],
+ "asset": asset_name,
+ "subset": subset_name,
+ "frame": ('#' * padding),
+ "version": version_number,
+ "hierarchy": hierarchy,
+ "representation": "TEMP"}
+
+ instance.data["assumedTemplateData"] = template_data
+ self.log.info(template_data)
+ instance.data["template"] = template
+ # We take the parent folder of representation 'filepath'
+ instance.data["assumedDestination"] = os.path.dirname(
+ anatomy.format(template_data)["publish"]["path"]
+ )
diff --git a/pype/premiere/__init__.py b/pype/premiere/__init__.py
index a331ef6514..05dca3d277 100644
--- a/pype/premiere/__init__.py
+++ b/pype/premiere/__init__.py
@@ -6,6 +6,7 @@ from avalon import api as avalon
from pyblish import api as pyblish
from pypeapp import Logger
from .. import api
+from pype.aport.lib import set_avalon_workdir
from ..widgets.message_window import message
@@ -75,7 +76,7 @@ def extensions_sync():
def install():
- api.set_avalon_workdir()
+ set_avalon_workdir()
    log.info("Registering Premiere plug-ins..")
reg_paths = request_aport("/api/register_plugin_path",
diff --git a/pype/scripts/fusion_switch_shot.py b/pype/scripts/fusion_switch_shot.py
index 92271a3b7c..26a93b9b9a 100644
--- a/pype/scripts/fusion_switch_shot.py
+++ b/pype/scripts/fusion_switch_shot.py
@@ -138,8 +138,8 @@ def update_frame_range(comp, representations):
versions = io.find({"type": "version", "_id": {"$in": version_ids}})
versions = list(versions)
- start = min(v["data"]["startFrame"] for v in versions)
- end = max(v["data"]["endFrame"] for v in versions)
+ start = min(v["data"]["frameStart"] for v in versions)
+ end = max(v["data"]["frameEnd"] for v in versions)
fusion_lib.update_frame_range(start, end, comp=comp)
diff --git a/pype/scripts/publish_filesequence.py b/pype/scripts/publish_filesequence.py
index 123c35cf70..25ed4135c3 100644
--- a/pype/scripts/publish_filesequence.py
+++ b/pype/scripts/publish_filesequence.py
@@ -51,11 +51,18 @@ def __main__():
elif platform.system().lower() == "windows":
pype_command = "pype.bat"
- args = [os.path.join(pype_root, pype_command),
- "--node", "--publish", "--paths", " ".join(paths)]
+ args = [
+ os.path.join(pype_root, pype_command),
+ "publish",
+ " ".join(paths)
+ ]
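+    # e.g. args -> ["C:/pype/pype.bat", "publish", "C:/renders/shot010"]
+    # (hypothetical paths; the joined paths form a single argument)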
print("Pype command: {}".format(" ".join(args)))
- subprocess.call(args, shell=True)
+    # Force-forward the environment because environment inheritance does
+    # not always work.
+ exit_code = subprocess.call(args, env=os.environ)
+ if exit_code != 0:
+ raise ValueError("Publishing failed.")
if __name__ == '__main__':
diff --git a/pype/services/timers_manager/timers_manager.py b/pype/services/timers_manager/timers_manager.py
index e1980d3d90..2259dfc34d 100644
--- a/pype/services/timers_manager/timers_manager.py
+++ b/pype/services/timers_manager/timers_manager.py
@@ -78,7 +78,15 @@ class TimersManager(metaclass=Singleton):
'task_name': 'Lookdev BG'
}
'''
+ if len(data['hierarchy']) < 1:
+ self.log.error((
+                'Timer was not started: the task is a direct child of the'
+                ' project, which is not allowed in Pype.'
+ ))
+ return
+
self.last_task = data
+
for module in self.modules:
module.start_timer_manager(data)
self.is_running = True
diff --git a/pype/standalonepublish/widgets/widget_component_item.py b/pype/standalonepublish/widgets/widget_component_item.py
index a58a292ec5..9631fed258 100644
--- a/pype/standalonepublish/widgets/widget_component_item.py
+++ b/pype/standalonepublish/widgets/widget_component_item.py
@@ -301,11 +301,11 @@ class ComponentItem(QtWidgets.QFrame):
'preview': self.is_preview()
}
- if ('startFrame' in self.in_data and 'endFrame' in self.in_data):
- data['startFrame'] = self.in_data['startFrame']
- data['endFrame'] = self.in_data['endFrame']
+ if ("frameStart" in self.in_data and "frameEnd" in self.in_data):
+ data["frameStart"] = self.in_data["frameStart"]
+ data["frameEnd"] = self.in_data["frameEnd"]
- if 'frameRate' in self.in_data:
- data['frameRate'] = self.in_data['frameRate']
+ if 'fps' in self.in_data:
+ data['fps'] = self.in_data['fps']
return data
diff --git a/pype/standalonepublish/widgets/widget_drop_frame.py b/pype/standalonepublish/widgets/widget_drop_frame.py
index c792813a22..e60db892db 100644
--- a/pype/standalonepublish/widgets/widget_drop_frame.py
+++ b/pype/standalonepublish/widgets/widget_drop_frame.py
@@ -185,8 +185,8 @@ class DropDataFrame(QtWidgets.QFrame):
'name': file_base,
'ext': file_ext,
'file_info': range,
- 'startFrame': startFrame,
- 'endFrame': endFrame,
+ "frameStart": startFrame,
+ "frameEnd": endFrame,
'representation': repr_name,
'folder_path': folder_path,
'is_sequence': True,
@@ -253,24 +253,24 @@ class DropDataFrame(QtWidgets.QFrame):
ext in self.presets['extensions']['video_file']
):
probe_data = self.load_data_with_probe(filepath)
- if 'frameRate' not in data:
+ if 'fps' not in data:
# default value
- frameRate = 25
- frameRate_string = probe_data.get('r_frame_rate')
- if frameRate_string:
- frameRate = int(frameRate_string.split('/')[0])
+ fps = 25
+ fps_string = probe_data.get('r_frame_rate')
+ if fps_string:
+ fps = int(fps_string.split('/')[0])
- output['frameRate'] = frameRate
+ output['fps'] = fps
- if 'startFrame' not in data or 'endFrame' not in data:
+ if "frameStart" not in data or "frameEnd" not in data:
startFrame = endFrame = 1
endFrame_string = probe_data.get('nb_frames')
if endFrame_string:
endFrame = int(endFrame_string)
- output['startFrame'] = startFrame
- output['endFrame'] = endFrame
+ output["frameStart"] = startFrame
+ output["frameEnd"] = endFrame
if (ext == '.mov') and (not file_info):
file_info = probe_data.get('codec_name')
diff --git a/pype/templates.py b/pype/templates.py
deleted file mode 100644
index 9fe2e8c68c..0000000000
--- a/pype/templates.py
+++ /dev/null
@@ -1,246 +0,0 @@
-import os
-import re
-import sys
-from avalon import io, api as avalon, lib as avalonlib
-from . import lib
-# from pypeapp.api import (Templates, Logger, format)
-from pypeapp import Logger, Anatomy
-log = Logger().get_logger(__name__, os.getenv("AVALON_APP", "pype-config"))
-
-
-self = sys.modules[__name__]
-self.SESSION = None
-
-
-def set_session():
- lib.set_io_database()
- self.SESSION = avalon.session
-
-
-def get_version_from_path(file):
- """
- Finds version number in file path string
-
- Args:
- file (string): file path
-
- Returns:
- v: version number in string ('001')
-
- """
- pattern = re.compile(r"[\._]v([0-9]*)")
- try:
- v = pattern.findall(file)[0]
- return v
- except IndexError:
- log.error("templates:get_version_from_workfile:"
- "`{}` missing version string."
- "Example `v004`".format(file))
-
-
-def get_project_code():
- """
- Obtain project code from database
-
- Returns:
- string: project code
- """
-
- return io.find_one({"type": "project"})["data"].get("code", '')
-
-
-def set_project_code(code):
- """
- Set project code into os.environ
-
- Args:
- code (string): project code
-
- Returns:
- os.environ[KEY]: project code
- avalon.sesion[KEY]: project code
- """
- if self.SESSION is None:
- set_session()
- self.SESSION["AVALON_PROJECTCODE"] = code
- os.environ["AVALON_PROJECTCODE"] = code
-
-
-def get_project_name():
- """
- Obtain project name from environment variable
-
- Returns:
- string: project name
-
- """
- if self.SESSION is None:
- set_session()
- project_name = self.SESSION.get("AVALON_PROJECT", None) \
- or os.getenv("AVALON_PROJECT", None)
- assert project_name, log.error("missing `AVALON_PROJECT`"
- "in avalon session "
- "or os.environ!")
- return project_name
-
-
-def get_asset():
- """
- Obtain Asset string from session or environment variable
-
- Returns:
- string: asset name
-
- Raises:
- log: error
- """
- if self.SESSION is None:
- set_session()
- asset = self.SESSION.get("AVALON_ASSET", None) \
- or os.getenv("AVALON_ASSET", None)
- log.info("asset: {}".format(asset))
- assert asset, log.error("missing `AVALON_ASSET`"
- "in avalon session "
- "or os.environ!")
- return asset
-
-
-def get_task():
- """
- Obtain Task string from session or environment variable
-
- Returns:
- string: task name
-
- Raises:
- log: error
- """
- if self.SESSION is None:
- set_session()
- task = self.SESSION.get("AVALON_TASK", None) \
- or os.getenv("AVALON_TASK", None)
- assert task, log.error("missing `AVALON_TASK`"
- "in avalon session "
- "or os.environ!")
- return task
-
-
-def get_hierarchy():
- """
- Obtain asset hierarchy path string from mongo db
-
- Returns:
- string: asset hierarchy path
-
- """
- parents = io.find_one({
- "type": 'asset',
- "name": get_asset()}
- )['data']['parents']
-
- hierarchy = ""
- if parents and len(parents) > 0:
- # hierarchy = os.path.sep.join(hierarchy)
- hierarchy = os.path.join(*parents).replace("\\", "/")
- return hierarchy
-
-
-def set_hierarchy(hierarchy):
- """
- Updates os.environ and session with asset hierarchy
-
- Args:
- hierarchy (string): hierarchy path ("silo/folder/seq")
- """
- if self.SESSION is None:
- set_session()
- self.SESSION["AVALON_HIERARCHY"] = hierarchy
- os.environ["AVALON_HIERARCHY"] = hierarchy
-
-
-def get_context_data(project=None,
- hierarchy=None,
- asset=None,
- task=None):
- """
- Collect all main contextual data
-
- Args:
- project (string, optional): project name
- hierarchy (string, optional): hierarchy path
- asset (string, optional): asset name
- task (string, optional): task name
-
- Returns:
- dict: contextual data
-
- """
- application = avalonlib.get_application(os.environ["AVALON_APP_NAME"])
- data = {
- "task": task or get_task(),
- "asset": asset or get_asset(),
- "project": {"name": project or get_project_name(),
- "code": get_project_code()},
- "hierarchy": hierarchy or get_hierarchy(),
- "app": application["application_dir"]
- }
- return data
-
-
-def set_avalon_workdir(project=None,
- hierarchy=None,
- asset=None,
- task=None):
- """
- Updates os.environ and session with filled workdir
-
- Args:
- project (string, optional): project name
- hierarchy (string, optional): hierarchy path
- asset (string, optional): asset name
- task (string, optional): task name
-
- Returns:
- os.environ[AVALON_WORKDIR]: workdir path
- avalon.session[AVALON_WORKDIR]: workdir path
-
- """
- if self.SESSION is None:
- set_session()
-
- awd = self.SESSION.get("AVALON_WORKDIR", None) or \
- os.getenv("AVALON_WORKDIR", None)
-
- data = get_context_data(project, hierarchy, asset, task)
-
- if (not awd) or ("{" not in awd):
- awd = get_workdir_template(data)
-
- awd_filled = os.path.normpath(format(awd, data))
-
- self.SESSION["AVALON_WORKDIR"] = awd_filled
- os.environ["AVALON_WORKDIR"] = awd_filled
- log.info("`AVALON_WORKDIR` fixed to: {}".format(awd_filled))
-
-
-def get_workdir_template(data=None):
- """
- Obtain workdir templated path from Anatomy()
-
- Args:
- data (dict, optional): basic contextual data
-
- Returns:
- string: template path
- """
-
- anatomy = Anatomy()
- anatomy_filled = anatomy.format(data or get_context_data())
-
- try:
- work = anatomy_filled["work"]
- except Exception as e:
- log.error("{0} Error in "
- "get_workdir_template(): {1}".format(__name__, e))
-
- return work["folder"]
diff --git a/pype/tools/assetcreator/app.py b/pype/tools/assetcreator/app.py
index 6f0effbf5f..fea810c78f 100644
--- a/pype/tools/assetcreator/app.py
+++ b/pype/tools/assetcreator/app.py
@@ -6,6 +6,7 @@ try:
import ftrack_api_old as ftrack_api
except Exception:
import ftrack_api
+from pypeapp import config
from pype import lib as pypelib
from avalon.vendor.Qt import QtWidgets, QtCore
from avalon import io, api, style, schema
@@ -194,18 +195,15 @@ class Window(QtWidgets.QDialog):
ft_project = session.query(project_query).one()
schema_name = ft_project['project_schema']['name']
# Load config
- preset_path = pypelib.get_presets_path()
- schemas_items = [preset_path, 'ftrack', 'project_schemas']
- schema_dir = os.path.sep.join(schemas_items)
+ schemas_items = config.get_presets().get('ftrack', {}).get(
+ 'project_schemas', {}
+ )
- config_file = 'default.json'
- for filename in os.listdir(schema_dir):
- if filename.startswith(schema_name):
- config_file = filename
- break
- config_file = os.path.sep.join([schema_dir, config_file])
- with open(config_file) as data_file:
- self.config_data = json.load(data_file)
+ key = "default"
+ if schema_name in schemas_items:
+ key = schema_name
+
+ self.config_data = schemas_items[key]
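+        # presets are expected in the form (hypothetical example):
+        # {"ftrack": {"project_schemas": {"default": {...}, "<schema>": {...}}}}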
# set outlink
input_outlink = self.data['inputs']['outlink']
@@ -396,7 +394,7 @@ class Window(QtWidgets.QDialog):
new_asset_info = {
'parent': av_project['_id'],
'name': name,
- 'schema': pypelib.get_avalon_asset_template_schema(),
+ 'schema': "avalon-core:asset-2.0",
'silo': silo,
'type': 'asset',
'data': new_asset_data
diff --git a/pype/widgets/popup.py b/pype/widgets/popup.py
index 8f28dc5269..7c0fa0f5c5 100644
--- a/pype/widgets/popup.py
+++ b/pype/widgets/popup.py
@@ -124,6 +124,26 @@ class Popup2(Popup):
fix = self.widgets["show"]
fix.setText("Fix")
+ def calculate_window_geometry(self):
+        """Calculate the window geometry.
+
+        On creation, center the window on the screen of the parent
+        widget, or on the screen under the cursor when parentless.
+
+ """
+ parent_widget = self.parent()
+
+ app = QtWidgets.QApplication.instance()
+ if parent_widget:
+ screen = app.desktop().screenNumber(parent_widget)
+ else:
+ screen = app.desktop().screenNumber(app.desktop().cursor().pos())
+ center_point = app.desktop().screenGeometry(screen).center()
+
+ frame_geo = self.frameGeometry()
+ frame_geo.moveCenter(center_point)
+
+ return frame_geo
+
@contextlib.contextmanager
def application():
diff --git a/pype/widgets/project_settings.py b/pype/widgets/project_settings.py
index 3aa2fc06b6..28c6215915 100644
--- a/pype/widgets/project_settings.py
+++ b/pype/widgets/project_settings.py
@@ -274,10 +274,10 @@ class Project_name_getUI(QtWidgets.QWidget):
# update all values in resolution
if self.resolution_w.text():
self.projects[self.new_index]['custom_attributes'][
- 'resolution_width'] = int(self.resolution_w.text())
+ "resolutionWidth"] = int(self.resolution_w.text())
if self.resolution_h.text():
self.projects[self.new_index]['custom_attributes'][
- 'resolution_height'] = int(self.resolution_h.text())
+ "resolutionHeight"] = int(self.resolution_h.text())
def _update_attributes_by_list_selection(self):
# generate actual selection index
@@ -451,8 +451,8 @@ class Project_name_get(Project_name_getUI):
# int(self.projects[self.new_index]['custom_attributes']['fps']))
# project.projectRoot()
# print 'handles: {}'.format(self.projects[self.new_index]['custom_attributes']['handles'])
- # print 'resolution_width: {}'.format(self.projects[self.new_index]['custom_attributes']['resolution_width'])
- # print 'resolution_width: {}'.format(self.projects[self.new_index]['custom_attributes']['resolution_height'])
+ # print 'resolution_width: {}'.format(self.projects[self.new_index]['custom_attributes']["resolutionWidth"])
+        # print 'resolution_height: {}'.format(self.projects[self.new_index]['custom_attributes']["resolutionHeight"])
# print "<< {}".format(self.projects[self.new_index])
# get path for the hrox file
diff --git a/res/ftrack/action_icons/ActionAskWhereIRun.svg b/res/ftrack/action_icons/ActionAskWhereIRun.svg
new file mode 100644
index 0000000000..c02b8f83d8
--- /dev/null
+++ b/res/ftrack/action_icons/ActionAskWhereIRun.svg
@@ -0,0 +1,131 @@
+
+
+
+
diff --git a/res/ftrack/action_icons/CustomAttributes.svg b/res/ftrack/action_icons/CustomAttributes.svg
deleted file mode 100644
index 6d73746ed0..0000000000
--- a/res/ftrack/action_icons/CustomAttributes.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/res/ftrack/action_icons/JobKiller.svg b/res/ftrack/action_icons/JobKiller.svg
deleted file mode 100644
index 595c780a9b..0000000000
--- a/res/ftrack/action_icons/JobKiller.svg
+++ /dev/null
@@ -1,374 +0,0 @@
-
-
diff --git a/res/ftrack/action_icons/PrepareProject.svg b/res/ftrack/action_icons/PrepareProject.svg
new file mode 100644
index 0000000000..bd6b460ce3
--- /dev/null
+++ b/res/ftrack/action_icons/PrepareProject.svg
@@ -0,0 +1,88 @@
+
+
diff --git a/res/ftrack/action_icons/PypeAdmin.svg b/res/ftrack/action_icons/PypeAdmin.svg
new file mode 100644
index 0000000000..c95a29dacb
--- /dev/null
+++ b/res/ftrack/action_icons/PypeAdmin.svg
@@ -0,0 +1,173 @@
+
+
+
+
diff --git a/res/ftrack/action_icons/PypeDoctor.svg b/res/ftrack/action_icons/PypeDoctor.svg
new file mode 100644
index 0000000000..e921d99ee5
--- /dev/null
+++ b/res/ftrack/action_icons/PypeDoctor.svg
@@ -0,0 +1,114 @@
+
+
diff --git a/res/ftrack/action_icons/SyncHierarchicalAttrsLocal.svg b/res/ftrack/action_icons/SyncHierarchicalAttrsLocal.svg
deleted file mode 100644
index f58448ac06..0000000000
--- a/res/ftrack/action_icons/SyncHierarchicalAttrsLocal.svg
+++ /dev/null
@@ -1 +0,0 @@
-
diff --git a/res/ftrack/action_icons/SyncToAvalon-local.svg b/res/ftrack/action_icons/SyncToAvalon-local.svg
deleted file mode 100644
index bf4708e8a5..0000000000
--- a/res/ftrack/action_icons/SyncToAvalon-local.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/res/ftrack/action_icons/Thumbnail.svg b/res/ftrack/action_icons/Thumbnail.svg
new file mode 100644
index 0000000000..a8780b9a04
--- /dev/null
+++ b/res/ftrack/action_icons/Thumbnail.svg
@@ -0,0 +1,136 @@
+
+
diff --git a/res/ftrack/action_icons/thumbToChildren.svg b/res/ftrack/action_icons/thumbToChildren.svg
deleted file mode 100644
index 30b146803e..0000000000
--- a/res/ftrack/action_icons/thumbToChildren.svg
+++ /dev/null
@@ -1,88 +0,0 @@
-
-
-
-
diff --git a/res/ftrack/action_icons/thumbToParent.svg b/res/ftrack/action_icons/thumbToParent.svg
deleted file mode 100644
index 254b650306..0000000000
--- a/res/ftrack/action_icons/thumbToParent.svg
+++ /dev/null
@@ -1,95 +0,0 @@
-
-
-
-
diff --git a/setup/nuke/nuke_path/atom_server.py b/setup/nuke/nuke_path/atom_server.py
new file mode 100644
index 0000000000..1742c290c1
--- /dev/null
+++ b/setup/nuke/nuke_path/atom_server.py
@@ -0,0 +1,54 @@
+'''
+ Simple socket server using threads
+'''
+
+import socket
+import sys
+import threading
+import StringIO
+import contextlib
+
+import nuke
+
+HOST = ''
+PORT = 8888
+
+
+@contextlib.contextmanager
+def stdoutIO(stdout=None):
+ old = sys.stdout
+ if stdout is None:
+ stdout = StringIO.StringIO()
+ sys.stdout = stdout
+ yield stdout
+ sys.stdout = old
+
+
+def _exec(data):
+ with stdoutIO() as s:
+ exec(data)
+ return s.getvalue()
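+# e.g. _exec("print(2 + 2)") returns the captured stdout, "4\n"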
+
+
+def server_start():
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.bind((HOST, PORT))
+ s.listen(5)
+
+    while True:
+ client, address = s.accept()
+ try:
+ data = client.recv(4096)
+ if data:
+                result = nuke.executeInMainThreadWithResult(_exec, args=(data,))
+ client.send(str(result))
+ except SystemExit:
+            result = 'SERVER: Shutting down...'
+ client.send(str(result))
+ raise
+ finally:
+ client.close()
+
+t = threading.Thread(None, server_start)
+t.setDaemon(True)
+t.start()
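+
+# A minimal client sketch for exercising the server from another
+# process (hypothetical usage; assumes the HOST/PORT defined above):
+#
+#   import socket
+#   client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+#   client.connect(("localhost", PORT))
+#   client.send("print('hello from nuke')")
+#   print(client.recv(4096))
+#   client.close()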
diff --git a/setup/nuke/nuke_path/menu.py b/setup/nuke/nuke_path/menu.py
index 82438084aa..fd87c98246 100644
--- a/setup/nuke/nuke_path/menu.py
+++ b/setup/nuke/nuke_path/menu.py
@@ -1,3 +1,4 @@
+import atom_server
from pype.nuke.lib import (
writes_version_sync,
@@ -15,5 +16,6 @@ log = Logger().get_logger(__name__, "nuke")
nuke.addOnScriptSave(onScriptLoad)
nuke.addOnScriptLoad(checkInventoryVersions)
nuke.addOnScriptSave(checkInventoryVersions)
+nuke.addOnScriptSave(writes_version_sync)
log.info('Automatic syncing of write file knob to script version')
diff --git a/setup/nuke/nuke_path/write_to_read.py b/setup/nuke/nuke_path/write_to_read.py
new file mode 100644
index 0000000000..9667dccab6
--- /dev/null
+++ b/setup/nuke/nuke_path/write_to_read.py
@@ -0,0 +1,141 @@
+import re
+import os
+import glob
+import nuke
+from pype import api as pype
+log = pype.Logger().get_logger(__name__, "nuke")
+
+SINGLE_FILE_FORMATS = ['avi', 'mp4', 'mxf', 'mov', 'mpg', 'mpeg', 'wmv', 'm4v',
+ 'm2v']
+
+
+def evaluate_filepath_new(k_value, k_eval, project_dir, first_frame):
+ # get combined relative path
+ combined_relative_path = None
+ if k_eval is not None and project_dir is not None:
+ combined_relative_path = os.path.abspath(
+ os.path.join(project_dir, k_eval))
+ combined_relative_path = combined_relative_path.replace('\\', '/')
+ filetype = combined_relative_path.split('.')[-1]
+ frame_number = re.findall(r'\d+', combined_relative_path)[-1]
+ basename = combined_relative_path[: combined_relative_path.rfind(
+ frame_number)]
+ filepath_glob = basename + '*' + filetype
+ glob_search_results = glob.glob(filepath_glob)
+        if not glob_search_results:
+ combined_relative_path = None
+
+ try:
+ k_value = k_value % first_frame
+ if os.path.exists(k_value):
+ filepath = k_value
+ elif os.path.exists(k_eval):
+ filepath = k_eval
+        elif project_dir is not None and \
+                combined_relative_path is not None:
+ filepath = combined_relative_path
+
+ filepath = os.path.abspath(filepath)
+ except Exception as E:
+ log.error("Cannot create Read node. Perhaps it needs to be rendered first :) Error: `{}`".format(E))
+ return
+
+ filepath = filepath.replace('\\', '/')
+ current_frame = re.findall(r'\d+', filepath)[-1]
+ padding = len(current_frame)
+ basename = filepath[: filepath.rfind(current_frame)]
+ filetype = filepath.split('.')[-1]
+
+ # sequence or not?
+ if filetype in SINGLE_FILE_FORMATS:
+ pass
+ else:
+ # Image sequence needs hashes
+ filepath = basename + '#' * padding + '.' + filetype
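+        # e.g. "render.0001.exr" -> "render.####.exr"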
+
+ # relative path? make it relative again
+    if project_dir is not None:
+ filepath = filepath.replace(project_dir, '.')
+
+    # get first and last frame from disk, comparing frames as integers
+    # so that e.g. frame 9 sorts before frame 10
+    frames = []
+    firstframe = 0
+    lastframe = 0
+    filepath_glob = basename + '*' + filetype
+    glob_search_results = glob.glob(filepath_glob)
+    for f in glob_search_results:
+        frame = int(re.findall(r'\d+', f)[-1])
+        frames.append(frame)
+    frames = sorted(frames)
+    firstframe = frames[0]
+    lastframe = frames[-1]
+    if lastframe < 0:
+        lastframe = firstframe
+
+ return filepath, firstframe, lastframe
+
+
+def create_read_node(ndata, comp_start):
+ read = nuke.createNode('Read', 'file ' + ndata['filepath'])
+ read.knob('colorspace').setValue(int(ndata['colorspace']))
+ read.knob('raw').setValue(ndata['rawdata'])
+ read.knob('first').setValue(int(ndata['firstframe']))
+ read.knob('last').setValue(int(ndata['lastframe']))
+ read.knob('origfirst').setValue(int(ndata['firstframe']))
+ read.knob('origlast').setValue(int(ndata['lastframe']))
+ if comp_start == int(ndata['firstframe']):
+ read.knob('frame_mode').setValue("1")
+ read.knob('frame').setValue(str(comp_start))
+ else:
+ read.knob('frame_mode').setValue("0")
+ read.knob('xpos').setValue(ndata['new_xpos'])
+ read.knob('ypos').setValue(ndata['new_ypos'])
+ nuke.inputs(read, 0)
+ return
+
+
+def write_to_read(gn):
+ comp_start = nuke.Root().knob('first_frame').value()
+ comp_end = nuke.Root().knob('last_frame').value()
+ project_dir = nuke.Root().knob('project_directory').getValue()
+ if not os.path.exists(project_dir):
+ project_dir = nuke.Root().knob('project_directory').evaluate()
+
+ group_read_nodes = []
+
+ with gn:
+ height = gn.screenHeight() # get group height and position
+ new_xpos = int(gn.knob('xpos').value())
+ new_ypos = int(gn.knob('ypos').value()) + height + 20
+ group_writes = [n for n in nuke.allNodes() if n.Class() == "Write"]
+ print("__ group_writes: {}".format(group_writes))
+        if group_writes:
+ # there can be only 1 write node, taking first
+ n = group_writes[0]
+
+ if n.knob('file') is not None:
+ myfiletranslated, firstFrame, lastFrame = evaluate_filepath_new(
+ n.knob('file').getValue(),
+ n.knob('file').evaluate(),
+ project_dir,
+ comp_start
+ )
+ # get node data
+ ndata = {
+ 'filepath': myfiletranslated,
+ 'firstframe': firstFrame,
+ 'lastframe': lastFrame,
+ 'new_xpos': new_xpos,
+ 'new_ypos': new_ypos,
+ 'colorspace': n.knob('colorspace').getValue(),
+ 'rawdata': n.knob('raw').value(),
+ 'write_frame_mode': str(n.knob('frame_mode').value()),
+ 'write_frame': n.knob('frame').value()
+ }
+ group_read_nodes.append(ndata)
+
+
+ # create reads in one go
+ for oneread in group_read_nodes:
+ # create read node
+ create_read_node(oneread, comp_start)
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png b/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png
index 31c41d1ac6..4561745d66 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png and b/setup/nukestudio/hiero_plugin_path/Icons/1_add_handles_end.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png b/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png
index ab911c5ebc..bb4c1802aa 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png and b/setup/nukestudio/hiero_plugin_path/Icons/2_add_handles.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/3D.png b/setup/nukestudio/hiero_plugin_path/Icons/3D.png
index 4ace8911df..2de7a72775 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/3D.png and b/setup/nukestudio/hiero_plugin_path/Icons/3D.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png b/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png
index 4cdc09b541..c98e4f74f1 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png and b/setup/nukestudio/hiero_plugin_path/Icons/3_add_handles_start.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png b/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png
index 418272517f..18555698fe 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png and b/setup/nukestudio/hiero_plugin_path/Icons/4_2D.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/edit.png b/setup/nukestudio/hiero_plugin_path/Icons/edit.png
index e0ba3c102f..97e42054e7 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/edit.png and b/setup/nukestudio/hiero_plugin_path/Icons/edit.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/fusion.png b/setup/nukestudio/hiero_plugin_path/Icons/fusion.png
index 208c1279cf..2e498edd69 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/fusion.png and b/setup/nukestudio/hiero_plugin_path/Icons/fusion.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png b/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png
index 68ea352885..6acf39ced5 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png and b/setup/nukestudio/hiero_plugin_path/Icons/hierarchy.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/houdini.png b/setup/nukestudio/hiero_plugin_path/Icons/houdini.png
index 128eac262a..d8c842dd17 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/houdini.png and b/setup/nukestudio/hiero_plugin_path/Icons/houdini.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/lense.png b/setup/nukestudio/hiero_plugin_path/Icons/lense.png
index 2eb2da982f..255b1753ed 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/lense.png and b/setup/nukestudio/hiero_plugin_path/Icons/lense.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/lense1.png b/setup/nukestudio/hiero_plugin_path/Icons/lense1.png
index f76354f48c..1ad1264807 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/lense1.png and b/setup/nukestudio/hiero_plugin_path/Icons/lense1.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/maya.png b/setup/nukestudio/hiero_plugin_path/Icons/maya.png
index 7dd1453c60..fcfa47ae4f 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/maya.png and b/setup/nukestudio/hiero_plugin_path/Icons/maya.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/nuke.png b/setup/nukestudio/hiero_plugin_path/Icons/nuke.png
index 9d9dc4104c..107796914b 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/nuke.png and b/setup/nukestudio/hiero_plugin_path/Icons/nuke.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/resolution.png b/setup/nukestudio/hiero_plugin_path/Icons/resolution.png
index 9904a60532..83803fc36d 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/resolution.png and b/setup/nukestudio/hiero_plugin_path/Icons/resolution.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/retiming.png b/setup/nukestudio/hiero_plugin_path/Icons/retiming.png
new file mode 100644
index 0000000000..1c6f22e02c
Binary files /dev/null and b/setup/nukestudio/hiero_plugin_path/Icons/retiming.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/retiming.psd b/setup/nukestudio/hiero_plugin_path/Icons/retiming.psd
new file mode 100644
index 0000000000..bac6fc6b58
Binary files /dev/null and b/setup/nukestudio/hiero_plugin_path/Icons/retiming.psd differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/review.png b/setup/nukestudio/hiero_plugin_path/Icons/review.png
index 49f28c492c..0d894b6987 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/review.png and b/setup/nukestudio/hiero_plugin_path/Icons/review.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/volume.png b/setup/nukestudio/hiero_plugin_path/Icons/volume.png
index 47119dc98b..e5e1200653 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/volume.png and b/setup/nukestudio/hiero_plugin_path/Icons/volume.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png
index d01fe683e5..51742b5df2 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png and b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_bg.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png
index a1d5751622..01e5f4f816 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png and b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_fg.png differ
diff --git a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png
index 0fe806d86e..0ffb939a7f 100644
Binary files a/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png and b/setup/nukestudio/hiero_plugin_path/Icons/z_layer_main.png differ