Hiero: move code from the 2.x dev branch

https://github.com/pypeclub/pype/tree/feature/611-simplify-hiero-tag-workflow
This commit is contained in:
Jakub Jezek 2021-01-21 17:33:28 +01:00
parent da5bd99138
commit ec224bafa2
No known key found for this signature in database
GPG key ID: C4B96E101D2A47F3
64 changed files with 3512 additions and 3231 deletions

View file

@ -1,9 +1,3 @@
import os
from pype.api import Logger
from avalon import api as avalon
from pyblish import api as pyblish
import pype
from .workio import (
open_file,
save_file,
@ -13,14 +7,63 @@ from .workio import (
work_root
)
from .menu import (
install as menu_install,
_update_menu_task_label
from .pipeline import (
launch_workfiles_app,
ls,
install,
uninstall,
reload_config,
containerise,
publish,
maintained_selection,
parse_container,
update_container,
reset_selection
)
from .events import register_hiero_events
from .lib import (
get_track_items,
get_current_project,
get_current_sequence,
get_current_track,
get_track_item_pype_tag,
set_track_item_pype_tag,
get_track_item_pype_data,
set_publish_attribute,
get_publish_attribute,
imprint,
get_selected_track_items,
set_selected_track_items,
create_nuke_workfile_clips,
create_bin,
apply_colorspace_project,
apply_colorspace_clips,
is_overlapping,
get_sequence_pattern_and_padding
)
from .plugin import (
CreatorWidget,
Creator,
PublishClip,
SequenceLoader,
ClipLoader
)
__all__ = [
# avalon pipeline module
"launch_workfiles_app",
"ls",
"install",
"uninstall",
"reload_config",
"containerise",
"publish",
"maintained_selection",
"parse_container",
"update_container",
"reset_selection",
# Workfiles API
"open_file",
"save_file",
@ -28,96 +71,31 @@ __all__ = [
"has_unsaved_changes",
"file_extensions",
"work_root",
# Lib functions
"get_track_items",
"get_current_project",
"get_current_sequence",
"get_current_track",
"get_track_item_pype_tag",
"set_track_item_pype_tag",
"get_track_item_pype_data",
"set_publish_attribute",
"get_publish_attribute",
"imprint",
"get_selected_track_items",
"set_selected_track_items",
"create_nuke_workfile_clips",
"create_bin",
"is_overlapping",
"apply_colorspace_project",
"apply_colorspace_clips",
"get_sequence_pattern_and_padding",
# plugins
"CreatorWidget",
"Creator",
"PublishClip",
"SequenceLoader",
"ClipLoader"
]
# get logger
log = Logger().get_logger(__name__)

''' Creating all important host related variables '''
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")

# plugin root path -- resolved from the installed package location
HOST_DIR = os.path.dirname(os.path.abspath(pype.hosts.hiero.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")

# registering particular pyblish gui but `lite` is recommended!!
if os.getenv("PYBLISH_GUI", None):
    pyblish.register_gui(os.getenv("PYBLISH_GUI", None))
def install():
    """Install the Hiero integration for Avalon.

    Takes no arguments.  Registers the pyblish host and publish plug-in
    path, registers the Avalon loader/creator/inventory plug-in paths,
    restricts the publish UI to a default set of families, installs the
    menu and hooks up Hiero events.
    """
    # adding all events
    _register_events()

    log.info("Registering Hiero plug-ins..")
    pyblish.register_host("hiero")
    pyblish.register_plugin_path(PUBLISH_PATH)
    avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
    avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)

    # Disable all families except for the ones we explicitly want to see
    family_states = [
        "write",
        "review",
        "plate"
    ]
    avalon.data["familiesStateDefault"] = False
    avalon.data["familiesStateToggled"] = family_states

    # install menu
    menu_install()

    # register hiero events
    register_hiero_events()
def uninstall():
    """Uninstall the Hiero integration for Avalon.

    Reverses :func:`install` by deregistering the pyblish host and the
    pyblish/Avalon plug-in paths.
    """
    log.info("Deregistering Hiero plug-ins..")
    pyblish.deregister_host("hiero")
    pyblish.deregister_plugin_path(PUBLISH_PATH)
    avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
    avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)
def _register_events():
    """Register Avalon event callbacks.

    Currently only hooks ``taskChanged`` so the Hiero menu label can
    follow the active task.
    """
    # when the task changes, refresh the task label in the Hiero menu
    avalon.on("taskChanged", _update_menu_task_label)
    log.info("Installed event callback for 'taskChanged'..")
def ls():
    """List available containers.

    NOTE: stub -- not implemented yet; always returns ``None``.

    This function is used by the Container Manager. You'll need to
    implement a for-loop that then *yields* one Container at a time.

    See the `container.json` schema for details on how it should look,
    and the Maya equivalent, which is in `avalon.maya.pipeline`
    """
    # TODO: list all available containers from the sequence
    return

View file

@ -1,10 +1,12 @@
import os
import hiero.core.events
import avalon.api as avalon
from pype.api import Logger
from .lib import sync_avalon_data_to_workfile, launch_workfiles_app
from .tags import add_tags_from_presets
from .tags import add_tags_to_workfile
from .menu import update_menu_task_label
log = Logger().get_logger(__name__)
log = Logger().get_logger(__name__, "hiero")
def startupCompleted(event):
@ -28,7 +30,7 @@ def afterNewProjectCreated(event):
sync_avalon_data_to_workfile()
# add tags from preset
add_tags_from_presets()
add_tags_to_workfile()
# Workfiles.
if int(os.environ.get("WORKFILES_STARTUP", "0")):
@ -48,7 +50,7 @@ def afterProjectLoad(event):
sync_avalon_data_to_workfile()
# add tags from preset
add_tags_from_presets()
add_tags_to_workfile()
def beforeProjectClosed(event):
@ -77,7 +79,7 @@ def register_hiero_events():
"kAfterNewProjectCreated, kBeforeProjectLoad, kAfterProjectLoad, "
"kBeforeProjectSave, kAfterProjectSave, kBeforeProjectClose, "
"kAfterProjectClose, kShutdown, kStartup"
)
)
# hiero.core.events.registerInterest(
# "kBeforeNewProjectCreated", beforeNewProjectCreated)
@ -105,3 +107,13 @@ def register_hiero_events():
# workfiles
hiero.core.events.registerEventType("kStartWorkfiles")
hiero.core.events.registerInterest("kStartWorkfiles", launch_workfiles_app)
def register_events():
    """Register Avalon event callbacks.

    Currently only hooks ``taskChanged`` so the Hiero menu label can
    follow the active task.
    """
    # when the task changes, refresh the task label in the Hiero menu
    avalon.on("taskChanged", update_menu_task_label)
    log.info("Installed event callback for 'taskChanged'..")

File diff suppressed because it is too large Load diff

View file

@ -5,20 +5,15 @@ from pype.api import Logger
from avalon.api import Session
from hiero.ui import findMenuAction
from .tags import add_tags_from_presets
from . import tags
from .lib import (
reload_config,
set_workfiles
)
log = Logger().get_logger(__name__)
log = Logger().get_logger(__name__, "hiero")
self = sys.modules[__name__]
self._change_context_menu = None
def _update_menu_task_label(*args):
def update_menu_task_label(*args):
"""Update the task label in Avalon menu to current session"""
object_name = self._change_context_menu
@ -36,14 +31,17 @@ def _update_menu_task_label(*args):
menu.setTitle(label)
def install():
def menu_install():
"""
Installing menu into Hiero
"""
from . import (
publish, launch_workfiles_app, reload_config,
apply_colorspace_project, apply_colorspace_clips
)
# here is the best place to add menu
from avalon.tools import publish, cbloader
from avalon.tools import cbloader, creator, sceneinventory
from avalon.vendor.Qt import QtGui
menu_name = os.environ['AVALON_LABEL']
@ -72,36 +70,45 @@ def install():
workfiles_action = menu.addAction("Work Files...")
workfiles_action.setIcon(QtGui.QIcon("icons:Position.png"))
workfiles_action.triggered.connect(set_workfiles)
workfiles_action.triggered.connect(launch_workfiles_app)
default_tags_action = menu.addAction("Create Default Tags...")
default_tags_action.setIcon(QtGui.QIcon("icons:Position.png"))
default_tags_action.triggered.connect(add_tags_from_presets)
default_tags_action.triggered.connect(tags.add_tags_to_workfile)
menu.addSeparator()
publish_action = menu.addAction("Publish...")
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
publish_action.triggered.connect(
lambda *args: publish.show(hiero.ui.mainWindow())
lambda *args: publish(hiero.ui.mainWindow())
)
creator_action = menu.addAction("Create...")
creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
creator_action.triggered.connect(creator.show)
loader_action = menu.addAction("Load...")
loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
loader_action.triggered.connect(cbloader.show)
sceneinventory_action = menu.addAction("Manage...")
sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
sceneinventory_action.triggered.connect(sceneinventory.show)
menu.addSeparator()
reload_action = menu.addAction("Reload pipeline...")
reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
reload_action.triggered.connect(reload_config)
# Is this required?
# hiero.ui.registerAction(context_label_action)
# hiero.ui.registerAction(workfiles_action)
# hiero.ui.registerAction(default_tags_action)
# hiero.ui.registerAction(publish_action)
# hiero.ui.registerAction(loader_action)
# hiero.ui.registerAction(reload_action)
menu.addSeparator()
apply_colorspace_p_action = menu.addAction("Apply Colorspace Project...")
apply_colorspace_p_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
apply_colorspace_p_action.triggered.connect(apply_colorspace_project)
apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips...")
apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
apply_colorspace_c_action.triggered.connect(apply_colorspace_clips)
self.context_label_action = context_label_action
self.workfile_actions = workfiles_action

View file

@ -0,0 +1,301 @@
"""
Basic avalon integration
"""
import os
import contextlib
from collections import OrderedDict
from avalon.tools import (
workfiles,
publish as _publish
)
from avalon.pipeline import AVALON_CONTAINER_ID
from avalon import api as avalon
from avalon import schema
from pyblish import api as pyblish
import pype
from pype.api import Logger
from . import lib, menu, events
log = Logger().get_logger(__name__, "hiero")
AVALON_CONFIG = os.getenv("AVALON_CONFIG", "pype")
# plugin paths
HOST_DIR = os.path.dirname(os.path.abspath(pype.hosts.hiero.__file__))
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish").replace("\\", "/")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load").replace("\\", "/")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create").replace("\\", "/")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory").replace("\\", "/")
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
def install():
    """Install the Hiero integration for Avalon.

    Takes no arguments.  Registers the pyblish host, publish plug-in path
    and the ``instanceToggled`` callback, registers the Avalon
    loader/creator/inventory plug-in paths, restricts the publish UI to a
    default set of families, installs the menu and hooks up Hiero events.
    """
    # adding all events
    events.register_events()

    log.info("Registering Hiero plug-ins..")
    pyblish.register_host("hiero")
    pyblish.register_plugin_path(PUBLISH_PATH)
    avalon.register_plugin_path(avalon.Loader, LOAD_PATH)
    avalon.register_plugin_path(avalon.Creator, CREATE_PATH)
    avalon.register_plugin_path(avalon.InventoryAction, INVENTORY_PATH)

    # register callback for switching publishable
    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

    # Disable all families except for the ones we explicitly want to see
    family_states = [
        "write",
        "review",
        "plate"
    ]
    avalon.data["familiesStateDefault"] = False
    avalon.data["familiesStateToggled"] = family_states

    # install menu
    menu.menu_install()

    # register hiero events
    events.register_hiero_events()
def uninstall():
    """Uninstall the Hiero integration for Avalon.

    Reverses :func:`install` by deregistering the pyblish host, the
    plug-in paths and the ``instanceToggled`` callback.
    """
    log.info("Deregistering Hiero plug-ins..")
    pyblish.deregister_host("hiero")
    pyblish.deregister_plugin_path(PUBLISH_PATH)
    avalon.deregister_plugin_path(avalon.Loader, LOAD_PATH)
    avalon.deregister_plugin_path(avalon.Creator, CREATE_PATH)

    # deregister callback for switching publishable
    pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
def containerise(track_item,
                 name,
                 namespace,
                 context,
                 loader=None,
                 data=None):
    """Bundle Hiero's object into an assembly and imprint it with metadata

    Containerisation enables a tracking of version, author and origin
    for loaded assets.

    Arguments:
        track_item (hiero.core.TrackItem): object to imprint as container
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of node used to produce this container.
        data (dict, optional): Extra key/value pairs merged into the imprint.

    Returns:
        track_item (hiero.core.TrackItem): containerised object
    """
    data_imprint = OrderedDict({
        "schema": "avalon-core:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": str(name),
        "namespace": str(namespace),
        "loader": str(loader),
        "representation": str(context["representation"]["_id"]),
    })

    if data:
        # merge extra data in one call instead of key-by-key updates
        data_imprint.update(data)

    log.debug("_ data_imprint: {}".format(data_imprint))
    lib.set_track_item_pype_tag(track_item, data_imprint)

    return track_item
def ls():
    """List available containers.

    Generator that yields container data (see :func:`parse_container`)
    for every track item in the current timeline that carries a valid
    Pype tag.

    Yields:
        dict: container schema data per containerised track item.
    """
    # get all track items from current timeline
    all_track_items = lib.get_track_items()

    for track_item in all_track_items:
        container = parse_container(track_item)
        if container:
            yield container
def parse_container(track_item, validate=True):
    """Return container data from track_item's pype tag.

    Args:
        track_item (hiero.core.TrackItem): A containerised track item.
        validate (bool)[optional]: validating with avalon scheme

    Returns:
        dict: The container schema data for input containerized track item,
            or None when the tag data is missing, malformed or incomplete.
    """
    # convert tag metadata to normal keys names
    data = lib.get_track_item_pype_data(track_item)

    # guard the type first: calling `.get` / `schema.validate` on a
    # non-dict payload would raise instead of returning "no container"
    if not isinstance(data, dict):
        return

    if validate and data.get("schema"):
        schema.validate(data)

    # If not all required data return the empty container
    required = ['schema', 'id', 'name',
                'namespace', 'loader', 'representation']
    if not all(key in data for key in required):
        return

    container = {key: data[key] for key in required}

    container["objectName"] = track_item.name()

    # Store reference to the node object
    container["_track_item"] = track_item

    return container
def update_container(track_item, data=None):
    """Write updated values into a track item's pype tag.

    Only keys that the stored container already carries are overwritten;
    unknown keys in *data* are ignored.

    Args:
        track_item (hiero.core.TrackItem): A containerised track item.
        data (dict)[optional]: dictionary with data to be updated

    Returns:
        bool: True if container was updated correctly
    """
    updates = data or {}
    container = lib.get_track_item_pype_data(track_item)

    for key in container:
        if key in updates:
            container[key] = updates[key]

    log.info("Updating container: `{}`".format(track_item.name()))
    return bool(lib.set_track_item_pype_tag(track_item, container))
def launch_workfiles_app(*args):
    """Open the Workfiles tool rooted at the current work directory."""
    work_dir = os.environ["AVALON_WORKDIR"]

    # show workfile gui
    workfiles.show(work_dir)
def publish(parent):
    """Open the pyblish publish GUI parented to *parent* (host shorthand)."""
    return _publish.show(parent)
@contextlib.contextmanager
def maintained_selection():
    """Context manager that restores the track-item selection on exit.

    The current selection is remembered and cleared before the body runs;
    afterwards the selection is cleared again and the remembered items are
    re-selected, even when the body raises.

    Example:
        >>> with maintained_selection():
        ...     for track_item in track_items:
        ...         < do some stuff >
    """
    from .lib import (
        get_selected_track_items,
        set_selected_track_items
    )

    stored_selection = get_selected_track_items()
    reset_selection()

    try:
        yield
    finally:
        # always restore, no matter what happened inside the block
        reset_selection()
        set_selected_track_items(stored_selection)
def reset_selection():
    """Clear the track-item selection (select nothing)."""
    from .lib import set_selected_track_items

    # an empty selection list deselects everything
    set_selected_track_items([])
def reload_config():
    """Attempt to reload pipeline at run-time.

    Imports and reloads a fixed list of pipeline modules, best-effort:
    a module that fails to (re)load is logged and skipped.

    CAUTION: This is primarily for development and debugging purposes.
    """
    import importlib

    for module_name in (
        "avalon",
        "avalon.lib",
        "avalon.pipeline",
        "pyblish",
        "pypeapp",
        "{}.api".format(AVALON_CONFIG),
        "{}.hosts.hiero.lib".format(AVALON_CONFIG),
        "{}.hosts.hiero.menu".format(AVALON_CONFIG),
        "{}.hosts.hiero.tags".format(AVALON_CONFIG)
    ):
        log.info("Reloading module: {}...".format(module_name))
        try:
            # `imp.reload` is deprecated; `importlib.reload` is the
            # supported replacement on Python 3
            module = importlib.import_module(module_name)
            importlib.reload(module)
        except Exception as e:
            # previously the except branch re-ran `importlib.reload` on a
            # name that could still be a *string* (when import_module
            # failed), raising an uncaught TypeError -- just log and skip
            log.warning("Cannot reload module: {}".format(e))
def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Mirror a pyblish instance toggle onto its track item's pype tag."""
    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
        instance, old_value, new_value))

    from pype.hosts.hiero.api import (
        get_track_item_pype_tag,
        set_publish_attribute
    )

    # write the new publish state into the tag of the toggled track item
    toggled_item = instance.data["item"]
    pype_tag = get_track_item_pype_tag(toggled_item)
    set_publish_attribute(pype_tag, new_value)

View file

@ -0,0 +1,910 @@
import re
import os
import hiero
from Qt import QtWidgets, QtCore
from avalon.vendor import qargparse
import avalon.api as avalon
import pype.api as pype
from . import lib
log = pype.Logger().get_logger(__name__, "hiero")
def load_stylesheet():
    """Return the contents of the bundled ``style.css`` or "" if absent."""
    css_path = os.path.join(os.path.dirname(__file__), "style.css")

    if not os.path.exists(css_path):
        log.warning("Unable to load stylesheet, file not found in resources")
        return ""

    with open(css_path, "r") as css_file:
        return css_file.read()
class CreatorWidget(QtWidgets.QDialog):
    """Dynamic creator dialog built from a preset ``ui_inputs`` dict.

    Widget rows are generated recursively from nested dicts (see
    :meth:`populate_widgets`).  After the dialog is confirmed the
    collected values are available on ``self.result`` (``None`` when
    cancelled).
    """

    # output items
    items = dict()

    def __init__(self, name, info, ui_inputs, parent=None):
        super(CreatorWidget, self).__init__(parent)

        self.setObjectName(name)

        self.setWindowFlags(
            QtCore.Qt.Window
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.WindowStaysOnTopHint
        )
        self.setWindowTitle(name or "Pype Creator Input")
        self.resize(500, 700)

        # Where inputs and labels are set
        self.content_widget = [QtWidgets.QWidget(self)]
        top_layout = QtWidgets.QFormLayout(self.content_widget[0])
        top_layout.setObjectName("ContentLayout")
        top_layout.addWidget(Spacer(5, self))

        # first add widget tag line
        top_layout.addWidget(QtWidgets.QLabel(info))

        # main dynamic layout
        self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAsNeeded)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_area.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.scroll_area.setWidgetResizable(True)

        self.content_widget.append(self.scroll_area)
        scroll_widget = QtWidgets.QWidget(self)
        in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
        self.content_layout = [in_scroll_area]

        # add preset data into input widget layout
        self.items = self.populate_widgets(ui_inputs)
        self.scroll_area.setWidget(scroll_widget)

        # Confirmation buttons
        btns_widget = QtWidgets.QWidget(self)
        btns_layout = QtWidgets.QHBoxLayout(btns_widget)

        cancel_btn = QtWidgets.QPushButton("Cancel")
        btns_layout.addWidget(cancel_btn)

        ok_btn = QtWidgets.QPushButton("Ok")
        btns_layout.addWidget(ok_btn)

        # Main layout of the dialog
        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(10, 10, 10, 10)
        main_layout.setSpacing(0)

        # adding content widget
        for w in self.content_widget:
            main_layout.addWidget(w)

        main_layout.addWidget(btns_widget)

        ok_btn.clicked.connect(self._on_ok_clicked)
        cancel_btn.clicked.connect(self._on_cancel_clicked)

        stylesheet = load_stylesheet()
        self.setStyleSheet(stylesheet)

    def _on_ok_clicked(self):
        # collect all current widget values and close
        self.result = self.value(self.items)
        self.close()

    def _on_cancel_clicked(self):
        self.result = None
        self.close()

    def value(self, data, new_data=None):
        """Recursively read current widget values from *data*.

        Returns ``{key: {"target": ..., "value": ...}}``; for ``section``
        groups the group key is dropped and its children flattened into
        the parent level.
        """
        new_data = new_data or dict()
        for k, v in data.items():
            new_data[k] = {
                "target": None,
                "value": None
            }
            if v["type"] == "dict":
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = self.value(v["value"])
            if v["type"] == "section":
                new_data.pop(k)
                new_data = self.value(v["value"], new_data)
            elif getattr(v["value"], "currentText", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].currentText()
            elif getattr(v["value"], "isChecked", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].isChecked()
            elif getattr(v["value"], "value", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].value()
            elif getattr(v["value"], "text", None):
                new_data[k]["target"] = v["target"]
                new_data[k]["value"] = v["value"].text()
        return new_data

    def camel_case_split(self, text):
        """Split camelCase *text* into capitalized, space-separated words."""
        matches = re.finditer(
            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', text)
        return " ".join([str(m.group(0)).capitalize() for m in matches])

    def create_row(self, layout, type, text, **kwargs):
        """Add one ``label: widget`` row of QtWidgets class *type*.

        Extra keyword arguments are applied as setter calls on the new
        widget (e.g. ``setText="x"`` calls ``widget.setText("x")``).
        """
        # get type attribute from qwidgets
        attr = getattr(QtWidgets, type)

        # convert label text to normal capitalized text with spaces
        label_text = self.camel_case_split(text)

        # assign the new text to label widget
        label = QtWidgets.QLabel(label_text)
        label.setObjectName("LineLabel")

        # create attribute name text strip of spaces
        attr_name = text.replace(" ", "")

        # create attribute and assign default values
        setattr(
            self,
            attr_name,
            attr(parent=self))

        # assign the created attribute to variable
        item = getattr(self, attr_name)
        for func, val in kwargs.items():
            if getattr(item, func):
                func_attr = getattr(item, func)
                func_attr(val)

        # add to layout
        layout.addRow(label, item)

        return item

    def populate_widgets(self, data, content_layout=None):
        """
        Populate widget from input dict.

        Each plugin has its own set of widget rows defined in dictionary
        each row values should have following keys: `type`, `target`,
        `label`, `order`, `value` and optionally also `toolTip`.

        Args:
            data (dict): widget rows or organized groups defined
                by types `dict` or `section`
            content_layout (QtWidgets.QFormLayout)[optional]: used when
                nesting

        Returns:
            dict: redefined data dict updated with created widgets
        """
        content_layout = content_layout or self.content_layout[-1]

        # fix order of process by defined order value
        # BUGFIX: `dict.keys()` returns a view in Python 3 which has no
        # `pop`/`insert` -- a mutable list copy is required for reordering
        ordered_keys = list(data.keys())
        for k, v in data.items():
            try:
                # try removing a key from index which should
                # be filled with new
                ordered_keys.pop(v["order"])
            except IndexError:
                pass
            # add key into correct order
            ordered_keys.insert(v["order"], k)

        # process ordered
        for k in ordered_keys:
            v = data[k]
            tool_tip = v.get("toolTip", "")
            if v["type"] == "dict":
                # adding spacer between sections
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            if v["type"] == "section":
                # adding spacer between sections
                self.content_layout.append(QtWidgets.QWidget(self))
                content_layout.addWidget(self.content_layout[-1])
                self.content_layout[-1].setObjectName("sectionHeadline")

                headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
                headline.addWidget(Spacer(20, self))
                headline.addWidget(QtWidgets.QLabel(v["label"]))

                # adding nested layout with label
                self.content_layout.append(QtWidgets.QWidget(self))
                self.content_layout[-1].setObjectName("sectionContent")

                nested_content_layout = QtWidgets.QFormLayout(
                    self.content_layout[-1])
                nested_content_layout.setObjectName("NestedContentLayout")
                content_layout.addWidget(self.content_layout[-1])

                # add nested key as label
                data[k]["value"] = self.populate_widgets(
                    v["value"], nested_content_layout)

            elif v["type"] == "QLineEdit":
                data[k]["value"] = self.create_row(
                    content_layout, "QLineEdit", v["label"],
                    setText=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QComboBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QComboBox", v["label"],
                    addItems=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QCheckBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QCheckBox", v["label"],
                    setChecked=v["value"], setToolTip=tool_tip)
            elif v["type"] == "QSpinBox":
                data[k]["value"] = self.create_row(
                    content_layout, "QSpinBox", v["label"],
                    setValue=v["value"], setMaximum=10000, setToolTip=tool_tip)
        return data
class Spacer(QtWidgets.QWidget):
    """Fixed-height transparent spacer widget used between form sections."""

    def __init__(self, height, *args, **kwargs):
        # BUGFIX: `super(self.__class__, self)` recurses infinitely when
        # this class is subclassed; name the class explicitly instead
        super(Spacer, self).__init__(*args, **kwargs)

        self.setFixedHeight(height)

        real_spacer = QtWidgets.QWidget(self)
        real_spacer.setObjectName("Spacer")
        real_spacer.setFixedHeight(height)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(real_spacer)

        self.setLayout(layout)
def get_reference_node_parents(ref):
    """Return all parent reference nodes of reference node

    NOTE: currently a stub -- always returns an empty list.

    Args:
        ref (str): reference node.

    Returns:
        list: The upstream parent reference nodes.
    """
    parents = []
    return parents
class SequenceLoader(avalon.Loader):
    """A basic SequenceLoader for Resolve

    NOTE(review): the docstring says "Resolve" but this module targets
    Hiero -- likely copied from the Resolve host; confirm.

    This will implement the basic behavior for a loader to inherit from that
    will containerize the reference and will implement the `remove` and
    `update` logic.
    """

    # options presented to the user in the loader UI
    options = [
        qargparse.Boolean(
            "handles",
            label="Include handles",
            default=0,
            help="Load with handles or without?"
        ),
        qargparse.Choice(
            "load_to",
            label="Where to load clips",
            items=[
                "Current timeline",
                "New timeline"
            ],
            default="Current timeline",
            help="Where do you want clips to be loaded?"
        ),
        qargparse.Choice(
            "load_how",
            label="How to load clips",
            items=[
                "Original timing",
                "Sequentially in order"
            ],
            default="Original timing",
            help="Would you like to place it at orignal timing?"
        )
    ]

    def load(
        self,
        context,
        name=None,
        namespace=None,
        options=None
    ):
        # stub -- concrete loaders are expected to implement this
        pass

    def update(self, container, representation):
        """Update an existing `container`

        Stub -- concrete loaders are expected to implement this.
        """
        pass

    def remove(self, container):
        """Remove an existing `container`

        Stub -- concrete loaders are expected to implement this.
        """
        pass
class ClipLoader:
    """Performs the actual load of one representation as a Hiero clip.

    Wraps an ``avalon.api.Loader`` plugin (*cls*) and its *context*; on
    :meth:`load` it creates the project-bin media item, the Clip and the
    resulting track item on the active sequence/track.
    """

    # bin the media gets imported into (assigned in `load`)
    active_bin = None
    # NOTE(review): class-level mutable dict shared by all instances
    # unless rebound -- populated by `_populate_data`/`_get_asset_data`
    data = dict()

    def __init__(self, cls, context, **options):
        """ Initialize object

        Arguments:
            cls (avalon.api.Loader): plugin object
            context (dict): loader plugin context
            options (dict)[optional]: possible keys:
                projectBinPath: "path/to/binItem"
        """
        # copy plugin class attributes (incl. `fname`) onto this instance
        self.__dict__.update(cls.__dict__)
        self.context = context
        self.active_project = lib.get_current_project()

        # try to get value from options or evaluate key value for `handles`
        self.with_handles = options.get("handles") or bool(
            options.get("handles") is True)
        # try to get value from options or evaluate key value for `load_how`
        self.sequencial_load = options.get("sequencially") or bool(
            "Sequentially in order" in options.get("load_how", ""))
        # try to get value from options or evaluate key value for `load_to`
        self.new_sequence = options.get("newSequence") or bool(
            "New timeline" in options.get("load_to", ""))

        assert self._populate_data(), str(
            "Cannot Load selected data, look into database "
            "or call your supervisor")

        # inject asset data to representation dict
        self._get_asset_data()
        log.debug("__init__ self.data: `{}`".format(self.data))

        # add active components to class
        if self.new_sequence:
            if options.get("sequence"):
                # if multiselection is set then use options sequence
                self.active_sequence = options["sequence"]
            else:
                # create new sequence
                self.active_sequence = lib.get_current_sequence(new=True)
                self.active_sequence.setFramerate(
                    hiero.core.TimeBase.fromString(
                        str(self.data["assetData"]["fps"])))
        else:
            self.active_sequence = lib.get_current_sequence()

        if options.get("track"):
            # if multiselection is set then use options track
            self.active_track = options["track"]
        else:
            self.active_track = lib.get_current_track(
                self.active_sequence, self.data["track_name"])

    def _populate_data(self):
        """ Gets context and convert it to self.data

        Returns True on success, None when the representation file path
        is missing.

        data structure:
            {
            "name": "assetName_subsetName_representationName"
            "path": "path/to/file/created/by/get_repr..",
            "binPath": "projectBinPath",
            }
        """
        # create name
        # NOTE(review): local name `repr` shadows the builtin here
        repr = self.context["representation"]
        repr_cntx = repr["context"]
        asset = str(repr_cntx["asset"])
        subset = str(repr_cntx["subset"])
        representation = str(repr_cntx["representation"])
        self.data["clip_name"] = "_".join([asset, subset, representation])
        self.data["track_name"] = "_".join([subset, representation])

        # gets file path
        file = self.fname
        if not file:
            repr_id = repr["_id"]
            log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return None
        self.data["path"] = file.replace("\\", "/")

        # convert to hashed path
        if repr_cntx.get("frame"):
            self._fix_path_hashes()

        # solve project bin structure path
        hierarchy = str("/".join((
            "Loader",
            repr_cntx["hierarchy"].replace("\\", "/"),
            asset
        )))

        self.data["binPath"] = hierarchy

        return True

    def _fix_path_hashes(self):
        """ Convert file path where it is needed padding with hashes

        Replaces the explicit frame number in the path with `#` padding
        of the same width.
        """
        file = self.data["path"]
        if "#" not in file:
            frame = self.context["representation"]["context"].get("frame")
            padding = len(frame)
            file = file.replace(frame, "#" * padding)
            self.data["path"] = file

    def _get_asset_data(self):
        """ Get all available asset data

        joint `data` key with asset.data dict into the representaion
        """
        asset_name = self.context["representation"]["context"]["asset"]
        self.data["assetData"] = pype.get_asset(asset_name)["data"]

    def _make_track_item(self, source_bin_item, audio=False):
        """ Create track item with

        Creates a video (or audio) TrackItem from the bin item's active
        clip, applies the computed source/timeline ranges and adds it to
        the active track.
        """
        clip = source_bin_item.activeItem()

        # add to track as clip item
        if not audio:
            track_item = hiero.core.TrackItem(
                self.data["clip_name"], hiero.core.TrackItem.kVideo)
        else:
            track_item = hiero.core.TrackItem(
                self.data["clip_name"], hiero.core.TrackItem.kAudio)

        track_item.setSource(clip)
        track_item.setSourceIn(self.handle_start)
        track_item.setTimelineIn(self.timeline_in)
        track_item.setSourceOut(self.media_duration - self.handle_end)
        track_item.setTimelineOut(self.timeline_out)
        track_item.setPlaybackSpeed(1)
        self.active_track.addTrackItem(track_item)
        return track_item

    def load(self):
        """Import the media, build the Clip and place it on the track.

        Returns:
            hiero.core.TrackItem: the created track item.
        """
        # create project bin for the media to be imported into
        self.active_bin = lib.create_bin(self.data["binPath"])

        # create mediaItem in active project bin
        # create clip media
        self.media = hiero.core.MediaSource(self.data["path"])
        self.media_duration = int(self.media.duration())
        self.handle_start = int(self.data["assetData"]["handleStart"])
        self.handle_end = int(self.data["assetData"]["handleEnd"])

        if self.sequencial_load:
            last_track_item = lib.get_track_items(
                sequence_name=self.active_sequence.name(),
                track_name=self.active_track.name())
            if len(last_track_item) == 0:
                last_timeline_out = 0
            else:
                last_track_item = last_track_item[-1]
                last_timeline_out = int(last_track_item.timelineOut()) + 1
            self.timeline_in = last_timeline_out
            self.timeline_out = last_timeline_out + int(
                self.data["assetData"]["clipOut"]
                - self.data["assetData"]["clipIn"])
        else:
            self.timeline_in = int(self.data["assetData"]["clipIn"])
            self.timeline_out = int(self.data["assetData"]["clipOut"])

        # check if slate is included
        # either in version data families or by calculating frame diff
        slate_on = next(
            # check iterate if slate is in families
            (f for f in self.context["version"]["data"]["families"]
             if "slate" in f),
            # if nothing was found then use default None
            # so other bool could be used
            None) or bool(((
                # put together duration of clip attributes
                self.timeline_out - self.timeline_in + 1)
                + self.handle_start
                + self.handle_end
                # and compare it with meda duration
            ) - self.media_duration)
        log.debug("__ slate_on: `{}`".format(slate_on))

        # if slate is on then remove the slate frame from begining
        if slate_on:
            self.media_duration -= 1
            self.handle_start += 1

        # create Clip from Media
        clip = hiero.core.Clip(self.media)
        clip.setName(self.data["clip_name"])

        # add Clip to bin if not there yet
        if self.data["clip_name"] not in [
                b.name() for b in self.active_bin.items()]:
            bin_item = hiero.core.BinItem(clip)
            self.active_bin.addItem(bin_item)

        # just make sure the clip is created
        # there were some cases were hiero was not creating it
        source_bin_item = None
        for item in self.active_bin.items():
            if self.data["clip_name"] in item.name():
                source_bin_item = item
        if not source_bin_item:
            log.warning("Problem with created Source clip: `{}`".format(
                self.data["clip_name"]))

        # include handles
        if self.with_handles:
            self.timeline_in -= self.handle_start
            self.timeline_out += self.handle_end
            self.handle_start = 0
            self.handle_end = 0

        # make track item from source in bin as item
        track_item = self._make_track_item(source_bin_item)

        log.info("Loading clips: `{}`".format(self.data["clip_name"]))

        return track_item
class Creator(avalon.Creator):
    """Wrapper around ``avalon.Creator`` adding Hiero context objects.

    On construction it loads the creator-specific presets and stores the
    current project, sequence and (optionally only selected) track items
    so concrete creators can work with them directly.
    """
    clip_color = "Purple"
    rename_index = None

    def __init__(self, *args, **kwargs):
        # deferred import: only resolvable inside the running host
        from pype.hosts import hiero as phiero
        super(Creator, self).__init__(*args, **kwargs)

        # presets for this concrete creator class from project config
        create_presets = pype.config.get_presets(
        )['plugins']["hiero"]["create"]
        self.presets = create_presets.get(self.__class__.__name__, {})

        # current context objects from the running Hiero session
        self.project = phiero.get_current_project()
        self.sequence = phiero.get_current_sequence()

        # honour the "use selection" option when collecting track items
        use_selection = (self.options or {}).get("useSelection")
        self.selected = (
            phiero.get_track_items(selected=True)
            if use_selection
            else phiero.get_track_items()
        )

        self.widget = CreatorWidget
class PublishClip:
    """
    Convert a track item to publishable instance

    Args:
        track_item (hiero.core.TrackItem): hiero track item object
        kwargs (optional): additional data needed for rename=True (presets)

    Returns:
        hiero.core.TrackItem: hiero track item object with pype tag
    """
    # NOTE(review): these are deliberately class-level (shared) mutable
    # attributes - ``vertical_clip_match`` accumulates entries across
    # instances so vertically-synced clips can find their master layer.
    # ``tag_data`` being shared across instances looks risky - confirm
    # every instance fully overwrites it before use.
    vertical_clip_match = dict()
    tag_data = dict()
    # mapping of hierarchy template token -> avalon entity type
    types = {
        "shot": "shot",
        "folder": "folder",
        "episode": "episode",
        "sequence": "sequence",
        "track": "sequence",
    }

    # parents search pattern (matches tokens like "{folder}")
    parents_search_patern = r"\{([a-z]*?)\}"

    # default templates for non-ui use
    rename_default = False
    hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
    clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
    subset_name_default = "<track_name>"
    review_track_default = "< none >"
    subset_family_default = "plate"
    count_from_default = 10
    count_steps_default = 10
    vertical_sync_default = False
    driving_layer_default = ""

    def __init__(self, cls, track_item, **kwargs):
        """Collect every attribute needed for the conversion.

        Args:
            cls (avalon.Creator): creator whose ``__dict__`` (presets,
                options, selection) is copied onto this object
            track_item (hiero.core.TrackItem): processed track item
            **kwargs: optional "avalon" tag data and "ui_inputs" dict
        """
        # populate input cls attribute onto self.[attr]
        self.__dict__.update(cls.__dict__)

        # get main parent objects
        self.track_item = track_item
        sequence_name = lib.get_current_sequence().name()
        self.sequence_name = str(sequence_name).replace(" ", "_")

        # track item (clip) main attributes
        self.ti_name = track_item.name()
        self.ti_index = int(track_item.eventNumber())

        # get track name and index
        track_name = track_item.parent().name()
        self.track_name = str(track_name).replace(" ", "_")
        self.track_index = int(track_item.parent().trackIndex())

        # adding tag.family into tag
        if kwargs.get("avalon"):
            self.tag_data.update(kwargs["avalon"])

        # adding ui inputs if any
        self.ui_inputs = kwargs.get("ui_inputs", {})

        # populate default data before we get other attributes
        self._populate_track_item_default_data()

        # use all populated default data to create all important attributes
        self._populate_attributes()

        # create parents with correct types
        self._create_parents()

    def convert(self):
        """Resolve tag data and imprint the pype tag on the track item.

        Returns:
            hiero.core.TrackItem: processed track item, or ``None`` when
            the clip sits on a review track of another driving layer.
        """
        # solve track item data and add them to tag data
        self._convert_to_tag_data()

        # if track name is in review track name and also if driving track name
        # is not in review track name: skip tag creation
        if (self.track_name in self.review_layer) and (
                self.driving_layer not in self.review_layer):
            return

        # deal with clip name
        new_name = self.tag_data.pop("newClipName")

        if self.rename:
            # rename track item
            self.track_item.setName(new_name)
            self.tag_data["asset"] = new_name
        else:
            self.tag_data["asset"] = self.ti_name

        # create pype tag on track_item and add data
        lib.imprint(self.track_item, self.tag_data)

        return self.track_item

    def _populate_track_item_default_data(self):
        """ Populate default formatting data from track item. """
        self.track_item_default_data = {
            "_folder_": "shots",
            "_sequence_": self.sequence_name,
            "_track_": self.track_name,
            "_clip_": self.ti_name,
            "_trackIndex_": self.track_index,
            "_clipIndex_": self.ti_index
        }

    def _populate_attributes(self):
        """ Populate main object attributes.

        Every value prefers the UI input ("ui_inputs") and falls back to
        the class-level ``*_default`` constant when the UI was not used.
        """
        # track item frame range and parent track name for vertical sync check
        self.clip_in = int(self.track_item.timelineIn())
        self.clip_out = int(self.track_item.timelineOut())

        # define ui inputs if non gui mode was used
        self.shot_num = self.ti_index
        log.debug(
            "____ self.shot_num: {}".format(self.shot_num))

        # ui_inputs data or default values if gui was not used
        self.rename = self.ui_inputs.get(
            "clipRename", {}).get("value") or self.rename_default
        self.clip_name = self.ui_inputs.get(
            "clipName", {}).get("value") or self.clip_name_default
        self.hierarchy = self.ui_inputs.get(
            "hierarchy", {}).get("value") or self.hierarchy_default
        self.hierarchy_data = self.ui_inputs.get(
            "hierarchyData", {}).get("value") or \
            self.track_item_default_data.copy()
        self.count_from = self.ui_inputs.get(
            "countFrom", {}).get("value") or self.count_from_default
        self.count_steps = self.ui_inputs.get(
            "countSteps", {}).get("value") or self.count_steps_default
        self.subset_name = self.ui_inputs.get(
            "subsetName", {}).get("value") or self.subset_name_default
        self.subset_family = self.ui_inputs.get(
            "subsetFamily", {}).get("value") or self.subset_family_default
        self.vertical_sync = self.ui_inputs.get(
            "vSyncOn", {}).get("value") or self.vertical_sync_default
        self.driving_layer = self.ui_inputs.get(
            "vSyncTrack", {}).get("value") or self.driving_layer_default
        self.review_track = self.ui_inputs.get(
            "reviewTrack", {}).get("value") or self.review_track_default
        self.audio = self.ui_inputs.get(
            "audio", {}).get("value") or False

        # build subset name from layer name
        if self.subset_name == "<track_name>":
            self.subset_name = self.track_name

        # create subset for publishing
        self.subset = self.subset_family + self.subset_name.capitalize()

    def _replace_hash_to_expression(self, name, text):
        """ Replace hash padding ("###") with a format expression.

        E.g. name="shot", text="sh###" -> "sh{shot:0>3}".
        """
        _spl = text.split("#")
        _len = (len(_spl) - 1)
        _repl = "{{{0}:0>{1}}}".format(name, _len)
        new_text = text.replace(("#" * _len), _repl)
        return new_text

    def _convert_to_tag_data(self):
        """ Convert internal data to tag data.

        Populating the tag data into internal variable self.tag_data
        """
        # define vertical sync attributes
        master_layer = True
        self.review_layer = ""
        if self.vertical_sync:
            # check if track name is not in driving layer
            if self.track_name not in self.driving_layer:
                # if it is not then define vertical sync as None
                master_layer = False

        # increasing steps by index of rename iteration
        self.count_steps *= self.rename_index

        hierarchy_formating_data = dict()
        _data = self.track_item_default_data.copy()
        if self.ui_inputs:
            # adding tag metadata from ui
            for _k, _v in self.ui_inputs.items():
                if _v["target"] == "tag":
                    self.tag_data[_k] = _v["value"]

            # driving layer is set as positive match
            if master_layer or self.vertical_sync:
                # mark review layer
                if self.review_track and (
                        self.review_track not in self.review_track_default):
                    # if review layer is defined and not the same as default
                    self.review_layer = self.review_track

                # shot num calculate
                if self.rename_index == 0:
                    self.shot_num = self.count_from
                else:
                    self.shot_num = self.count_from + self.count_steps

            # clip name sequence number
            _data.update({"shot": self.shot_num})

            # solve # in text to pythonic expression
            for _k, _v in self.hierarchy_data.items():
                if "#" not in _v["value"]:
                    continue
                self.hierarchy_data[
                    _k]["value"] = self._replace_hash_to_expression(
                        _k, _v["value"])

            # fill up pythonic expressions in hierarchy data
            for k, _v in self.hierarchy_data.items():
                hierarchy_formating_data[k] = _v["value"].format(**_data)
        else:
            # if no gui mode then just pass default data
            hierarchy_formating_data = self.hierarchy_data

        tag_hierarchy_data = self._solve_tag_hierarchy_data(
            hierarchy_formating_data
        )

        if master_layer and self.vertical_sync:
            # remember this master clip's range so synced layers can match it
            tag_hierarchy_data.update({"masterLayer": True})
            self.vertical_clip_match.update({
                (self.clip_in, self.clip_out): tag_hierarchy_data
            })

        if not master_layer and self.vertical_sync:
            # driving layer is set as negative match
            for (_in, _out), master_data in self.vertical_clip_match.items():
                master_data.update({
                    "masterLayer": False,
                    "review": False,
                    "audio": False
                })
                if _in == self.clip_in and _out == self.clip_out:
                    data_subset = master_data["subset"]
                    # add track index in case duplicity of names in master data
                    if self.subset in data_subset:
                        master_data["subset"] = self.subset + str(
                            self.track_index)
                    # in case track name and subset name is the same then add
                    if self.subset_name == self.track_name:
                        master_data["subset"] = self.subset
                    # assign data to return hierarchy data to tag
                    tag_hierarchy_data = master_data

        # add data to return data dict
        self.tag_data.update(tag_hierarchy_data)

        if master_layer and self.review_layer:
            self.tag_data.update({"review": self.review_layer})

    def _solve_tag_hierarchy_data(self, hierarchy_formating_data):
        """ Solve tag data from hierarchy data and templates. """
        # fill up clip name and hierarchy keys
        hierarchy_filled = self.hierarchy.format(**hierarchy_formating_data)
        clip_name_filled = self.clip_name.format(**hierarchy_formating_data)

        return {
            "newClipName": clip_name_filled,
            "hierarchy": hierarchy_filled,
            "parents": self.parents,
            "hierarchyData": hierarchy_formating_data,
            "subset": self.subset,
            "family": self.subset_family,
            "families": [self.data["family"]]
        }

    def _convert_to_entity(self, key):
        """ Converting input key to key with type. """
        # convert to entity type
        entity_type = self.types.get(key, None)

        assert entity_type, "Missing entity type for `{}`".format(
            key
        )

        return {
            "entity_type": entity_type,
            "entity_name": self.hierarchy_data[key]["value"].format(
                **self.track_item_default_data
            )
        }

    def _create_parents(self):
        """ Create parents and return it in list. """
        self.parents = list()

        # take the last "{token}" match from every hierarchy path segment
        patern = re.compile(self.parents_search_patern)
        par_split = [patern.findall(t).pop()
                     for t in self.hierarchy.split("/")]

        for key in par_split:
            parent = self._convert_to_entity(key)
            self.parents.append(parent)

View file

@ -0,0 +1,26 @@
QWidget {
font-size: 13px;
}
QSpinBox {
padding: 2;
max-width: 8em;
}
QLineEdit {
padding: 2;
min-width: 15em;
}
QVBoxLayout {
min-width: 15em;
background-color: #201f1f;
}
QComboBox {
min-width: 8em;
}
#sectionContent {
background-color: #2E2D2D;
}

View file

@ -1,261 +0,0 @@
{
"Hierarchy": {
"editable": "1",
"note": "{folder}/{sequence}/{shot}",
"icon": {
"path": "hierarchy.png"
},
"metadata": {
"folder": "FOLDER_NAME",
"shot": "{clip}",
"track": "{track}",
"sequence": "{sequence}",
"episode": "EPISODE_NAME",
"root": "{projectroot}"
}
},
"Source Resolution": {
"editable": "1",
"note": "Use source resolution",
"icon": {
"path": "resolution.png"
},
"metadata": {
"family": "resolution"
}
},
"Retiming": {
"editable": "1",
"note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)",
"icon": {
"path": "retiming.png"
},
"metadata": {
"family": "retiming",
"marginIn": 1,
"marginOut": 1
}
},
"Frame start": {
"editable": "1",
"note": "Starting frame for comps. \n\n> Use `value` and add either number or write `source` (if you want to preserve source frame numbering)",
"icon": {
"path": "icons:TagBackground.png"
},
"metadata": {
"family": "frameStart",
"value": "1001"
}
},
"[Lenses]": {
"Set lense here": {
"editable": "1",
"note": "Adjust parameters of your lense and then drop to clip. Remember! You can always overwrite on clip",
"icon": {
"path": "lense.png"
},
"metadata": {
"focalLengthMm": 57
}
}
},
"[Subsets]": {
"Audio": {
"editable": "1",
"note": "Export with Audio",
"icon": {
"path": "volume.png"
},
"metadata": {
"family": "audio",
"subset": "main"
}
},
"plateFg": {
"editable": "1",
"note": "Add to publish to \"forground\" subset. Change metadata subset name if different order number",
"icon": {
"path": "z_layer_fg.png"
},
"metadata": {
"family": "plate",
"subset": "Fg01"
}
},
"plateBg": {
"editable": "1",
"note": "Add to publish to \"background\" subset. Change metadata subset name if different order number",
"icon": {
"path": "z_layer_bg.png"
},
"metadata": {
"family": "plate",
"subset": "Bg01"
}
},
"plateRef": {
"editable": "1",
"note": "Add to publish to \"reference\" subset.",
"icon": {
"path": "icons:Reference.png"
},
"metadata": {
"family": "plate",
"subset": "Ref"
}
},
"plateMain": {
"editable": "1",
"note": "Add to publish to \"main\" subset.",
"icon": {
"path": "z_layer_main.png"
},
"metadata": {
"family": "plate",
"subset": "main"
}
},
"plateProxy": {
"editable": "1",
"note": "Add to publish to \"proxy\" subset.",
"icon": {
"path": "z_layer_main.png"
},
"metadata": {
"family": "plate",
"subset": "proxy"
}
},
"review": {
"editable": "1",
"note": "Upload to Ftrack as review component.",
"icon": {
"path": "review.png"
},
"metadata": {
"family": "review",
"track": "review"
}
}
},
"[Handles]": {
"start: add 20 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "20",
"args": "{'op':'add','where':'start'}"
}
},
"start: add 10 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "10",
"args": "{'op':'add','where':'start'}"
}
},
"start: add 5 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "5",
"args": "{'op':'add','where':'start'}"
}
},
"start: add 0 frames": {
"editable": "1",
"note": "Adding frames to start of selected clip",
"icon": {
"path": "3_add_handles_start.png"
},
"metadata": {
"family": "handles",
"value": "0",
"args": "{'op':'add','where':'start'}"
}
},
"end: add 20 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "20",
"args": "{'op':'add','where':'end'}"
}
},
"end: add 10 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "10",
"args": "{'op':'add','where':'end'}"
}
},
"end: add 5 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "5",
"args": "{'op':'add','where':'end'}"
}
},
"end: add 0 frames": {
"editable": "1",
"note": "Adding frames to end of selected clip",
"icon": {
"path": "1_add_handles_end.png"
},
"metadata": {
"family": "handles",
"value": "0",
"args": "{'op':'add','where':'end'}"
}
}
},
"NukeScript": {
"editable": "1",
"note": "Collecting track items to Nuke scripts.",
"icon": {
"path": "icons:TagNuke.png"
},
"metadata": {
"family": "nukescript",
"subset": "main"
}
},
"Comment": {
"editable": "1",
"note": "Comment on a shot.",
"icon": {
"path": "icons:TagComment.png"
},
"metadata": {
"family": "comment",
"subset": "main"
}
}
}

View file

@ -1,66 +1,107 @@
import re
import os
import json
import hiero
from pprint import pformat
from pype.api import Logger
from avalon import io
log = Logger().get_logger(__name__)
log = Logger().get_logger(__name__, "hiero")
def tag_data():
current_dir = os.path.dirname(__file__)
json_path = os.path.join(current_dir, "tags.json")
with open(json_path, "r") as json_stream:
data = json.load(json_stream)
return data
return {
"Retiming": {
"editable": "1",
"note": "Clip has retime or TimeWarp effects (or multiple effects stacked on the clip)", # noqa
"icon": "retiming.png",
"metadata": {
"family": "retiming",
"marginIn": 1,
"marginOut": 1
}
},
"[Lenses]": {
"Set lense here": {
"editable": "1",
"note": "Adjust parameters of your lense and then drop to clip. Remember! You can always overwrite on clip", # noqa
"icon": "lense.png",
"metadata": {
"focalLengthMm": 57
}
}
},
"NukeScript": {
"editable": "1",
"note": "Collecting track items to Nuke scripts.",
"icon": "icons:TagNuke.png",
"metadata": {
"family": "nukescript",
"subset": "main"
}
},
"Comment": {
"editable": "1",
"note": "Comment on a shot.",
"icon": "icons:TagComment.png",
"metadata": {
"family": "comment",
"subset": "main"
}
}
}
def create_tag(key, value):
def create_tag(key, data):
"""
Creating Tag object.
Args:
key (str): name of tag
value (dict): parameters of tag
data (dict): parameters of tag
Returns:
object: Tag object
"""
tag = hiero.core.Tag(str(key))
return update_tag(tag, value)
return update_tag(tag, data)
def update_tag(tag, value):
def update_tag(tag, data):
"""
Fixing Tag object.
Args:
tag (obj): Tag object
value (dict): parameters of tag
data (dict): parameters of tag
"""
tag.setNote(value["note"])
tag.setIcon(str(value["icon"]["path"]))
# set icon if any available in input data
if data.get("icon"):
tag.setIcon(str(data["icon"]))
# set note description of tag
tag.setNote(data["note"])
# get metadata of tag
mtd = tag.metadata()
pres_mtd = value.get("metadata", None)
if pres_mtd:
[mtd.setValue("tag.{}".format(str(k)), str(v))
for k, v in pres_mtd.items()]
# get metadata key from data
data_mtd = data.get("metadata", {})
# set all data metadata to tag metadata
for k, v in data_mtd.items():
mtd.setValue(
"tag.{}".format(str(k)),
str(v)
)
return tag
def add_tags_from_presets():
def add_tags_to_workfile():
"""
Will create default tags from presets.
"""
project = hiero.core.projects()[-1]
from .lib import get_current_project
# get project and root bin object
project = get_current_project()
root_bin = project.tagsBin()
if "Tag Presets" in project.name():
return
@ -73,7 +114,7 @@ def add_tags_from_presets():
# Get project task types.
tasks = io.find_one({"type": "project"})["config"]["tasks"]
nks_pres_tags["[Tasks]"] = {}
log.debug("__ tasks: {}".format(pformat(tasks)))
log.debug("__ tasks: {}".format(tasks))
for task_type in tasks.keys():
nks_pres_tags["[Tasks]"][task_type.lower()] = {
"editable": "1",
@ -104,24 +145,32 @@ def add_tags_from_presets():
}
}
# get project and root bin object
project = hiero.core.projects()[-1]
root_bin = project.tagsBin()
# loop trough tag data dict and create deep tag structure
for _k, _val in nks_pres_tags.items():
# check if key is not decorated with [] so it is defined as bin
bin_find = None
pattern = re.compile(r"\[(.*)\]")
bin_find = pattern.findall(_k)
bin_finds = pattern.findall(_k)
# if there is available any then pop it to string
if bin_finds:
bin_find = bin_finds.pop()
# if bin was found then create or update
if bin_find:
# check what is in root bin
root_add = False
# first check if in root lever is not already created bins
bins = [b for b in root_bin.items()
if b.name() in str(bin_find[0])]
if b.name() in str(bin_find)]
log.debug(">>> bins: {}".format(bins))
if bins:
bin = bins[0]
bin = bins.pop()
else:
# create Bin object
bin = hiero.core.Bin(str(bin_find[0]))
root_add = True
# create Bin object for processing
bin = hiero.core.Bin(str(bin_find))
# update or create tags in the bin
for k, v in _val.items():
tags = [t for t in bin.items()
if str(k) in t.name()
@ -133,13 +182,15 @@ def add_tags_from_presets():
# adding Tag to Bin
bin.addItem(tag)
else:
update_tag(tags[0], v)
update_tag(tags.pop(), v)
if not bins:
# finally add the Bin object to the root level Bin
if root_add:
# adding Tag to Root Bin
root_bin.addItem(bin)
else:
# for Tags to be created in root level Bin
# at first check if any of input data tag is not already created
tags = None
tags = [t for t in root_bin.items()
if str(_k) in t.name()]
@ -151,16 +202,18 @@ def add_tags_from_presets():
# adding Tag to Root Bin
root_bin.addItem(tag)
else:
# check if Hierarchy in name
# update Tag if already exists
# update Tags if they already exists
for _t in tags:
# skip bin objects
if isinstance(_t, hiero.core.Bin):
continue
# check if Hierarchy in name and skip it
# because hierarchy could be edited
if "hierarchy" in _t.name().lower():
continue
# update only non hierarchy tags
# because hierarchy could be edited
update_tag(_t, _val)
log.info("Default Tags were set...")

View file

@ -4,7 +4,7 @@ from avalon import api
from pype.api import Logger
log = Logger().get_logger(__name__)
log = Logger().get_logger(__name__, "hiero")
def file_extensions():

View file

@ -1,14 +0,0 @@
import pyblish.api
class CollectSubmission(pyblish.api.ContextPlugin):
    """Collect submission children.

    Stores ``hiero.submission`` (when the running Hiero exposes one)
    into ``context.data["submission"]`` for later plugins.
    """

    order = pyblish.api.CollectorOrder - 0.1

    def process(self, context):
        # deferred import: "hiero" is only importable inside the host
        import hiero

        if hasattr(hiero, "submission"):
            context.data["submission"] = hiero.submission

        # use .get() so the debug line cannot raise KeyError when the
        # running Hiero build exposes no submission object
        self.log.debug(
            "__ submission: {}".format(context.data.get("submission")))

View file

@ -1,15 +0,0 @@
import pyblish.api
import pype.api as pype
class CollectWorkfileVersion(pyblish.api.ContextPlugin):
    """Inject the current working file version into context."""

    order = pyblish.api.CollectorOrder - 0.1
    label = "Collect workfile version"

    def process(self, context):
        # resolve the version number from the active project's file path
        workfile_path = context.data('activeProject').path()
        version = int(pype.get_version_from_path(workfile_path))
        context.data["version"] = version
        self.log.info("version: {}".format(version))

View file

@ -1,30 +0,0 @@
from pyblish import api
import os
import time
class ExtractPlateCheck(api.ContextPlugin):
    """Block publishing until the exported plate file appears on disk.

    The original docstring ("Collect all Track items selection.") was a
    copy-paste leftover; this plugin polls for the rendered plate path
    stored in ``context.data["platesCheck"]``.
    """

    order = api.ExtractorOrder + 0.01
    label = "Plates Export Waiting"
    hosts = ["hiero"]
    families = ["encode"]

    def process(self, context):
        plate_path = context.data.get("platesCheck", None)

        # fixed log message typo ("Chacking" -> "Checking")
        self.log.info("Checking plate: `{}`".format(plate_path))

        # nothing to wait for when no plate path was collected
        if not plate_path:
            return

        # poll until the render shows up
        # NOTE(review): there is no timeout - a render that never
        # produces the file will wait forever; consider adding one.
        while not os.path.exists(plate_path):
            self.log.info("Waiting for plates to be rendered")
            time.sleep(5)

        if os.path.isfile(plate_path):
            self.log.info("Plates were rendered: `{}`".format(plate_path))
        else:
            # path exists but is not a regular file (e.g. a directory)
            raise ValueError("%s isn't a file!" % plate_path)

View file

@ -1,124 +0,0 @@
from pyblish import api
class ExtractTasks(api.InstancePlugin):
    """Extract tasks.

    Runs the Hiero export task attached to the instance: symlink/copy
    exporters are file-linked, transcode tasks are rendered through a
    spawned Nuke process, Nuke-shot and audio tasks are stepped until
    finished.
    """

    order = api.ExtractorOrder
    label = "Tasks"
    hosts = ["hiero"]
    families = ["clip"]
    optional = True

    def filelink(self, src, dst):
        """Hardlink *src* to *dst*, falling back to a copy on Windows
        when the filesystem refuses more links (winerror 17)."""
        import filecmp
        import os
        import shutil

        import filelink

        # Compare files to check whether they are the same.
        if os.path.exists(dst) and filecmp.cmp(src, dst):
            return

        # Remove existing destination file.
        if os.path.exists(dst):
            os.remove(dst)

        try:
            filelink.create(src, dst, filelink.HARDLINK)
            self.log.debug("Linking: \"{0}\" to \"{1}\"".format(src, dst))
        # NOTE(review): WindowsError is only defined on Windows - on
        # other platforms a failing link would raise NameError here
        except WindowsError as e:
            if e.winerror == 17:
                self.log.warning(
                    "File linking failed due to: \"{0}\". "
                    "Resorting to copying instead.".format(e)
                )
                shutil.copy(src, dst)
            else:
                raise e

    def process(self, instance):
        """Dispatch on the concrete Hiero exporter type of the task."""
        import time
        import os

        import hiero.core.nuke as nuke
        import hiero.exporters as he

        import clique

        task = instance.data["task"]

        # symlink exporter: hardlink every frame / the movie into place
        hiero_cls = he.FnSymLinkExporter.SymLinkExporter
        if isinstance(task, hiero_cls):
            src = os.path.join(
                task.filepath(),
                task.fileName()
            )

            # Filelink each image file
            if "img" in instance.data["families"]:
                collection = clique.parse(src + " []")
                # NOTE(review): this loop only rebinds "f" and has no
                # visible effect - looks like truncated code that should
                # feed "collection"; confirm against upstream history
                for f in os.listdir(os.path.dirname(src)):
                    f = os.path.join(os.path.dirname(src), f)
                # map input frame numbers onto the output numbering
                frame_offset = task.outputRange()[0] - task.inputRange()[0]
                input_range = (
                    int(task.inputRange()[0]), int(task.inputRange()[1]) + 1
                )
                for index in range(*input_range):
                    dst = task.resolvedExportPath() % (index + frame_offset)
                    self.filelink(src % index, dst)

            # Filelink movie file
            if "mov" in instance.data["families"]:
                dst = task.resolvedExportPath()
                self.filelink(src, dst)

        # transcode exporter: step the task, then render its generated
        # .nk script in a child Nuke process
        hiero_cls = he.FnTranscodeExporter.TranscodeExporter
        if isinstance(task, hiero_cls):
            task.startTask()
            while task.taskStep():
                time.sleep(1)

            script_path = task._scriptfile
            log_path = script_path.replace(".nk", ".log")
            log_file = open(log_path, "w")
            process = nuke.executeNukeScript(script_path, log_file, True)
            # block until the spawned Nuke process exits
            self.poll(process)
            log_file.close()

            # clean up the throwaway script unless the preset keeps it
            if not task._preset.properties()["keepNukeScript"]:
                os.remove(script_path)
                os.remove(log_path)

        # nuke-shot exporter: just step the task to completion
        hiero_cls = he.FnNukeShotExporter.NukeShotExporter
        if isinstance(task, hiero_cls):
            task.startTask()
            while task.taskStep():
                time.sleep(1)

        # audio exporter: same stepping loop
        hiero_cls = he.FnAudioExportTask.AudioExportTask
        if isinstance(task, hiero_cls):
            task.startTask()
            while task.taskStep():
                time.sleep(1)

        # Fill collection with output
        if "img" in instance.data["families"]:
            collection = instance.data["collection"]
            path = os.path.dirname(collection.format())
            for f in os.listdir(path):
                file_path = os.path.join(path, f).replace("\\", "/")
                if collection.match(file_path):
                    collection.add(file_path)

    def poll(self, process):
        """Recursively wait (1s steps) until *process* has exited."""
        import time

        returnCode = process.poll()

        # if the return code hasn't been set, Nuke is still running
        if returnCode is None:
            time.sleep(1)
            self.poll(process)

View file

@ -1,26 +0,0 @@
- tags get tasks
- collect_subset(instance):
- gets presets for subset by tasks
- creates instances for comp .nk, plates (main instance converted to plates)
- add families:
- .nk compositing script [workfile, ftrack]
- plates [plates]
- audio [audio]
- extract_submit_frameserver(instance)
- families [plates]
- adds .nk script created only for encoding (plates write) no color correction
- adds .nk script created only for encoding (mov write)
- adds .nk script created only for encoding (jpg, thumbnail)
- _______
- from hiero.ui.nuke_bridge import FnNsFrameServer
- FnNsFrameServer.renderFrames(nks, "1-10", "Write_exr", ["main"])
- dict(script(str), framerange(str), writeNode(str), views(list))
# next step ########################################################
- submit_exporting_task(instance)
- families [workfile]
- create compositing scripts
- create inventory containers for Reads
- create publishable write nodes

View file

@ -1,40 +0,0 @@
from pyblish import api
class RepairProjectRoot(api.Action):
    """Repair action: point the project root at the workfile directory."""

    label = "Repair"
    icon = "wrench"
    on = "failed"

    def process(self, context, plugin):
        import os

        # single-argument os.path.join() was a no-op wrapper; the
        # project root is simply the current workfile's directory
        project_root = os.path.dirname(context.data["currentFile"])

        context.data["activeProject"].setProjectRoot(project_root)
class ValidateProjectRoot(api.ContextPlugin):
    """Validate the project root to the workspace directory.

    Fails when the Hiero project root differs from the directory of the
    current workfile; the RepairProjectRoot action fixes it.
    """

    order = api.ValidatorOrder
    label = "Project Root"
    hosts = ["hiero"]
    actions = [RepairProjectRoot]

    def process(self, context):
        import os

        # single-argument os.path.join() was a no-op wrapper
        workspace = os.path.dirname(context.data["currentFile"])
        project_root = context.data["activeProject"].projectRoot()

        failure_message = (
            'The project root needs to be "{0}", its currently: "{1}"'
        ).format(workspace, project_root)

        assert project_root == workspace, failure_message

View file

@ -1,27 +0,0 @@
from pyblish import api
class ValidateResolvedPaths(api.ContextPlugin):
    """Ensure no two task instances resolve to the same output path."""

    order = api.ValidatorOrder
    label = "Resolved Paths"
    hosts = ["hiero"]

    def process(self, context):
        import os
        import collections

        # absolute export paths of all "trackItem.task" instances
        paths = [
            os.path.abspath(instance.data["task"].resolvedExportPath())
            for instance in context
            if instance.data["family"] == "trackItem.task"
        ]

        # any path seen more than once is an overlap
        counts = collections.Counter(paths)
        duplicates = [path for path, count in counts.items() if count > 1]

        msg = "Duplicate output paths found: {0}".format(duplicates)
        assert not duplicates, msg

View file

@ -1,57 +0,0 @@
from pyblish import api
class ValidateOutputRange(api.InstancePlugin):
    """Validate the output range of the task.

    This compares the output range and clip associated with the task, so see
    whether there is a difference. This difference indicates that the user has
    selected to export the clip length for the task which is very uncommon to
    do.
    """

    order = api.ValidatorOrder
    families = ["trackItem.task"]
    label = "Output Range"
    hosts = ["hiero"]
    optional = True

    def process(self, instance):
        task = instance.data["task"]
        item = instance.data["parent"]

        output_range = task.outputRange()
        # full source-clip length in frames (inclusive range)
        first_frame = int(item.data["item"].source().sourceIn())
        last_frame = int(item.data["item"].source().sourceOut())
        clip_duration = last_frame - first_frame + 1

        # zero difference means the task output spans the whole source
        # clip rather than the cut - that is what we flag here, so the
        # assert requires a NON-zero difference to pass
        difference = clip_duration - output_range[1]
        failure_message = (
            'Looks like you are rendering the clip length for the task '
            'rather than the cut length. If this is intended, just uncheck '
            'this validator after resetting, else adjust the export range in '
            'the "Handles" section of the export dialog.'
        )
        assert difference, failure_message
class ValidateImageSequence(api.InstancePlugin):
    """Check that the image sequence output template contains padding."""

    order = api.ValidatorOrder
    families = ["trackItem.task", "img"]
    match = api.Subset
    label = "Image Sequence"
    hosts = ["hiero"]
    optional = True

    def process(self, instance):
        export_path = instance.data["task"].resolvedExportPath()

        # either hash padding ("####") or printf padding ("%04d") counts
        has_padding = any(token in export_path for token in ("#", "%"))

        msg = (
            "Image sequence output is missing a padding. Please add \"####\" "
            "or \"%04d\" to the output templates."
        )
        assert has_padding, msg

View file

@ -1,46 +0,0 @@
from pyblish import api
class ValidateClip(api.InstancePlugin):
    """Validate the track item to the sequence.

    Exact matching to optimize processing.
    """

    order = api.ValidatorOrder
    families = ["clip"]
    # match = api.Exact
    label = "Validate Track Item"
    hosts = ["hiero"]
    optional = True

    def process(self, instance):
        item = instance.data["item"]
        self.log.info("__ item: {}".format(item))

        media_source = item.source().mediaSource()
        self.log.info("__ media_source: {}".format(media_source))

        # {0}=setting name, {1}=track item value, {2}=sequence value
        msg = (
            'A setting does not match between track item "{0}" and sequence '
            '"{1}".'.format(item.name(), item.sequence().name()) +
            '\n\nSetting: "{0}".''\n\nTrack item: "{1}".\n\nSequence: "{2}".'
        )

        # Validate format settings.
        # BUG FIX: the width/height/pixelAspect messages previously passed
        # the sequence value as {1} ("Track item") and the media value as
        # {2} ("Sequence") - swapped relative to the labels.  The
        # framerate check below already used the correct order.
        fmt = item.sequence().format()
        assert fmt.width() == media_source.width(), msg.format(
            "width", media_source.width(), fmt.width()
        )
        assert fmt.height() == media_source.height(), msg.format(
            "height", media_source.height(), fmt.height()
        )
        assert fmt.pixelAspect() == media_source.pixelAspect(), msg.format(
            "pixelAspect", media_source.pixelAspect(), fmt.pixelAspect()
        )

        # Validate framerate setting.
        sequence = item.sequence()
        source_framerate = media_source.metadata()["foundry.source.framerate"]
        assert sequence.framerate() == source_framerate, msg.format(
            "framerate", source_framerate, sequence.framerate()
        )

View file

@ -1,21 +0,0 @@
from pyblish import api
class ValidateViewerLut(api.ContextPlugin):
    """Validate viewer lut in Hiero is the same as in Nuke.

    Currently only asserts that the Hiero viewer LUT is an RGB one; the
    Nuke-side comparison is left commented out below.
    """

    order = api.ValidatorOrder
    label = "Viewer LUT"
    hosts = ["hiero"]
    optional = True

    def process(self, context):
        # removed unused "import nuke" / "import hiero" - only the
        # commented-out comparison below referenced nuke
        # nuke_lut = nuke.ViewerProcess.node()["current"].value()
        hiero_lut = context.data["activeProject"].lutSettingViewer()
        self.log.info("__ hiero_lut: {}".format(hiero_lut))

        msg = "Viewer LUT can only be RGB"
        assert "RGB" in hiero_lut, msg

View file

@ -0,0 +1,254 @@
import pype.hosts.hiero.api as phiero
# from pype.hosts.hiero.api import plugin, lib
# reload(lib)
# reload(plugin)
# reload(phiero)
class CreateShotClip(phiero.Creator):
    """Publishable clip creator.

    Opens a settings widget over the selected track items and converts
    each of them into a publishable clip (a Pype tag carrying hierarchy,
    rename and publish attributes) via ``phiero.PublishClip``.
    """

    label = "Create Publishable Clip"
    family = "clip"
    icon = "film"
    defaults = ["Main"]

    # Names of all video tracks of the currently active sequence,
    # offered in the UI as vertical-sync / review-track choices.
    gui_tracks = [track.name()
                  for track in phiero.get_current_sequence().videoTracks()]
    gui_name = "Pype publish attributes creator"
    gui_info = "Define sequential rename and fill hierarchy data."
    gui_inputs = {
        "renameHierarchy": {
            "type": "section",
            "label": "Shot Hierarchy And Rename Settings",
            "target": "ui",
            "order": 0,
            "value": {
                "hierarchy": {
                    "value": "{folder}/{sequence}",
                    "type": "QLineEdit",
                    "label": "Shot Parent Hierarchy",
                    "target": "tag",
                    "toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section",  # noqa
                    "order": 0},
                "clipRename": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Rename clips",
                    "target": "ui",
                    "toolTip": "Renaming selected clips on fly",  # noqa
                    "order": 1},
                "clipName": {
                    "value": "{sequence}{shot}",
                    "type": "QLineEdit",
                    "label": "Clip Name Template",
                    "target": "ui",
                    "toolTip": "template for creating shot names, used for renaming (use rename: on)",  # noqa
                    "order": 2},
                "countFrom": {
                    "value": 10,
                    "type": "QSpinBox",
                    "label": "Count sequence from",
                    "target": "ui",
                    "toolTip": "Set from which number the sequence count starts",  # noqa
                    "order": 3},
                "countSteps": {
                    "value": 10,
                    "type": "QSpinBox",
                    "label": "Stepping number",
                    "target": "ui",
                    "toolTip": "What number is adding every new step",  # noqa
                    "order": 4},
            }
        },
        "hierarchyData": {
            "type": "dict",
            "label": "Shot Template Keywords",
            "target": "tag",
            "order": 1,
            "value": {
                "folder": {
                    "value": "shots",
                    "type": "QLineEdit",
                    "label": "{folder}",
                    "target": "tag",
                    "toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 0},
                "episode": {
                    "value": "ep01",
                    "type": "QLineEdit",
                    "label": "{episode}",
                    "target": "tag",
                    "toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 1},
                "sequence": {
                    "value": "sq01",
                    "type": "QLineEdit",
                    "label": "{sequence}",
                    "target": "tag",
                    "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 2},
                "track": {
                    "value": "{_track_}",
                    "type": "QLineEdit",
                    "label": "{track}",
                    "target": "tag",
                    "toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 3},
                "shot": {
                    "value": "sh###",
                    "type": "QLineEdit",
                    "label": "{shot}",
                    "target": "tag",
                    "toolTip": "Name of shot. `#` is converted to padded number. \nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)",  # noqa
                    "order": 4}
            }
        },
        "verticalSync": {
            "type": "section",
            "label": "Vertical Synchronization Of Attributes",
            "target": "ui",
            "order": 2,
            "value": {
                "vSyncOn": {
                    "value": True,
                    "type": "QCheckBox",
                    "label": "Enable Vertical Sync",
                    "target": "ui",
                    "toolTip": "Switch on if you want clips above each other to share its attributes",  # noqa
                    "order": 0},
                "vSyncTrack": {
                    "value": gui_tracks,  # noqa
                    "type": "QComboBox",
                    "label": "Master track",
                    "target": "ui",
                    "toolTip": "Select driving track name which should be mastering all others",  # noqa
                    "order": 1}
            }
        },
        "publishSettings": {
            "type": "section",
            "label": "Publish Settings",
            "target": "ui",
            "order": 3,
            "value": {
                "subsetName": {
                    # NOTE: duplicate "bg" entry removed from the choices.
                    "value": ["<track_name>", "main", "bg", "fg",
                              "animatic"],
                    "type": "QComboBox",
                    "label": "Subset Name",
                    "target": "ui",
                    "toolTip": "choose subset name pattern, if <track_name> is selected, name of track layer will be used",  # noqa
                    "order": 0},
                "subsetFamily": {
                    "value": ["plate", "take"],
                    "type": "QComboBox",
                    "label": "Subset Family",
                    "target": "ui", "toolTip": "What use of this subset is for",  # noqa
                    "order": 1},
                "reviewTrack": {
                    "value": ["< none >"] + gui_tracks,
                    "type": "QComboBox",
                    "label": "Use Review Track",
                    "target": "ui",
                    "toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.",  # noqa
                    "order": 2},
                "audio": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Include audio",
                    "target": "tag",
                    "toolTip": "Process subsets with corresponding audio",  # noqa
                    "order": 3},
                "sourceResolution": {
                    "value": False,
                    "type": "QCheckBox",
                    "label": "Source resolution",
                    "target": "tag",
                    "toolTip": "Is resolution taken from timeline or source?",  # noqa
                    "order": 4},
            }
        },
        "frameRangeAttr": {
            "type": "section",
            "label": "Shot Attributes",
            "target": "ui",
            "order": 4,
            "value": {
                "workfileFrameStart": {
                    "value": 1001,
                    "type": "QSpinBox",
                    "label": "Workfiles Start Frame",
                    "target": "tag",
                    "toolTip": "Set workfile starting frame number",  # noqa
                    "order": 0},
                "handleStart": {
                    "value": 0,
                    "type": "QSpinBox",
                    "label": "Handle Start",
                    "target": "tag",
                    "toolTip": "Handle at start of clip",  # noqa
                    "order": 1},
                "handleEnd": {
                    "value": 0,
                    "type": "QSpinBox",
                    "label": "Handle End",
                    "target": "tag",
                    "toolTip": "Handle at end of clip",  # noqa
                    "order": 2},
            }
        }
    }

    # Overridden from settings by the host integration; may stay None.
    presets = None

    def process(self):
        # Get key pairs from presets and match them onto the ui inputs.
        # Guard against `presets` never having been assigned (default None).
        presets = self.presets or {}
        for k, v in self.gui_inputs.items():
            if v["type"] in ("dict", "section"):
                # nested dictionary (only one level allowed
                # for sections and dict)
                for _k, _v in v["value"].items():
                    if presets.get(_k):
                        self.gui_inputs[k][
                            "value"][_k]["value"] = presets[_k]
            if presets.get(k):
                self.gui_inputs[k]["value"] = presets[k]

        # open widget for plugins inputs
        widget = self.widget(self.gui_name, self.gui_info, self.gui_inputs)
        widget.exec_()

        if len(self.selected) < 1:
            return

        if not widget.result:
            print("Operation aborted")
            return

        self.rename_add = 0

        # get ui output for track name for vertical sync
        v_sync_track = widget.result["vSyncTrack"]["value"]

        # sort selected track items: items on the vertical-sync (master)
        # track first so they drive the shared attributes
        sorted_selected_track_items = list()
        unsorted_selected_track_items = list()
        for _ti in self.selected:
            if _ti.parent().name() in v_sync_track:
                sorted_selected_track_items.append(_ti)
            else:
                unsorted_selected_track_items.append(_ti)

        sorted_selected_track_items.extend(unsorted_selected_track_items)

        kwargs = {
            "ui_inputs": widget.result,
            "avalon": self.data
        }

        for i, track_item in enumerate(sorted_selected_track_items):
            self.rename_index = i
            # convert track item to timeline media pool item
            phiero.PublishClip(self, track_item, **kwargs).convert()

View file

@ -0,0 +1,167 @@
from avalon import io, api
import pype.hosts.hiero.api as phiero
# reload(phiero)
class LoadClip(phiero.SequenceLoader):
    """Load a subset to timeline as clip.

    Place clip to timeline on its asset origin timings collected
    during conforming to project.
    """

    families = ["render2d", "source", "plate", "render", "review"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png", "h264"]

    label = "Load as clip"
    order = -10
    icon = "code-fork"
    color = "orange"

    # for loader multiselection
    sequence = None
    track = None

    # presets
    clip_color_last = "green"
    clip_color = "red"

    def load(self, context, name, namespace, options):
        """Load the representation as a new track item and containerise it."""
        # in case loader uses multiselection
        if self.track and self.sequence:
            options.update({
                "sequence": self.sequence,
                "track": self.track
            })

        # load clip to timeline and get main variables
        track_item = phiero.ClipLoader(self, context, **options).load()
        namespace = namespace or track_item.name()
        version = context['version']
        version_data = version.get("data", {})
        version_name = version.get("name", None)
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {}
        for key in add_keys:
            data_imprint.update({
                key: version_data.get(key, str(None))
            })

        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": object_name
        })

        # update color of clip regarding the version order
        self.set_item_color(track_item, version)

        # deal with multiselection
        self.multiselection(track_item)

        self.log.info("Loader done: `{}`".format(name))

        return phiero.containerise(
            track_item,
            name, namespace, context,
            self.__class__.__name__,
            data_imprint)

    def switch(self, container, representation):
        self.update(container, representation)

    def update(self, container, representation):
        """Update a previously loaded clip to a new representation."""
        # load clip to timeline and get main variables
        name = container['name']
        namespace = container['namespace']
        track_item = phiero.get_track_items(
            track_item_name=namespace)
        version = io.find_one({
            "type": "version",
            "_id": representation["parent"]
        })
        version_data = version.get("data", {})
        version_name = version.get("name", None)
        colorspace = version_data.get("colorspace", None)
        object_name = "{}_{}".format(name, namespace)
        # renamed from `file`: avoid shadowing the builtin
        filepath = api.get_representation_path(
            representation).replace("\\", "/")

        # reconnect media to new path
        track_item.source().reconnectMedia(filepath)

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        data_imprint = {}
        for key in add_keys:
            data_imprint.update({
                key: version_data.get(key, str(None))
            })

        # add variables related to version context
        data_imprint.update({
            "representation": str(representation["_id"]),
            "version": version_name,
            "colorspace": colorspace,
            "objectName": object_name
        })

        # update color of clip regarding the version order
        self.set_item_color(track_item, version)

        return phiero.update_container(track_item, data_imprint)

    def remove(self, container):
        """Remove a previously loaded clip from its track."""
        # load clip to timeline and get main variables
        namespace = container['namespace']
        track_item = phiero.get_track_items(
            track_item_name=namespace)
        track = track_item.parent()

        # remove track item from track
        track.removeItem(track_item)

    @classmethod
    def multiselection(cls, track_item):
        # remember the first loaded clip's track/sequence so subsequent
        # loads in the same multiselection land on the same track
        if not cls.track:
            cls.track = track_item.parent()
            cls.sequence = cls.track.parent()

    @classmethod
    def set_item_color(cls, track_item, version):
        """Color the clip's bin item by whether it is the latest version."""
        # define version name
        version_name = version.get("name", None)
        # get all versions in list
        versions = io.find({
            "type": "version",
            "parent": version["parent"]
        }).distinct('name')

        max_version = max(versions)

        # set clip colour
        if version_name == max_version:
            track_item.source().binItem().setColor(cls.clip_color_last)
        else:
            track_item.source().binItem().setColor(cls.clip_color)

View file

@ -1,49 +0,0 @@
from avalon import api
import hiero
from pype.hosts.hiero.api import lib
reload(lib)
class LoadSequencesToTimelineAssetOrigin(api.Loader):
    """Load an image sequence into the Hiero timeline.

    The clip is placed on the timeline at the asset-origin timings that
    were collected while conforming the project.
    """

    families = ["render2d", "source", "plate", "render"]
    representations = ["exr", "dpx", "jpg", "jpeg", "png"]

    label = "Load to timeline with shot origin timing"
    order = -10
    icon = "code-fork"
    color = "orange"

    def load(self, context, name, namespace, data):
        # remember which Hiero workfile the clip was loaded into
        data["hieroWorkfileName"] = hiero.ui.activeProject().name()

        self.log.debug("_ context: `{}`".format(context))
        self.log.debug("_ representation._id: `{}`".format(
            context["representation"]["_id"]))

        # delegate the actual timeline placement to the clip loader helper
        lib.ClipLoader(self, context, **data).load()

        self.log.info("Loader done: `{}`".format(name))

    def switch(self, container, representation):
        self.update(container, representation)

    def update(self, container, representation):
        """Updating previously loaded clips."""
        pass

    def remove(self, container):
        """Removing previously loaded clips."""
        pass

View file

@ -1,14 +0,0 @@
import pyblish.api
class CollectActiveProject(pyblish.api.ContextPlugin):
    """Store the currently active Hiero project on the publishing context."""

    label = "Collect Active Project"
    order = pyblish.api.CollectorOrder - 0.2

    def process(self, context):
        # deferred import: `hiero` is only importable inside the host
        import hiero

        active_project = hiero.ui.activeSequence().project()
        context.data["activeProject"] = active_project
        self.log.info("activeProject: {}".format(active_project))

View file

@ -30,9 +30,12 @@ class CollectAssetBuilds(api.ContextPlugin):
# Exclude non-tagged instances.
tagged = False
asset_names = []
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
if family.lower() == "assetbuild":
t_metadata = dict(tag.metadata())
t_family = t_metadata.get("tag.family", "")
if t_family.lower() == "assetbuild":
asset_names.append(tag["name"])
tagged = True

View file

@ -1,55 +0,0 @@
from pyblish import api
import os
class CollectAudio(api.InstancePlugin):
    """Collect audio from tags.

    Tag is expected to have metadata:
        {
            "family": "audio",
            "subset": "main"
        }
    """

    # Run just before CollectSubsets
    order = api.CollectorOrder + 0.1021
    label = "Collect Audio"
    hosts = ["hiero"]
    families = ["clip"]

    def process(self, instance):
        # Exclude non-tagged instances.
        tagged = False
        subset = "Main"
        for tag in instance.data["tags"]:
            tag_data = dict(tag["metadata"])
            family = tag_data.get("tag.family", "")
            if family.lower() == "audio":
                subset = tag_data.get("tag.subset", "Main")
                tagged = True

        if not tagged:
            self.log.debug(
                "Skipping \"{}\" because its not tagged with "
                "\"audio\"".format(instance)
            )
            return

        # Collect data.
        # NOTE: `iteritems()` is Python 2 only; `items()` keeps this
        # plugin working under Python 3 as well.
        data = {}
        for key, value in instance.data.items():
            data[key] = value

        data["family"] = "audio"
        data["families"] = ["ftrack"]
        data["subset"] = "audio" + subset.title()
        data["source"] = data["sourcePath"]
        data["label"] = "{} - {} - ({})".format(
            data['asset'], data["subset"], os.path.splitext(data["sourcePath"])[
                1]
        )

        self.log.debug("Creating instance with data: {}".format(data))
        instance.context.create_instance(**data)

View file

@ -0,0 +1,164 @@
import re
import pyblish.api
class CollectClipEffects(pyblish.api.InstancePlugin):
    """Collect soft effects instances.

    Gathers the soft-effect items that visually apply to a clip (its own
    linked effects plus effects on tracks at or above the clip's track),
    serialises their nodes and creates a new "effect" instance carrying
    them.
    """

    order = pyblish.api.CollectorOrder - 0.508
    label = "Collect Clip Effects Instances"
    families = ["clip"]

    def process(self, instance):
        family = "effect"
        effects = {}
        review = instance.data["review"]
        review_track_index = instance.context.data.get("reviewTrackIndex")
        item = instance.data["item"]

        # frame range (stored on self so the helpers below can use it)
        self.handle_start = instance.data["handleStart"]
        self.handle_end = instance.data["handleEnd"]
        self.clip_in = int(item.timelineIn())
        self.clip_out = int(item.timelineOut())
        self.clip_in_h = self.clip_in - self.handle_start
        self.clip_out_h = self.clip_out + self.handle_end

        track = instance.data["trackItem"]
        track_index = track.trackIndex()
        tracks_effect_items = instance.context.data.get("tracksEffectItems")
        clip_effect_items = instance.data.get("clipEffectItems")

        # add clips effects to track's:
        if clip_effect_items:
            tracks_effect_items[track_index] = clip_effect_items

        # process all effects and divide them to instance
        for _track_index, sub_track_items in tracks_effect_items.items():
            # skip if track index is the same as review track index
            if review and review_track_index == _track_index:
                continue
            for sitem in sub_track_items:
                # only effects on the clip's own track or tracks above apply
                if not (track_index <= _track_index):
                    continue
                effect = self.add_effect(_track_index, sitem)
                if effect:
                    effects.update(effect)

        # skip any without effects
        if not effects:
            return

        subset = instance.data.get("subset")
        effects.update({"assignTo": subset})

        # derive the effect instance's subset name from the clip subset,
        # e.g. "plateMain" -> "effectPlateMain"
        subset_split = re.findall(r'[A-Z][^A-Z]*', subset)

        if len(subset_split) > 0:
            root_name = subset.replace(subset_split[0], "")
            subset_split.insert(0, root_name.capitalize())

        subset_split.insert(0, "effect")
        name = "".join(subset_split)

        # create new instance and inherit data
        data = {}
        for key, value in instance.data.items():
            if "clipEffectItems" in key:
                continue
            data[key] = value

        # change names
        data["subset"] = name
        data["family"] = family
        data["families"] = [family]
        data["name"] = data["subset"] + "_" + data["asset"]
        data["label"] = "{} - {}".format(
            data['asset'], data["subset"]
        )
        data["effects"] = effects

        # create new instance
        _instance = instance.context.create_instance(**data)
        self.log.info("Created instance `{}`".format(_instance))
        self.log.debug("instance.data `{}`".format(_instance.data))

    def test_overlap(self, effect_t_in, effect_t_out):
        """Return True if the effect's timeline range touches the clip's."""
        covering_exp = bool(
            (effect_t_in <= self.clip_in)
            and (effect_t_out >= self.clip_out)
        )
        overlaying_right_exp = bool(
            (effect_t_in < self.clip_out)
            and (effect_t_out >= self.clip_out)
        )
        overlaying_left_exp = bool(
            (effect_t_out > self.clip_in)
            and (effect_t_in <= self.clip_in)
        )

        return any((
            covering_exp,
            overlaying_right_exp,
            overlaying_left_exp
        ))

    def add_effect(self, track_index, sitem):
        """Serialise one effect item; return ``{name: data}`` or None."""
        track = sitem.parentTrack().name()
        # node serialization
        node = sitem.node()
        node_serialized = self.node_serialisation(node)
        node_name = sitem.name()
        node_class = re.sub(r"\d+", "", node_name)

        # collect timelineIn/Out
        effect_t_in = int(sitem.timelineIn())
        effect_t_out = int(sitem.timelineOut())

        if not self.test_overlap(effect_t_in, effect_t_out):
            return

        self.log.debug("node_name: `{}`".format(node_name))

        return {node_name: {
            "class": node_class,
            "timelineIn": effect_t_in,
            "timelineOut": effect_t_out,
            "subTrackIndex": sitem.subTrackIndex(),
            "trackIndex": track_index,
            "track": track,
            "node": node_serialized
        }}

    def node_serialisation(self, node):
        """Turn an effect node into a plain dict of knob values."""
        node_serialized = {}

        # adding ignoring knob keys
        _ignoring_keys = ['invert_mask', 'help', 'mask',
                          'xpos', 'ypos', 'layer', 'process_mask', 'channel',
                          'channels', 'maskChannelMask', 'maskChannelInput',
                          'note_font', 'note_font_size', 'unpremult',
                          'postage_stamp_frame', 'maskChannel', 'export_cc',
                          'select_cccid', 'mix', 'version', 'matrix']

        # loop trough all knobs and collect not ignored
        # and any with any value
        for knob in node.knobs().keys():
            # skip nodes in ignore keys
            if knob in _ignoring_keys:
                continue

            # get animation if node is animated
            if node[knob].isAnimated():
                # grab animation including handles
                # BUGFIX: the range previously ended at `clip_in_h + 1`,
                # sampling a single frame instead of the whole
                # handle-extended clip range.
                knob_anim = [node[knob].getValueAt(i)
                             for i in range(
                                 self.clip_in_h, self.clip_out_h + 1)]

                node_serialized[knob] = knob_anim
            else:
                node_serialized[knob] = node[knob].value()

        return node_serialized

View file

@ -4,18 +4,35 @@ import pyblish.api
class CollectClipResolution(pyblish.api.InstancePlugin):
"""Collect clip geometry resolution"""
order = pyblish.api.CollectorOrder + 0.101
order = pyblish.api.CollectorOrder - 0.1
label = "Collect Clip Resoluton"
hosts = ["hiero"]
families = ["clip"]
def process(self, instance):
sequence = instance.context.data['activeSequence']
item = instance.data["item"]
source_resolution = instance.data.get("sourceResolution", None)
resolution_width = int(sequence.format().width())
resolution_height = int(sequence.format().height())
pixel_aspect = sequence.format().pixelAspect()
instance.data.update({
# source exception
if source_resolution:
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = item.source().mediaSource().pixelAspect()
resolution_data = {
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
})
}
# add to instacne data
instance.data.update(resolution_data)
self.log.info("Resolution of instance '{}' is: {}".format(
instance,
resolution_data
))

View file

@ -1,181 +0,0 @@
import os
from pyblish import api
import hiero
import nuke
class CollectClips(api.ContextPlugin):
    """Collect all Track items selection."""

    order = api.CollectorOrder + 0.01
    label = "Collect Clips"
    hosts = ["hiero"]

    def process(self, context):
        # create asset_names conversion table
        if not context.data.get("assetsShared"):
            self.log.debug("Created `assetsShared` in context")
            context.data["assetsShared"] = dict()

        projectdata = context.data["projectEntity"]["data"]
        sequence = context.data.get("activeSequence")
        selection = context.data.get("selection")
        # track index -> list of soft-effect sub-track items
        track_effects = dict()

        # collect all trackItems as instances
        for track_index, video_track in enumerate(sequence.videoTracks()):
            items = video_track.items()
            sub_items = video_track.subTrackItems()

            for item in items:
                data = dict()
                # compare with selection or if disabled
                if item not in selection or not item.isEnabled():
                    continue

                # Skip audio track items
                # Try/Except is to handle items types, like EffectTrackItem
                try:
                    media_type = "core.Hiero.Python.TrackItem.MediaType.kVideo"
                    if str(item.mediaType()) != media_type:
                        continue
                except Exception:
                    continue

                asset = item.name()
                track = item.parent()
                source = item.source().mediaSource()
                source_path = source.firstpath()
                clip_in = int(item.timelineIn())
                clip_out = int(item.timelineOut())
                file_head = source.filenameHead()
                file_info = next((f for f in source.fileinfos()), None)
                source_first_frame = int(file_info.startFrame())
                is_sequence = False

                self.log.debug(
                    "__ assets_shared: {}".format(
                        context.data["assetsShared"]))

                # Check for clips with the same range
                # this is for testing if any vertically neighbouring
                # clips has been already processed
                clip_matching_with_range = next(
                    (k for k, v in context.data["assetsShared"].items()
                     if (v.get("_clipIn", 0) == clip_in)
                     and (v.get("_clipOut", 0) == clip_out)
                     ), False)

                # check if clip name is the same in matched
                # vertically neighbouring clip
                # if it is then it is correct and resent variable to False
                # not to be rised wrong name exception
                if asset in str(clip_matching_with_range):
                    clip_matching_with_range = False

                # rise wrong name exception if found one
                assert (not clip_matching_with_range), (
                    "matching clip: {asset}"
                    " timeline range ({clip_in}:{clip_out})"
                    " conflicting with {clip_matching_with_range}"
                    " >> rename any of clips to be the same as the other <<"
                ).format(
                    **locals())

                if not source.singleFile():
                    # NOTE(review): this branch runs when the source is NOT
                    # a single file (i.e. an image sequence) yet logs
                    # "Single file" -- the message looks inverted; confirm.
                    self.log.info("Single file")
                    is_sequence = True
                    source_path = file_info.filename()

                effects = [f for f in item.linkedItems()
                           if f.isEnabled()
                           if isinstance(f, hiero.core.EffectTrackItem)]

                # If source is *.nk its a comp effect and we need to fetch the
                # write node output. This should be improved by parsing the script
                # rather than opening it.
                if source_path.endswith(".nk"):
                    nuke.scriptOpen(source_path)
                    # There should noly be one.
                    write_node = nuke.allNodes(filter="Write")[0]
                    path = nuke.filename(write_node)

                    if "%" in path:
                        # Get start frame from Nuke script and use the item source
                        # in/out, because you can have multiple shots covered with
                        # one nuke script.
                        start_frame = int(nuke.root()["first_frame"].getValue())
                        if write_node["use_limit"].getValue():
                            start_frame = int(write_node["first"].getValue())

                        path = path % (start_frame + item.sourceIn())

                    source_path = path
                    self.log.debug(
                        "Fetched source path \"{}\" from \"{}\" in "
                        "\"{}\".".format(
                            source_path, write_node.name(), source.firstpath()
                        )
                    )

                data.update({
                    "name": "{0}_{1}".format(track.name(), item.name()),
                    "item": item,
                    "source": source,
                    "timecodeStart": str(source.timecodeStart()),
                    "timelineTimecodeStart": str(sequence.timecodeStart()),
                    "sourcePath": source_path,
                    "sourceFileHead": file_head,
                    "isSequence": is_sequence,
                    "track": track.name(),
                    "trackIndex": track_index,
                    "sourceFirst": source_first_frame,
                    "effects": effects,
                    "sourceIn": int(item.sourceIn()),
                    "sourceOut": int(item.sourceOut()),
                    "mediaDuration": int(source.duration()),
                    "clipIn": clip_in,
                    "clipOut": clip_out,
                    "clipDuration": (
                        int(item.timelineOut()) - int(
                            item.timelineIn())) + 1,
                    "asset": asset,
                    "family": "clip",
                    "families": [],
                    "handleStart": int(projectdata.get("handleStart", 0)),
                    "handleEnd": int(projectdata.get("handleEnd", 0)),
                    "fps": context.data["fps"]
                })
                instance = context.create_instance(**data)

                self.log.info("Created instance: {}".format(instance))
                self.log.info("Created instance.data: {}".format(instance.data))
                self.log.debug(">> effects: {}".format(instance.data["effects"]))

                # remember this clip's timeline range so vertically
                # neighbouring clips can be matched against it later
                context.data["assetsShared"][asset] = {
                    "_clipIn": clip_in,
                    "_clipOut": clip_out
                }

            # from now we are collecting only subtrackitems on
            # track with no video items
            if len(items) > 0:
                continue

            # create list in track key
            # get all subTrackItems and add it to context
            track_effects[track_index] = list()

            # collect all subtrack items
            for sitem in sub_items:
                # unwrap from tuple >> it is always tuple with one item
                sitem = sitem[0]

                # checking if not enabled
                if not sitem.isEnabled():
                    continue

                track_effects[track_index].append(sitem)

        context.data["trackEffects"] = track_effects
        self.log.debug(">> sub_track_items: `{}`".format(track_effects))

View file

@ -1,27 +0,0 @@
import pyblish.api
class CollectProjectColorspace(pyblish.api.ContextPlugin):
    """Collect the active project's colour-management settings into context.

    NOTE: the former `import hiero` inside `process` was unused and has
    been removed.
    """

    order = pyblish.api.CollectorOrder + 0.1
    label = "Project's color settings"

    def process(self, context):
        project = context.data["activeProject"]

        # Gather every project-level LUT/OCIO setting into one mapping.
        colorspace = {}
        colorspace["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride()
        colorspace["lutSetting16Bit"] = project.lutSetting16Bit()
        colorspace["lutSetting8Bit"] = project.lutSetting8Bit()
        colorspace["lutSettingFloat"] = project.lutSettingFloat()
        colorspace["lutSettingLog"] = project.lutSettingLog()
        colorspace["lutSettingViewer"] = project.lutSettingViewer()
        colorspace["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace()
        colorspace["lutUseOCIOForExport"] = project.lutUseOCIOForExport()
        colorspace["ocioConfigName"] = project.ocioConfigName()
        colorspace["ocioConfigPath"] = project.ocioConfigPath()

        context.data["colorspace"] = colorspace
        self.log.info("context.data[colorspace]: {}".format(context.data["colorspace"]))
self.log.info("context.data[colorspace]: {}".format(context.data["colorspace"]))

View file

@ -1,13 +0,0 @@
import pyblish.api
class CollectCurrentFile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    order = pyblish.api.CollectorOrder - 0.1

    def process(self, context):
        # Use mapping-style access for consistency with the sibling plugins
        # (`context.data('activeProject')` is the legacy callable pyblish
        # API); also drop the unused intermediate `path` binding.
        project = context.data["activeProject"]
        context.data["currentFile"] = project.path()
        self.log.info("currentFile: {}".format(context.data["currentFile"]))

View file

@ -1,106 +0,0 @@
import pyblish.api
import re
class CollectVideoTracksLuts(pyblish.api.InstancePlugin):
    """Collect video tracks effects into context."""
    order = pyblish.api.CollectorOrder + 0.1015
    label = "Collect Soft Lut Effects"
    families = ["clip"]

    def process(self, instance):
        # Collects the clip's own linked effects plus effects on higher
        # tracks, stores them under `instance.data["effectTrackItems"]`
        # and adds the "lut" family when any were found.
        self.log.debug(
            "Finding soft effect for subset: `{}`".format(
                instance.data.get("subset")))

        # taking active sequence
        subset = instance.data.get("subset")
        if not subset:
            return

        track_effects = instance.context.data.get("trackEffects", {})
        track_index = instance.data["trackIndex"]
        effects = instance.data["effects"]

        # creating context attribute
        self.effects = {"assignTo": subset, "effects": dict()}

        for sitem in effects:
            self.add_effect(instance, track_index, sitem)

        for t_index, sitems in track_effects.items():
            for sitem in sitems:
                # only effects on tracks above the clip's track apply
                if not t_index > track_index:
                    continue
                self.log.debug(">> sitem: `{}`".format(sitem))
                self.add_effect(instance, t_index, sitem)

        if self.effects["effects"]:
            instance.data["effectTrackItems"] = self.effects

        if len(instance.data.get("effectTrackItems", {}).keys()) > 0:
            instance.data["families"] += ["lut"]
            self.log.debug(
                "effects.keys: {}".format(
                    instance.data.get("effectTrackItems", {}).keys()))
            self.log.debug(
                "effects: {}".format(
                    instance.data.get("effectTrackItems", {})))

    def add_effect(self, instance, track_index, item):
        # Serialise one soft-effect track item into `self.effects`.
        track = item.parentTrack().name()
        # node serialization
        node = item.node()
        node_serialized = self.node_serialisation(instance, node)

        # collect timelineIn/Out
        effect_t_in = int(item.timelineIn())
        effect_t_out = int(item.timelineOut())

        node_name = item.name()
        node_class = re.sub(r"\d+", "", node_name)

        self.effects["effects"].update({node_name: {
            "class": node_class,
            "timelineIn": effect_t_in,
            "timelineOut": effect_t_out,
            "subTrackIndex": item.subTrackIndex(),
            "trackIndex": track_index,
            "track": track,
            "node": node_serialized
        }})

    def node_serialisation(self, instance, node):
        # Turn an effect node into a plain dict of knob values; animated
        # knobs are sampled per-frame over the handle-extended clip range.
        node_serialized = {}
        timeline_in_h = instance.data["clipInH"]
        timeline_out_h = instance.data["clipOutH"]

        # adding ignoring knob keys
        _ignoring_keys = ['invert_mask', 'help', 'mask',
                          'xpos', 'ypos', 'layer', 'process_mask', 'channel',
                          'channels', 'maskChannelMask', 'maskChannelInput',
                          'note_font', 'note_font_size', 'unpremult',
                          'postage_stamp_frame', 'maskChannel', 'export_cc',
                          'select_cccid', 'mix', 'version', 'matrix']

        # loop trough all knobs and collect not ignored
        # and any with any value
        for knob in node.knobs().keys():
            # skip nodes in ignore keys
            if knob in _ignoring_keys:
                continue

            # get animation if node is animated
            if node[knob].isAnimated():
                # grab animation including handles
                knob_anim = [node[knob].getValueAt(i)
                             for i in range(timeline_in_h, timeline_out_h + 1)]

                node_serialized[knob] = knob_anim
            else:
                node_serialized[knob] = node[knob].value()

        return node_serialized

View file

@ -1,50 +1,70 @@
import pyblish.api
class CollectClipFrameRanges(pyblish.api.InstancePlugin):
"""Collect all frame range data: source(In,Out), timeline(In,Out), edit_(in, out), f(start, end)"""
class CollectFrameRanges(pyblish.api.InstancePlugin):
""" Collect all framranges.
"""
order = pyblish.api.CollectorOrder + 0.101
order = pyblish.api.CollectorOrder
label = "Collect Frame Ranges"
hosts = ["hiero"]
families = ["clip", "effect"]
def process(self, instance):
data = dict()
track_item = instance.data["item"]
# Timeline data.
# handles
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
source_in_h = instance.data("sourceInH",
instance.data("sourceIn") - handle_start)
source_out_h = instance.data("sourceOutH",
instance.data("sourceOut") + handle_end)
# source frame ranges
source_in = int(track_item.sourceIn())
source_out = int(track_item.sourceOut())
source_in_h = int(source_in - handle_start)
source_out_h = int(source_out + handle_end)
timeline_in = instance.data["clipIn"]
timeline_out = instance.data["clipOut"]
# timeline frame ranges
clip_in = int(track_item.timelineIn())
clip_out = int(track_item.timelineOut())
clip_in_h = clip_in - handle_start
clip_out_h = clip_out + handle_end
timeline_in_h = timeline_in - handle_start
timeline_out_h = timeline_out + handle_end
# durations
clip_duration = (clip_out - clip_in) + 1
clip_duration_h = clip_duration + (handle_start + handle_end)
# set frame start with tag or take it from timeline
frame_start = instance.data.get("startingFrame")
# set frame start with tag or take it from timeline `startingFrame`
frame_start = instance.data.get("workfileFrameStart")
if not frame_start:
frame_start = timeline_in
frame_start = clip_in
frame_end = frame_start + (timeline_out - timeline_in)
frame_end = frame_start + (clip_out - clip_in)
data.update({
# media source frame range
"sourceIn": source_in,
"sourceOut": source_out,
"sourceInH": source_in_h,
"sourceOutH": source_out_h,
# timeline frame range
"clipIn": clip_in,
"clipOut": clip_out,
"clipInH": clip_in_h,
"clipOutH": clip_out_h,
# workfile frame range
"frameStart": frame_start,
"frameEnd": frame_end,
"clipInH": timeline_in_h,
"clipOutH": timeline_out_h,
"clipDurationH": instance.data.get(
"clipDuration") + handle_start + handle_end
}
)
self.log.debug("__ data: {}".format(data))
"clipDuration": clip_duration,
"clipDurationH": clip_duration_h,
"fps": instance.context.data["fps"]
})
self.log.info("Frame range data for instance `{}` are: {}".format(
instance, data))
instance.data.update(data)

View file

@ -1,61 +0,0 @@
from pyblish import api
class CollectClipHandles(api.ContextPlugin):
    """Collect Handles from all instanes and add to assetShared."""
    order = api.CollectorOrder + 0.0121
    label = "Collect Handles"
    hosts = ["hiero"]

    def process(self, context):
        assets_shared = context.data.get("assetsShared")

        # find all main types instances and add its handles to asset shared
        instances = context[:]
        filtered_instances = []
        for instance in instances:
            self.log.debug("_ instance.name: `{}`".format(instance.data["name"]))
            families = instance.data.get("families", [])
            families += [instance.data["family"]]
            if "clip" in families:
                filtered_instances.append(instance)
            else:
                continue

            # get handles
            handle_start = int(instance.data["handleStart"])
            handle_end = int(instance.data["handleEnd"])

            if instance.data.get("main"):
                name = instance.data["asset"]
                if assets_shared.get(name):
                    asset_shared = assets_shared.get(name)
                else:
                    # NOTE(review): this raises KeyError when `name` is not
                    # present at all -- presumably `assetsShared` is always
                    # pre-populated by an earlier collector; confirm.
                    asset_shared = assets_shared[name]
                self.log.debug("Adding to shared assets: `{}`".format(
                    instance.data["name"]))
                asset_shared.update({
                    "handleStart": handle_start,
                    "handleEnd": handle_end
                })

        # second pass: non-main clip instances inherit the handles that the
        # "main" instance of the same asset stored above
        for instance in filtered_instances:
            if not instance.data.get("main") and not instance.data.get("handleTag"):
                self.log.debug("Synchronize handles on: `{}`".format(
                    instance.data["name"]))
                name = instance.data["asset"]
                s_asset_data = assets_shared.get(name)
                instance.data["handleStart"] = s_asset_data.get(
                    "handleStart", 0
                )
                instance.data["handleEnd"] = s_asset_data.get("handleEnd", 0)

                # debug printing
                self.log.debug("_ s_asset_data: `{}`".format(
                    s_asset_data))
                self.log.debug("_ instance.data[handleStart]: `{}`".format(
                    instance.data["handleStart"]))
                self.log.debug("_ instance.data[handleEnd]: `{}`".format(
                    instance.data["handleEnd"]))

View file

@ -1,10 +1,10 @@
import pyblish.api
import avalon.api as avalon
import re
class CollectHierarchyInstance(pyblish.api.ContextPlugin):
"""Collecting hierarchy context from `parents` and `hierarchy` data
class CollectHierarchy(pyblish.api.ContextPlugin):
"""Collecting hierarchy from `parents`.
present in `clip` family instances coming from the request json data file
It will add `hierarchical_context` into each instance for integrate
@ -12,336 +12,105 @@ class CollectHierarchyInstance(pyblish.api.ContextPlugin):
don't exist yet
"""
label = "Collect Hierarchy Clip"
order = pyblish.api.CollectorOrder + 0.102
label = "Collect Hierarchy"
order = pyblish.api.CollectorOrder
families = ["clip"]
def convert_to_entity(self, key, value):
# ftrack compatible entity types
types = {"shot": "Shot",
"folder": "Folder",
"episode": "Episode",
"sequence": "Sequence",
"track": "Sequence",
}
# convert to entity type
entity_type = types.get(key, None)
# return if any
if entity_type:
return {"entityType": entity_type, "entityName": value}
def process(self, context):
for instance in context[:]:
assets_shared = context.data.get("assetsShared")
tags = instance.data.get("tags", None)
clip = instance.data["item"]
asset = instance.data["asset"]
sequence = context.data['activeSequence']
resolution_width = instance.data["resolutionWidth"]
resolution_height = instance.data["resolutionHeight"]
pixel_aspect = instance.data["pixelAspect"]
clip_in = instance.data["clipIn"]
clip_out = instance.data["clipOut"]
fps = context.data["fps"]
# build data for inner hiero project property
data = {
"sequence": (
context.data['activeSequence'].name().replace(' ', '_')
),
"track": clip.parent().name().replace(' ', '_'),
"clip": asset
}
self.log.debug("__ data: {}".format(data))
# checking if tags are available
self.log.debug("__ instance.data[name]: {}".format(
instance.data["name"]))
self.log.debug("__ tags: {}".format(tags))
if not tags:
continue
# loop trough all tags
for t in tags:
t_metadata = dict(t["metadata"])
t_type = t_metadata.get("tag.label", "")
t_note = t_metadata.get("tag.note", "")
self.log.debug("__ t_type: {}".format(t_type))
# and finding only hierarchical tag
if "hierarchy" in t_type.lower():
# Check for clips with the same range
# this is for testing if any vertically neighbouring
# clips has been already processed
match = next((
k for k, v in assets_shared.items()
if (v["_clipIn"] == clip_in)
and (v["_clipOut"] == clip_out)
), False)
self.log.debug(
"__ assets_shared[match]: {}".format(
assets_shared[match]))
# check if hierarchy key is present in matched
# vertically neighbouring clip
if not assets_shared[match].get("hierarchy"):
match = False
# rise exception if multiple hierarchy tag found
assert not match, (
"Two clips above each other with"
" hierarchy tag are not allowed"
" >> keep hierarchy tag only in one of them <<"
)
d_metadata = dict()
parents = list()
# main template from Tag.note
template = t_note
# if shot in template then remove it
if "shot" in template.lower():
instance.data["asset"] = [
t for t in template.split('/')][-1]
template = "/".join(
[t for t in template.split('/')][0:-1])
# take template from Tag.note and break it into parts
template_split = template.split("/")
patern = re.compile(r"\{([a-z]*?)\}")
par_split = [patern.findall(t)
for t in template.split("/")]
# format all {} in two layers
for k, v in t_metadata.items():
new_k = k.split(".")[1]
# ignore all help strings
if 'help' in k:
continue
# self.log.info("__ new_k: `{}`".format(new_k))
try:
# first try all data and context data to
# add to individual properties
new_v = str(v).format(
**dict(context.data, **data))
d_metadata[new_k] = new_v
# create parents
# find matching index of order
p_match_i = [i for i, p in enumerate(par_split)
if new_k in p]
# if any is matching then convert to entity_types
if p_match_i:
parent = self.convert_to_entity(
new_k, template_split[p_match_i[0]])
parents.insert(p_match_i[0], parent)
except Exception:
d_metadata[new_k] = v
# create new shot asset name
instance.data["asset"] = instance.data["asset"].format(
**d_metadata)
self.log.debug(
"__ instance.data[asset]: "
"{}".format(instance.data["asset"])
)
# lastly fill those individual properties itno
# format the string with collected data
parents = [{"entityName": p["entityName"].format(
**d_metadata), "entityType": p["entityType"]}
for p in parents]
self.log.debug("__ parents: {}".format(parents))
hierarchy = template.format(
**d_metadata)
self.log.debug("__ hierarchy: {}".format(hierarchy))
# check if hierarchy attribute is already created
# it should not be so return warning if it is
hd = instance.data.get("hierarchy")
assert not hd, (
"Only one Hierarchy Tag is allowed. "
"Clip: `{}`".format(asset)
)
# add formated hierarchy path into instance data
instance.data["hierarchy"] = hierarchy
instance.data["parents"] = parents
self.log.info(
"clip: {asset}[{clip_in}:{clip_out}]".format(
**locals()))
# adding to asset shared dict
self.log.debug(
"__ assets_shared: {}".format(assets_shared))
if assets_shared.get(asset):
self.log.debug("Adding to shared assets: `{}`".format(
asset))
asset_shared = assets_shared.get(asset)
else:
asset_shared = assets_shared[asset]
asset_shared.update({
"asset": asset,
"hierarchy": hierarchy,
"parents": parents,
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect,
"fps": fps,
"tasks": instance.data["tasks"]
})
# adding frame start if any on instance
start_frame = instance.data.get("startingFrame")
if start_frame:
asset_shared.update({
"startingFrame": start_frame
})
self.log.debug(
"assets_shared: {assets_shared}".format(**locals()))
class CollectHierarchyContext(pyblish.api.ContextPlugin):
'''Collecting Hierarchy from instaces and building
context hierarchy tree
'''
label = "Collect Hierarchy Context"
order = pyblish.api.CollectorOrder + 0.103
def update_dict(self, ex_dict, new_dict):
for key in ex_dict:
if key in new_dict and isinstance(ex_dict[key], dict):
new_dict[key] = self.update_dict(ex_dict[key], new_dict[key])
else:
if ex_dict.get(key) and new_dict.get(key):
continue
else:
new_dict[key] = ex_dict[key]
return new_dict
def process(self, context):
instances = context[:]
# create hierarchyContext attr if context has none
temp_context = {}
for instance in instances:
if 'projectfile' in instance.data.get('family', ''):
continue
name = instance.data["asset"]
# get handles
handle_start = int(instance.data["handleStart"])
handle_end = int(instance.data["handleEnd"])
# inject assetsShared to other plates types
assets_shared = context.data.get("assetsShared")
if assets_shared:
s_asset_data = assets_shared.get(name)
if s_asset_data:
self.log.debug("__ s_asset_data: {}".format(s_asset_data))
name = instance.data["asset"] = s_asset_data["asset"]
instance.data["parents"] = s_asset_data["parents"]
instance.data["hierarchy"] = s_asset_data["hierarchy"]
instance.data["tasks"] = s_asset_data["tasks"]
instance.data["resolutionWidth"] = s_asset_data[
"resolutionWidth"]
instance.data["resolutionHeight"] = s_asset_data[
"resolutionHeight"]
instance.data["pixelAspect"] = s_asset_data["pixelAspect"]
instance.data["fps"] = s_asset_data["fps"]
# adding frame start if any on instance
start_frame = s_asset_data.get("startingFrame")
if start_frame:
instance.data["frameStart"] = start_frame
instance.data["frameEnd"] = start_frame + (
instance.data["clipOut"] -
instance.data["clipIn"])
self.log.debug(
"__ instance.data[parents]: {}".format(
instance.data["parents"]
)
)
self.log.debug(
"__ instance.data[hierarchy]: {}".format(
instance.data["hierarchy"]
)
)
self.log.debug(
"__ instance.data[name]: {}".format(instance.data["name"])
)
in_info = {}
in_info["inputs"] = [
x["_id"] for x in instance.data.get("assetbuilds", [])
]
# suppose that all instances are Shots
in_info['entity_type'] = 'Shot'
# get custom attributes of the shot
if instance.data.get("main"):
in_info['custom_attributes'] = {
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
'fps': instance.context.data["fps"]
}
# adding SourceResolution if Tag was present
if instance.data.get("main"):
in_info['custom_attributes'].update({
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"]
})
in_info['tasks'] = instance.data['tasks']
in_info["comments"] = instance.data.get("comments", [])
parents = instance.data.get('parents', [])
self.log.debug("__ in_info: {}".format(in_info))
actual = {name: in_info}
for parent in reversed(parents):
next_dict = {}
parent_name = parent["entityName"]
next_dict[parent_name] = {}
next_dict[parent_name]["entity_type"] = parent["entityType"]
next_dict[parent_name]["childs"] = actual
actual = next_dict
temp_context = self.update_dict(temp_context, actual)
# TODO: 100% sure way of get project! Will be Name or Code?
project_name = avalon.Session["AVALON_PROJECT"]
final_context = {}
final_context[project_name] = {}
final_context[project_name]['entity_type'] = 'Project'
for instance in context:
self.log.info("Processing instance: `{}` ...".format(instance))
# shot data dict
shot_data = {}
families = instance.data.get("families")
# filter out all unepropriate instances
if not instance.data["publish"]:
continue
if not families:
continue
# exclude other families then self.families with intersection
if not set(self.families).intersection(families):
continue
# exclude if not masterLayer True
if not instance.data.get("masterLayer"):
continue
# update families to include `shot` for hierarchy integration
instance.data["families"] = families + ["shot"]
# get asset build data if any available
shot_data["inputs"] = [
x["_id"] for x in instance.data.get("assetbuilds", [])
]
# suppose that all instances are Shots
shot_data['entity_type'] = 'Shot'
shot_data['tasks'] = instance.data.get("tasks") or []
shot_data["comments"] = instance.data.get("comments", [])
shot_data['custom_attributes'] = {
"handleStart": instance.data["handleStart"],
"handleEnd": instance.data["handleEnd"],
"frameStart": instance.data["frameStart"],
"frameEnd": instance.data["frameEnd"],
"clipIn": instance.data["clipIn"],
"clipOut": instance.data["clipOut"],
'fps': instance.context.data["fps"],
"resolutionWidth": instance.data["resolutionWidth"],
"resolutionHeight": instance.data["resolutionHeight"],
"pixelAspect": instance.data["pixelAspect"]
}
actual = {instance.data["asset"]: shot_data}
for parent in reversed(instance.data["parents"]):
next_dict = {}
parent_name = parent["entity_name"]
next_dict[parent_name] = {}
next_dict[parent_name]["entity_type"] = parent[
"entity_type"].capitalize()
next_dict[parent_name]["childs"] = actual
actual = next_dict
temp_context = self._update_dict(temp_context, actual)
# skip if nothing for hierarchy available
if not temp_context:
return
final_context[project_name]['childs'] = temp_context
# adding hierarchy context to instance
# adding hierarchy context to context
context.data["hierarchyContext"] = final_context
self.log.debug("context.data[hierarchyContext] is: {}".format(
context.data["hierarchyContext"]))
def _update_dict(self, parent_dict, child_dict):
"""
Nesting each children into its parent.
Args:
parent_dict (dict): parent dict wich should be nested with children
child_dict (dict): children dict which should be injested
"""
for key in parent_dict:
if key in child_dict and isinstance(parent_dict[key], dict):
child_dict[key] = self._update_dict(
parent_dict[key], child_dict[key]
)
else:
if parent_dict.get(key) and child_dict.get(key):
continue
else:
child_dict[key] = parent_dict[key]
return child_dict

View file

@ -1,13 +0,0 @@
import pyblish.api
class CollectHost(pyblish.api.ContextPlugin):
    """Inject the current publishing host name into the context.

    Stores the host returned by ``pyblish.api.current_host()`` under the
    ``host`` key so downstream plugins can branch on the running
    application.
    """

    order = pyblish.api.CollectorOrder

    def process(self, context):
        # pyblish.api is already imported at module level; the previous
        # in-function re-import was redundant and has been dropped.  The
        # host is resolved once instead of twice (set_data + log).
        host = pyblish.api.current_host()
        context.set_data("host", host)
        self.log.info("current host: {}".format(host))

View file

@ -4,9 +4,12 @@ import pyblish.api
class CollectHostVersion(pyblish.api.ContextPlugin):
"""Inject the hosts version into context"""
order = pyblish.api.CollectorOrder
label = "Collect Host and HostVersion"
order = pyblish.api.CollectorOrder - 0.5
def process(self, context):
import nuke
import pyblish.api
context.set_data("host", pyblish.api.current_host())
context.set_data('hostVersion', value=nuke.NUKE_VERSION_STRING)

View file

@ -1,18 +0,0 @@
from pyblish import api
class CollectInstanceVersion(api.InstancePlugin):
    """Propagate the Hiero project version onto each instance.

    When this plugin is activated, every published subset is versioned
    with the version of the actual project instead of auto-incrementing.
    """

    order = api.CollectorOrder + 0.011
    label = "Collect Instance Version"

    def process(self, instance):
        # Fall back to "001" when the context carries no project version.
        project_version = instance.context.data.get("version", "001")
        instance.data["version"] = int(project_version)

View file

@ -0,0 +1,221 @@
from compiler.ast import flatten
from pyblish import api
from pype.hosts import hiero as phiero
import hiero
# from pype.hosts.hiero.api import lib
# reload(lib)
# reload(phiero)
class CollectInstances(api.ContextPlugin):
    """Collect all Track items selection.

    Turns every tagged (and enabled) Hiero track item into a pyblish
    instance, plus an extra "audioMain" instance when the tag data asks
    for audio.

    NOTE(review): relies on the module-level ``flatten`` imported from
    ``compiler.ast`` -- a Python 2-only package; confirm the target
    interpreter before porting.
    """

    # run early, before the other collectors
    order = api.CollectorOrder - 0.509
    label = "Collect Instances"
    hosts = ["hiero"]

    def process(self, context):
        # prefer the user's selection; tagged + enabled items only
        track_items = phiero.get_track_items(
            selected=True, check_tagged=True, check_enabled=True)
        # only return enabled track items
        if not track_items:
            track_items = phiero.get_track_items(
                check_enabled=True, check_tagged=True)

        # get sequence and video tracks
        sequence = context.data["activeSequence"]
        tracks = sequence.videoTracks()

        # add collection to context
        tracks_effect_items = self.collect_sub_track_items(tracks)
        context.data["tracksEffectItems"] = tracks_effect_items

        self.log.info(
            "Processing enabled track items: {}".format(len(track_items)))

        for _ti in track_items:
            data = dict()
            clip = _ti.source()

            # get clip's subtracks and annotations
            annotations = self.clip_annotations(clip)
            subtracks = self.clip_subtrack(_ti)
            self.log.debug("Annotations: {}".format(annotations))
            self.log.debug(">> Subtracks: {}".format(subtracks))

            # get pype tag data; skip items without a publishable tag
            tag_parsed_data = phiero.get_track_item_pype_data(_ti)
            if not tag_parsed_data:
                continue

            if tag_parsed_data.get("id") != "pyblish.avalon.instance":
                continue

            # add tag data to instance data (minus bookkeeping keys)
            data.update({
                k: v for k, v in tag_parsed_data.items()
                if k not in ("id", "applieswhole", "label")
            })

            asset = tag_parsed_data["asset"]
            subset = tag_parsed_data["subset"]
            review = tag_parsed_data["review"]
            audio = tag_parsed_data["audio"]

            # remove audio attribute from data
            data.pop("audio")

            # insert family into families
            family = tag_parsed_data["family"]
            families = [str(f) for f in tag_parsed_data["families"]]
            families.insert(0, str(family))

            # source media bookkeeping for the extractors
            track = _ti.parent()
            media_source = _ti.source().mediaSource()
            source_path = media_source.firstpath()
            file_head = media_source.filenameHead()
            file_info = media_source.fileinfos().pop()
            source_first_frame = int(file_info.startFrame())

            # apply only for review and master track instance
            if review:
                families += ["review", "ftrack"]

            data.update({
                "name": "{} {} {}".format(asset, subset, families),
                "asset": asset,
                "item": _ti,
                "families": families,

                # tags
                "tags": _ti.tags(),

                # track item attributes
                "track": track.name(),
                # NOTE(review): key says "trackItem" but the value is the
                # parent track object, not the item -- confirm intent
                "trackItem": track,

                # version data
                "versionData": {
                    "colorspace": _ti.sourceMediaColourTransform()
                },

                # source attribute
                "source": source_path,
                "sourceMedia": media_source,
                "sourcePath": source_path,
                "sourceFileHead": file_head,
                "sourceFirst": source_first_frame,

                # clip's effect
                "clipEffectItems": subtracks
            })

            instance = context.create_instance(**data)
            self.log.info("Creating instance: {}".format(instance))

            # optionally spawn a companion audio instance
            if audio:
                a_data = dict()

                # add tag data to instance data
                a_data.update({
                    k: v for k, v in tag_parsed_data.items()
                    if k not in ("id", "applieswhole", "label")
                })

                # create main attributes
                subset = "audioMain"
                family = "audio"
                families = ["clip", "ftrack"]
                families.insert(0, str(family))

                name = "{} {} {}".format(asset, subset, families)
                a_data.update({
                    "name": name,
                    "subset": subset,
                    "asset": asset,
                    "family": family,
                    "families": families,
                    "item": _ti,

                    # tags
                    "tags": _ti.tags(),
                })

                a_instance = context.create_instance(**a_data)
                self.log.info("Creating audio instance: {}".format(
                    a_instance))

    @staticmethod
    def clip_annotations(clip):
        """Return list of the Clip's hiero.core.Annotation items."""
        annotations = []
        subTrackItems = flatten(clip.subTrackItems())
        annotations += [item for item in subTrackItems if isinstance(
            item, hiero.core.Annotation)]
        return annotations

    @staticmethod
    def clip_subtrack(clip):
        """Return list of the Clip's enabled hiero.core.SubTrackItem items.

        Annotations and disabled subtrack items are filtered out.
        """
        subtracks = []
        subTrackItems = flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            # avoid all annotations
            if isinstance(item, hiero.core.Annotation):
                continue
            # avoid all items that are not enabled
            if not item.isEnabled():
                continue
            subtracks.append(item)
        return subtracks

    @staticmethod
    def collect_sub_track_items(tracks):
        """Return dict mapping track index -> enabled subtrack items.

        Only effect-only tracks contribute: a track carrying regular clip
        items is skipped entirely.
        """
        # collect all subtrack items
        sub_track_items = dict()
        for track in tracks:
            items = track.items()

            # skip if no clips on track > need track with effect only
            if items:
                continue

            # skip all disabled tracks
            if not track.isEnabled():
                continue

            track_index = track.trackIndex()
            _sub_track_items = flatten(track.subTrackItems())

            # continue only if any subtrack items are collected
            if len(_sub_track_items) < 1:
                continue

            enabled_sti = list()
            # loop all found subtrack items and check if they are enabled
            for _sti in _sub_track_items:
                # checking if not enabled
                if not _sti.isEnabled():
                    continue
                if isinstance(_sti, hiero.core.Annotation):
                    continue
                # collect the subtrack item
                enabled_sti.append(_sti)

            # continue only if any subtrack items are collected
            if len(enabled_sti) < 1:
                continue

            # add collection of subtrackitems to dict
            sub_track_items[track_index] = enabled_sti

        return sub_track_items

View file

@ -1,24 +0,0 @@
from pyblish import api
class CollectLeaderClip(api.InstancePlugin):
    """Flag the "leader" clip among the selected track items.

    A clip carrying a hierarchy Tag defines the shareable data
    attributes (``handle_start/end``, ``frame_start``, ...) for the
    other clips tagged with `subset` tags.
    """

    order = api.CollectorOrder + 0.0111
    label = "Collect Leader Clip"
    hosts = ["hiero"]
    families = ['clip']

    def process(self, instance):
        # Scan every tag on the instance for a hierarchy label.
        for tag in instance.data["tags"]:
            metadata = dict(tag["metadata"])
            tag_label = metadata.get("tag.label", "")
            self.log.info("`hierarhy`: `{}`".format(tag_label))

            # Only a tag whose label mentions "hierarchy" marks the leader.
            if "hierarchy" not in tag_label.lower():
                continue

            if not instance.data.get("main"):
                instance.data["main"] = True
            self.log.info(
                "`Leader Clip` found in instance.name: `{}`".format(
                    instance.data["name"]))

View file

@ -1,264 +1,142 @@
import os
from pyblish import api
import os
import re
import clique
class CollectPlates(api.InstancePlugin):
"""Collect plates from tags.
Tag is expected to have metadata:
{
"family": "plate"
"subset": "main"
}
"""Collect plate representations.
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1021
order = api.CollectorOrder + 0.1020
label = "Collect Plates"
hosts = ["hiero"]
families = ["clip"]
def process(self, instance):
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
tag_data = dict(tag["metadata"])
family = tag_data.get("tag.family", "")
if family.lower() == "plate":
subset = tag_data.get("tag.subset", "Main")
tagged = True
break
if not tagged:
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"plate\"".format(instance)
)
return
self.log.debug("__ subset: `{}`".format(instance.data["subset"]))
# if "audio" in instance.data["subset"]:
# return
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
self.log.debug("__ family: `{}`".format(family))
self.log.debug("__ subset: `{}`".format(subset))
data["family"] = family.lower()
data["families"] = ["ftrack"] + instance.data["families"][1:]
data["source"] = data["sourcePath"]
data["subset"] = family + subset.title()
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(
data["sourcePath"])[1])
if "review" in instance.data["families"]:
data["label"] += " - review"
# adding SourceResolution if Tag was present
if instance.data.get("sourceResolution") and instance.data.get("main"):
item = instance.data["item"]
resolution_width = int(item.source().mediaSource().width())
resolution_height = int(item.source().mediaSource().height())
pixel_aspect = int(item.source().mediaSource().pixelAspect())
self.log.info("Source Width and Height are: `{0} x {1} : {2}`".format(
resolution_width, resolution_height, pixel_aspect))
data.update({
"resolutionWidth": resolution_width,
"resolutionHeight": resolution_height,
"pixelAspect": pixel_aspect
})
self.log.debug("Creating instance with name: {}".format(data["name"]))
instance.context.create_instance(**data)
class CollectPlatesData(api.InstancePlugin):
"""Collect plates"""
order = api.CollectorOrder + 0.48
label = "Collect Plates Data"
hosts = ["hiero"]
families = ["plate"]
def process(self, instance):
import os
if "review" in instance.data.get("track", ""):
self.log.debug(
"Skipping \"{}\" because its `review` track "
"\"plate\"".format(instance)
)
return
# add to representations
if not instance.data.get("representations"):
instance.data["representations"] = list()
version_data = dict()
name = instance.data["subset"]
# get plate source attributes
source_media = instance.data["sourceMedia"]
source_path = instance.data["sourcePath"]
source_file = os.path.basename(source_path)
source_first = instance.data["sourceFirst"]
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
source_in_h = instance.data.get("sourceInH")
source_out_h = instance.data.get("sourceOutH")
# Filter out "clip" family.
families = instance.data["families"] + [instance.data["family"]]
families = list(set(families))
if "clip" in families:
families.remove("clip")
family = families[-1]
# define if review media is sequence
is_sequence = bool(not source_media.singleFile())
self.log.debug("is_sequence: {}".format(is_sequence))
# staging dir creation
staging_dir = os.path.dirname(
source_path)
file_dir = os.path.dirname(source_path)
file = os.path.basename(source_path)
ext = os.path.splitext(file)[-1]
item = instance.data["item"]
# detect if sequence
if not is_sequence:
# is video file
files = file
else:
files = list()
spliter, padding = self.detect_sequence(file)
self.log.debug("_ spliter, padding: {}, {}".format(
spliter, padding))
base_name = file.split(spliter)[0]
# define collection and calculate frame range
collection = clique.Collection(
base_name,
ext,
padding,
set(range(
int(source_first + source_in_h),
int(source_first + source_out_h) + 1
))
)
self.log.debug("_ collection: {}".format(collection))
real_files = os.listdir(file_dir)
self.log.debug("_ real_files: {}".format(real_files))
# collect frames to repre files list
for item in collection:
if item not in real_files:
self.log.debug("_ item: {}".format(item))
continue
files.append(item)
# change label
instance.data["label"] = "{0} - ({1})".format(
instance.data["label"], ext
)
self.log.debug("Instance review: {}".format(instance.data["name"]))
# adding representation for review mov
representation = {
"files": files,
"stagingDir": file_dir,
"frameStart": frame_start - handle_start,
"frameEnd": frame_end + handle_end,
"name": ext[1:],
"ext": ext[1:]
}
instance.data["representations"].append(representation)
self.log.debug(
"Added representations: {}".format(
instance.data["representations"]))
def version_data(self, instance):
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut", "frameStart",
"frameEnd", "sourceInH", "sourceOutH", "clipIn", "clipOut",
"clipInH", "clipOutH", "asset", "track", "resolutionWidth", "resolutionHeight", "pixelAspect", "fps"
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset",
"track"
]
version_data = dict()
# pass data to version
version_data.update({k: instance.data[k] for k in transfer_data})
if 'version' in instance.data:
version_data["version"] = instance.data["version"]
# add to data of representation
version_data.update({
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": [f for f in families if 'ftrack' not in f],
"subset": name,
"fps": instance.context.data["fps"]
"colorspace": self.rw_clip.sourceMediaColourTransform(),
"families": instance.data["families"],
"subset": instance.data["subset"],
"fps": instance.data["fps"]
})
version = instance.data.get("version")
if version:
version_data.update({
"version": version
})
source_first_frame = instance.data.get("sourceFirst")
source_file_head = instance.data.get("sourceFileHead")
self.log.debug("source_first_frame: `{}`".format(source_first_frame))
if instance.data.get("isSequence", False):
self.log.info("Is sequence of files")
file = os.path.basename(source_file)
ext = os.path.splitext(file)[-1][1:]
self.log.debug("source_file_head: `{}`".format(source_file_head))
head = source_file_head[:-1]
start_frame = int(source_first_frame + instance.data["sourceInH"])
duration = int(
instance.data["sourceOutH"] - instance.data["sourceInH"])
end_frame = start_frame + duration
self.log.debug("start_frame: `{}`".format(start_frame))
self.log.debug("end_frame: `{}`".format(end_frame))
files = [file % i for i in range(start_frame, (end_frame + 1), 1)]
else:
self.log.info("Is single file")
ext = os.path.splitext(source_file)[-1][1:]
head = source_file_head
files = source_file
start_frame = instance.data["sourceInH"]
end_frame = instance.data["sourceOutH"]
mov_file = head + ".mov"
mov_path = os.path.normpath(os.path.join(staging_dir, mov_file))
if os.path.exists(mov_path):
# adding mov into the representations
self.log.debug("__ mov_path: {}".format(mov_path))
instance.data["label"] += " - review"
plates_mov_representation = {
'files': mov_file,
'stagingDir': staging_dir,
"frameStart": 0,
"frameEnd": instance.data["sourceOut"] - instance.data["sourceIn"] + 1,
'step': 1,
'fps': instance.context.data["fps"],
'tags': ["review"],
'name': "preview",
'ext': "mov",
}
if mov_file not in source_file:
instance.data["representations"].append(
plates_mov_representation)
thumb_frame = instance.data["sourceInH"] + (
(instance.data["sourceOutH"] - instance.data["sourceInH"]) / 2)
thumb_file = "{}_{}{}".format(head, thumb_frame, ".png")
thumb_path = os.path.join(staging_dir, thumb_file)
self.log.debug("__ thumb_path: `{}`, frame: `{}`".format(
thumb_path, thumb_frame))
thumbnail = item.thumbnail(thumb_frame).save(
thumb_path,
format='png'
)
self.log.debug("__ thumbnail: `{}`, frame: `{}`".format(
thumbnail, thumb_frame))
thumb_representation = {
'files': thumb_file,
'stagingDir': staging_dir,
'name': "thumbnail",
'thumbnail': True,
'ext': "png"
}
instance.data["representations"].append(
thumb_representation)
# adding representation for plates
frame_start = instance.data["frameStart"] - \
instance.data["handleStart"]
frame_end = instance.data["frameEnd"] + instance.data["handleEnd"]
# exception for retimes
if instance.data.get("retime"):
source_in_h = instance.data["sourceInH"]
source_in = instance.data["sourceIn"]
source_handle_start = source_in_h - source_in
frame_start = instance.data["frameStart"] + source_handle_start
duration = instance.data["sourceOutH"] - instance.data["sourceInH"]
frame_end = frame_start + duration
plates_representation = {
'files': files,
'stagingDir': staging_dir,
'name': ext,
'ext': ext,
"frameEnd": frame_end,
"frameStart": "%0{}d".format(
len(str(frame_end))) % frame_start
}
instance.data["representations"].append(plates_representation)
# deal with retimed clip
if instance.data.get("retime"):
version_data.update({
"retime": True,
"speed": instance.data.get("speed", 1),
"timewarps": instance.data.get("timeWarpNodes", []),
"frameStart": frame_start,
"frameEnd": frame_end,
})
instance.data["versionData"] = version_data
# testing families
family = instance.data["family"]
families = instance.data["families"]
def detect_sequence(self, file):
    """Get an identifying pattern for an image sequence file name.

    Can find file.0001.ext, file.%02d.ext, file.####.ext

    Args:
        file (str): file name (usually the basename of a source path)

    Return:
        string: any matching sequence pattern (or None if no match)
        int: padding of sequence numbering (or None if no match)
    """
    # Fixed: six stray debug/log statements referencing undefined names
    # (version_data, instance, family, families) had been spliced into the
    # middle of this docstring and would have raised NameError; removed.
    #
    # Match "####" hashes, printf-style "%04d", or a run of digits sitting
    # directly before the extension (e.g. ".0001.exr").
    foundall = re.findall(
        r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file)
    if foundall:
        # Only one of the three groups matched; empty groups sort first,
        # so the last element of the sorted set is the actual match.
        found = sorted(list(set(foundall[0])))[-1]

        if "%" in found:
            # printf notation carries its own padding, e.g. "%04d" -> 4
            padding = int(re.findall(r"\d+", found)[-1])
        else:
            # plain digits or hashes: padding is the literal length
            padding = len(found)
        return found, padding
    else:
        return None, None

View file

@ -1,17 +0,0 @@
from pyblish import api
class CollectClipSubsets(api.InstancePlugin):
"""Collect Subsets from selected Clips, Tags, Preset."""
order = api.CollectorOrder + 0.103
label = "Collect Remove Clip Instaces"
hosts = ["hiero"]
families = ['clip']
def process(self, instance):
context = instance.context
# removing original instance
self.log.info("Removing instance.name: `{}`".format(instance.data["name"]))
context.remove(instance)

View file

@ -1,92 +1,117 @@
from pyblish import api
import os
import re
import clique
from pype.hosts.hiero.api import (
is_overlapping, get_sequence_pattern_and_padding)
class CollectReview(api.InstancePlugin):
"""Collect review from tags.
Tag is expected to have metadata:
{
"family": "review"
"track": "trackName"
}
"""Collect review representation.
"""
# Run just before CollectSubsets
order = api.CollectorOrder + 0.1022
label = "Collect Review"
hosts = ["hiero"]
families = ["plate"]
families = ["review"]
def get_review_item(self, instance):
"""
Get review clip track item from review track name
Args:
instance (obj): publishing instance
Returns:
hiero.core.TrackItem: corresponding track item
Raises:
Exception: description
"""
review_track = instance.data.get("review")
video_tracks = instance.context.data["videoTracks"]
for track in video_tracks:
if review_track not in track.name():
continue
for item in track.items():
self.log.debug(item)
if is_overlapping(item, self.main_clip):
self.log.debug("Winner is: {}".format(item))
break
# validate the clip is fully converted with review clip
assert is_overlapping(
item, self.main_clip, strict=True), (
"Review clip not cowering fully "
"the clip `{}`").format(self.main_clip.name())
return item
def process(self, instance):
is_sequence = instance.data["isSequence"]
tags = ["review", "ftrackreview"]
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
family = dict(tag["metadata"]).get("tag.family", "")
if family.lower() == "review":
tagged = True
track = dict(tag["metadata"]).get("tag.track")
break
# get reviewable item from `review` instance.data attribute
self.main_clip = instance.data.get("item")
self.rw_clip = self.get_review_item(instance)
if not tagged:
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"review\"".format(instance)
)
return
if not track:
self.log.debug((
"Skipping \"{}\" because tag is not having"
"`track` in metadata"
).format(instance))
return
# let user know there is missing review clip and convert instance
# back as not reviewable
assert self.rw_clip, "Missing reviewable clip for '{}'".format(
self.main_clip.name()
)
# add to representations
if not instance.data.get("representations"):
instance.data["representations"] = list()
if track in instance.data["track"]:
self.log.debug("Review will work on `subset`: {}".format(
instance.data["subset"]))
# get review media main info
rw_source = self.rw_clip.source().mediaSource()
rw_source_duration = int(rw_source.duration())
self.rw_source_path = rw_source.firstpath()
rw_source_file_info = rw_source.fileinfos().pop()
# change families
instance.data["family"] = "plate"
instance.data["families"] = ["review", "ftrack"]
# define if review media is sequence
is_sequence = bool(not rw_source.singleFile())
self.log.debug("is_sequence: {}".format(is_sequence))
self.version_data(instance)
self.create_thumbnail(instance)
# get handles
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
rev_inst = instance
# review timeline and source frame ranges
rw_clip_in = int(self.rw_clip.timelineIn())
rw_clip_out = int(self.rw_clip.timelineOut())
self.rw_clip_source_in = int(self.rw_clip.sourceIn())
self.rw_clip_source_out = int(self.rw_clip.sourceOut())
rw_source_first = int(rw_source_file_info.startFrame())
else:
self.log.debug("Track item on plateMain")
rev_inst = None
for inst in instance.context[:]:
if inst.data["track"] != track:
continue
# calculate delivery source_in and source_out
# main_clip_timeline_in - review_item_timeline_in + 1
main_clip_in = self.main_clip.timelineIn()
main_clip_out = self.main_clip.timelineOut()
if inst.data["item"].name() != instance.data["item"].name():
continue
source_in_diff = main_clip_in - rw_clip_in
source_out_diff = main_clip_out - rw_clip_out
rev_inst = inst
break
if source_in_diff:
self.rw_clip_source_in += source_in_diff
if source_out_diff:
self.rw_clip_source_out += source_out_diff
if rev_inst is None:
raise RuntimeError((
"TrackItem from track name `{}` has to"
"be also selected"
).format(track))
# review clip durations
rw_clip_duration = (
self.rw_clip_source_out - self.rw_clip_source_in) + 1
rw_clip_duration_h = rw_clip_duration + (
handle_start + handle_end)
instance.data["families"].append("review")
# add created data to review item data
instance.data["reviewItemData"] = {
"mediaDuration": rw_source_duration
}
file_path = rev_inst.data.get("sourcePath")
file_dir = os.path.dirname(file_path)
file = os.path.basename(file_path)
file_dir = os.path.dirname(self.rw_source_path)
file = os.path.basename(self.rw_source_path)
ext = os.path.splitext(file)[-1]
# detect if sequence
@ -95,74 +120,87 @@ class CollectReview(api.InstancePlugin):
files = file
else:
files = list()
source_first = instance.data["sourceFirst"]
self.log.debug("_ file: {}".format(file))
spliter, padding = self.detect_sequence(file)
spliter, padding = get_sequence_pattern_and_padding(file)
self.log.debug("_ spliter, padding: {}, {}".format(
spliter, padding))
base_name = file.split(spliter)[0]
# define collection and calculate frame range
collection = clique.Collection(base_name, ext, padding, set(range(
int(source_first + rev_inst.data.get("sourceInH")),
int(source_first + rev_inst.data.get("sourceOutH") + 1))))
int(rw_source_first + int(
self.rw_clip_source_in - handle_start)),
int(rw_source_first + int(
self.rw_clip_source_out + handle_end) + 1))))
self.log.debug("_ collection: {}".format(collection))
real_files = os.listdir(file_dir)
self.log.debug("_ real_files: {}".format(real_files))
# collect frames to repre files list
for item in collection:
if item not in real_files:
self.log.debug("_ item: {}".format(item))
continue
files.append(item)
# add prep tag
tags.extend(["prep", "delete"])
# change label
instance.data["label"] = "{0} - {1} - ({2})".format(
instance.data['asset'], instance.data["subset"], ext
instance.data["label"] = "{0} - ({1})".format(
instance.data["label"], ext
)
self.log.debug("Instance review: {}".format(rev_inst.data["name"]))
self.log.debug("Instance review: {}".format(instance.data["name"]))
# adding representation for review mov
representation = {
"files": files,
"stagingDir": file_dir,
"frameStart": rev_inst.data.get("sourceIn"),
"frameEnd": rev_inst.data.get("sourceOut"),
"frameStartFtrack": rev_inst.data.get("sourceInH"),
"frameEndFtrack": rev_inst.data.get("sourceOutH"),
"frameStart": rw_source_first + self.rw_clip_source_in,
"frameEnd": rw_source_first + self.rw_clip_source_out,
"frameStartFtrack": int(
self.rw_clip_source_in - handle_start),
"frameEndFtrack": int(self.rw_clip_source_out + handle_end),
"step": 1,
"fps": rev_inst.data.get("fps"),
"fps": instance.data["fps"],
"name": "review",
"tags": ["review", "ftrackreview"],
"tags": tags,
"ext": ext[1:]
}
media_duration = instance.data.get("mediaDuration")
clip_duration_h = instance.data.get("clipDurationH")
if media_duration > clip_duration_h:
if rw_source_duration > rw_clip_duration_h:
self.log.debug("Media duration higher: {}".format(
(media_duration - clip_duration_h)))
(rw_source_duration - rw_clip_duration_h)))
representation.update({
"frameStart": instance.data.get("sourceInH"),
"frameEnd": instance.data.get("sourceOutH"),
"tags": ["_cut-bigger", "delete"]
"frameStart": rw_source_first + int(
self.rw_clip_source_in - handle_start),
"frameEnd": rw_source_first + int(
self.rw_clip_source_out + handle_end),
"tags": ["_cut-bigger", "prep", "delete"]
})
elif media_duration < clip_duration_h:
elif rw_source_duration < rw_clip_duration_h:
self.log.debug("Media duration higher: {}".format(
(media_duration - clip_duration_h)))
(rw_source_duration - rw_clip_duration_h)))
representation.update({
"frameStart": instance.data.get("sourceInH"),
"frameEnd": instance.data.get("sourceOutH"),
"tags": ["_cut-smaller", "delete"]
"frameStart": rw_source_first + int(
self.rw_clip_source_in - handle_start),
"frameEnd": rw_source_first + int(
self.rw_clip_source_out + handle_end),
"tags": ["prep", "delete"]
})
instance.data["representations"].append(representation)
self.log.debug("Added representation: {}".format(representation))
self.create_thumbnail(instance)
self.log.debug(
"Added representations: {}".format(
instance.data["representations"]))
def create_thumbnail(self, instance):
item = instance.data["item"]
source_path = instance.data["sourcePath"]
source_file = os.path.basename(source_path)
spliter, padding = self.detect_sequence(source_file)
source_file = os.path.basename(self.rw_source_path)
spliter, padding = get_sequence_pattern_and_padding(source_file)
if spliter:
head, ext = source_file.split(spliter)
@ -171,25 +209,16 @@ class CollectReview(api.InstancePlugin):
# staging dir creation
staging_dir = os.path.dirname(
source_path)
self.rw_source_path)
media_duration = instance.data.get("mediaDuration")
clip_duration_h = instance.data.get("clipDurationH")
self.log.debug("__ media_duration: {}".format(media_duration))
self.log.debug("__ clip_duration_h: {}".format(clip_duration_h))
thumb_frame = int(instance.data["sourceIn"] + (
(instance.data["sourceOut"] - instance.data["sourceIn"]) / 2))
# get thumbnail frame from the middle
thumb_frame = int(self.rw_clip_source_in + (
(self.rw_clip_source_out - self.rw_clip_source_in) / 2))
thumb_file = "{}thumbnail{}{}".format(head, thumb_frame, ".png")
thumb_path = os.path.join(staging_dir, thumb_file)
self.log.debug("__ thumb_path: {}".format(thumb_path))
self.log.debug("__ thumb_frame: {}".format(thumb_frame))
self.log.debug(
"__ sourceIn: `{}`".format(instance.data["sourceIn"]))
thumbnail = item.thumbnail(thumb_frame).save(
thumbnail = self.rw_clip.thumbnail(thumb_frame).save(
thumb_path,
format='png'
)
@ -208,8 +237,6 @@ class CollectReview(api.InstancePlugin):
thumb_representation)
def version_data(self, instance):
item = instance.data["item"]
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
@ -226,34 +253,9 @@ class CollectReview(api.InstancePlugin):
# add to data of representation
version_data.update({
"colorspace": item.sourceMediaColourTransform(),
"colorspace": self.rw_clip.sourceMediaColourTransform(),
"families": instance.data["families"],
"subset": instance.data["subset"],
"fps": instance.context.data["fps"]
"fps": instance.data["fps"]
})
instance.data["versionData"] = version_data
instance.data["source"] = instance.data["sourcePath"]
def detect_sequence(self, file):
""" Get identificating pater for image sequence
Can find file.0001.ext, file.%02d.ext, file.####.ext
Return:
string: any matching sequence patern
int: padding of sequnce numbering
"""
foundall = re.findall(
r"(#+)|(%\d+d)|(?<=[^a-zA-Z0-9])(\d+)(?=\.\w+$)", file)
if foundall:
found = sorted(list(set(foundall[0])))[-1]
if "%" in found:
padding = int(re.findall(r"\d+", found)[-1])
else:
padding = len(found)
return found, padding
else:
return None, None

View file

@ -1,17 +0,0 @@
import pyblish.api
import hiero
class CollectSelection(pyblish.api.ContextPlugin):
"""Inject the selection in the context."""
order = pyblish.api.CollectorOrder - 0.1
label = "Selection"
def process(self, context):
selection = list(hiero.selection)
self.log.debug("selection: {}".format(selection))
context.data["selection"] = selection

View file

@ -1,13 +0,0 @@
from pyblish import api
import hiero
class CollectSequence(api.ContextPlugin):
"""Collect all Track items selection."""
order = api.CollectorOrder - 0.01
label = "Collect Sequence"
hosts = ["hiero"]
def process(self, context):
context.data['activeSequence'] = hiero.ui.activeSequence()

View file

@ -1,54 +0,0 @@
from pyblish import api
class CollectShots(api.InstancePlugin):
"""Collect Shot from Clip."""
# Run just before CollectClipSubsets
order = api.CollectorOrder + 0.1021
label = "Collect Shots"
hosts = ["hiero"]
families = ["clip"]
def process(self, instance):
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance))
# Exclude non-tagged instances.
tagged = False
for tag in instance.data["tags"]:
if tag["name"].lower() == "hierarchy":
tagged = True
if not tagged:
self.log.debug(
"Skipping \"{}\" because its not tagged with "
"\"Hierarchy\"".format(instance)
)
return
# Collect data.
data = {}
for key, value in instance.data.iteritems():
data[key] = value
data["family"] = "shot"
data["families"] = []
data["subset"] = data["family"] + "Main"
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = (
"{} - {} - tasks:{} - assetbuilds:{} - comments:{}".format(
data["asset"],
data["subset"],
[task for task in data["tasks"]],
[x["name"] for x in data.get("assetbuilds", [])],
len(data.get("comments", []))
)
)
# Create instance.
self.log.debug("Creating instance with: {}".format(data["name"]))
instance.context.create_instance(**data)

View file

@ -1,43 +0,0 @@
from pyblish import api
import os
class CollectClipTagFrameStart(api.InstancePlugin):
"""Collect FrameStart from Tags of selected track items."""
order = api.CollectorOrder + 0.013
label = "Collect Frame Start"
hosts = ["hiero"]
families = ['clip']
def process(self, instance):
# gets tags
tags = instance.data["tags"]
for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", "")
# gets only task family tags and collect labels
if "frameStart" in t_family:
t_value = t_metadata.get("tag.value", None)
# backward compatibility
t_number = t_metadata.get("tag.number", None)
start_frame = t_number or t_value
try:
start_frame = int(start_frame)
except ValueError:
if "source" in t_value:
source_first = instance.data["sourceFirst"]
if source_first == 0:
source_first = 1
self.log.info("Start frame on `{0}`".format(source_first))
source_in = instance.data["sourceIn"]
self.log.info("Start frame on `{0}`".format(source_in))
start_frame = source_first + source_in
instance.data["startingFrame"] = start_frame
self.log.info("Start frame on `{0}` set to `{1}`".format(
instance, start_frame
))

View file

@ -1,64 +0,0 @@
import json
from pyblish import api
class CollectClipTagHandles(api.ContextPlugin):
"""Collect Handles from selected track items."""
order = api.CollectorOrder + 0.012
label = "Collect Tag Handles"
hosts = ["hiero"]
families = ['clip']
def process(self, context):
assets_shared = context.data.get("assetsShared")
for instance in context[:]:
self.log.info("Instance.name: `{}`".format(
instance.data["name"]))
# gets tags
tags = instance.data["tags"]
assets_shared_a = assets_shared[instance.data["asset"]]
tag_occurance = 0
for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", "")
# gets only task family tags and collect labels
if "handles" in t_family:
tag_occurance += 1
# restore handleStart/End to 0 at first occurance of Tag
if tag_occurance == 1:
instance.data["handleTag"] = True
instance.data["handleStart"] = 0
instance.data["handleEnd"] = 0
# gets value of handles
t_value = int(t_metadata.get("tag.value", ""))
# gets arguments if there are any
t_args = t_metadata.get("tag.args", "")
assert t_args, self.log.error(
"Tag with Handles is missing Args. "
"Use only handle start/end")
t_args = json.loads(t_args.replace("'", "\""))
# add in start
if 'start' in t_args['where']:
instance.data["handleStart"] += t_value
self.log.info("Collected Handle Start: `{}`".format(
instance.data["handleStart"]))
# add in end
if 'end' in t_args['where']:
instance.data["handleEnd"] += t_value
self.log.info("Collected Handle End: `{}`".format(
instance.data["handleEnd"]))
# adding handles to asset_shared on context
if instance.data.get("handleEnd"):
assets_shared_a[
"handleEnd"] = instance.data["handleEnd"]
if instance.data.get("handleStart"):
assets_shared_a[
"handleStart"] = instance.data["handleStart"]

View file

@ -1,22 +0,0 @@
from pyblish import api
class CollectClipTagResolution(api.InstancePlugin):
"""Collect Source Resolution from Tags of selected track items."""
order = api.CollectorOrder + 0.013
label = "Collect Source Resolution"
hosts = ["hiero"]
families = ['clip']
def process(self, instance):
# gets tags
tags = instance.data["tags"]
for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", "")
# gets only task family tags and collect labels
if "resolution" in t_family:
instance.data["sourceResolution"] = True

View file

@ -1,28 +0,0 @@
from pyblish import api
class CollectClipSubsetsTags(api.InstancePlugin):
"""Collect Subsets from Tags of selected track items."""
order = api.CollectorOrder + 0.012
label = "Collect Tags Subsets"
hosts = ["hiero"]
families = ['clip']
def process(self, instance):
# gets tags
tags = instance.data["tags"]
for t in tags:
t_metadata = dict(t["metadata"])
t_family = t_metadata.get("tag.family", None)
t_subset = t_metadata.get("tag.subset", None)
# gets only task family tags and collect labels
if t_subset and t_family:
subset_name = "{0}{1}".format(
t_family,
t_subset.capitalize())
instance.data['subset'] = subset_name
self.log.info("`subset`: {0} found in `instance.name`: `{1}`".format(subset_name, instance.data["name"]))

View file

@ -4,7 +4,7 @@ from pyblish import api
class CollectClipTagTasks(api.InstancePlugin):
"""Collect Tags from selected track items."""
order = api.CollectorOrder + 0.012
order = api.CollectorOrder
label = "Collect Tag Tasks"
hosts = ["hiero"]
families = ['clip']
@ -14,8 +14,8 @@ class CollectClipTagTasks(api.InstancePlugin):
tags = instance.data["tags"]
tasks = dict()
for t in tags:
t_metadata = dict(t["metadata"])
for tag in tags:
t_metadata = dict(tag.metadata())
t_family = t_metadata.get("tag.family", "")
# gets only task family tags and collect labels

View file

@ -1,30 +0,0 @@
from pyblish import api
class CollectClipTags(api.InstancePlugin):
"""Collect Tags from selected track items."""
order = api.CollectorOrder + 0.011
label = "Collect Tags"
hosts = ["hiero"]
families = ['clip']
def process(self, instance):
tags = instance.data["item"].tags()
tags_d = []
if tags:
for t in tags:
tag_data = {
"name": t.name(),
"object": t,
"metadata": t.metadata(),
"inTime": t.inTime(),
"outTime": t.outTime(),
}
tags_d.append(tag_data)
instance.data["tags"] = tags_d
self.log.info(instance.data["tags"])
return

View file

@ -0,0 +1,74 @@
import os
import pyblish.api
from pype.hosts import hiero as phiero
from avalon import api as avalon
class CollectWorkfile(pyblish.api.ContextPlugin):
"""Inject the current working file into context"""
label = "Collect Workfile"
order = pyblish.api.CollectorOrder - 0.51
def process(self, context):
asset = avalon.Session["AVALON_ASSET"]
subset = "workfile"
project = phiero.get_current_project()
active_sequence = phiero.get_current_sequence()
video_tracks = active_sequence.videoTracks()
audio_tracks = active_sequence.audioTracks()
current_file = project.path()
staging_dir = os.path.dirname(current_file)
base_name = os.path.basename(current_file)
# get workfile's colorspace properties
_clrs = {}
_clrs["useOCIOEnvironmentOverride"] = project.useOCIOEnvironmentOverride() # noqa
_clrs["lutSetting16Bit"] = project.lutSetting16Bit()
_clrs["lutSetting8Bit"] = project.lutSetting8Bit()
_clrs["lutSettingFloat"] = project.lutSettingFloat()
_clrs["lutSettingLog"] = project.lutSettingLog()
_clrs["lutSettingViewer"] = project.lutSettingViewer()
_clrs["lutSettingWorkingSpace"] = project.lutSettingWorkingSpace()
_clrs["lutUseOCIOForExport"] = project.lutUseOCIOForExport()
_clrs["ocioConfigName"] = project.ocioConfigName()
_clrs["ocioConfigPath"] = project.ocioConfigPath()
# set main project attributes to context
context.data["activeProject"] = project
context.data["activeSequence"] = active_sequence
context.data["videoTracks"] = video_tracks
context.data["audioTracks"] = audio_tracks
context.data["currentFile"] = current_file
context.data["colorspace"] = _clrs
self.log.info("currentFile: {}".format(current_file))
# creating workfile representation
representation = {
'name': 'hrox',
'ext': 'hrox',
'files': base_name,
"stagingDir": staging_dir,
}
instance_data = {
"name": "{}_{}".format(asset, subset),
"asset": asset,
"subset": "{}{}".format(asset, subset.capitalize()),
"item": project,
"family": "workfile",
# version data
"versionData": {
"colorspace": _clrs
},
# source attribute
"sourcePath": current_file,
"representations": [representation]
}
instance = context.create_instance(**instance_data)
self.log.info("Creating instance: {}".format(instance))

View file

@ -1,47 +1,44 @@
from pyblish import api
import os
from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
import pyblish
import pype
class ExtractAudioFile(pype.api.Extractor):
"""Extracts audio subset file"""
"""Extracts audio subset file from all active timeline audio tracks"""
order = api.ExtractorOrder
order = pyblish.api.ExtractorOrder
label = "Extract Subset Audio"
hosts = ["hiero"]
families = ["clip", "audio"]
match = api.Intersection
match = pyblish.api.Intersection
def process(self, instance):
import os
from hiero.exporters.FnExportUtil import writeSequenceAudioWithHandles
item = instance.data["item"]
context = instance.context
self.log.debug("creating staging dir")
self.staging_dir(instance)
staging_dir = instance.data["stagingDir"]
# get sequence
sequence = instance.context.data["activeSequence"]
subset = instance.data["subset"]
# get timeline in / out
clip_in = instance.data["clipIn"]
clip_out = instance.data["clipOut"]
# get handles from context
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
# get sequence from context
sequence = context.data["activeSequence"]
staging_dir = self.staging_dir(instance)
self.log.info("Created staging dir: {}...".format(staging_dir))
# path to wav file
audio_file = os.path.join(
staging_dir, "{0}.wav".format(instance.data["subset"])
staging_dir, "{}.wav".format(subset)
)
# export audio to disk
writeSequenceAudioWithHandles(
audio_file,
sequence,
item.timelineIn(),
item.timelineOut(),
clip_in,
clip_out,
handle_start,
handle_end
)

View file

@ -0,0 +1,100 @@
# from pype import plugins
import os
import json
import pyblish.api
import pype
class ExtractClipEffects(pype.api.Extractor):
"""Extract clip effects instances."""
order = pyblish.api.ExtractorOrder
label = "Export Clip Effects"
families = ["effect"]
def process(self, instance):
item = instance.data["item"]
effects = instance.data.get("effects")
# skip any without effects
if not effects:
return
subset = instance.data.get("subset")
family = instance.data["family"]
self.log.debug("creating staging dir")
staging_dir = self.staging_dir(instance)
transfers = list()
if "transfers" not in instance.data:
instance.data["transfers"] = list()
ext = "json"
file = subset + "." + ext
# when instance is created during collection part
resources_dir = instance.data["resourcesDir"]
# change paths in effects to files
for k, effect in effects.items():
if "assignTo" in k:
continue
trn = self.copy_linked_files(effect, resources_dir)
if trn:
transfers.append((trn[0], trn[1]))
instance.data["transfers"].extend(transfers)
self.log.debug("_ transfers: `{}`".format(
instance.data["transfers"]))
# create representations
instance.data["representations"] = list()
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
"version"
]
# pass data to version
version_data = dict()
version_data.update({k: instance.data[k] for k in transfer_data})
# add to data of representation
version_data.update({
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": [family, "plate"],
"subset": subset,
"fps": instance.context.data["fps"]
})
instance.data["versionData"] = version_data
representation = {
'files': file,
'stagingDir': staging_dir,
'name': family + ext.title(),
'ext': ext
}
instance.data["representations"].append(representation)
self.log.debug("_ representations: `{}`".format(
instance.data["representations"]))
self.log.debug("_ version_data: `{}`".format(
instance.data["versionData"]))
with open(os.path.join(staging_dir, file), "w") as outfile:
outfile.write(json.dumps(effects, indent=4, sort_keys=True))
def copy_linked_files(self, effect, dst_dir):
for k, v in effect["node"].items():
if k in "file" and v != '':
base_name = os.path.basename(v)
dst = os.path.join(dst_dir, base_name).replace("\\", "/")
# add it to the json
effect["node"][k] = dst
return (v, dst)

View file

@ -1,260 +0,0 @@
# from pype import plugins
import os
import json
import re
import copy
import pyblish.api
import tempfile
from avalon import io, api
class ExtractVideoTracksLuts(pyblish.api.InstancePlugin):
"""Collect video tracks effects into context."""
order = pyblish.api.ExtractorOrder
label = "Export Soft Lut Effects"
families = ["lut"]
def process(self, instance):
item = instance.data["item"]
effects = instance.data.get("effectTrackItems")
instance.data["families"] = [f for f in instance.data.get(
"families", []) if f not in ["lut"]]
self.log.debug(
"__ instance.data[families]: `{}`".format(
instance.data["families"]))
# skip any without effects
if not effects:
return
subset = instance.data.get("subset")
subset_split = re.findall(r'[A-Z][^A-Z]*', subset)
if len(subset_split) > 0:
root_name = subset.replace(subset_split[0], "")
subset_split.insert(0, root_name.capitalize())
subset_split.insert(0, "lut")
self.log.debug("creating staging dir")
# staging_dir = self.staging_dir(instance)
# TODO: only provisory will be replace by function
staging_dir = instance.data.get('stagingDir', None)
if not staging_dir:
staging_dir = os.path.normpath(
tempfile.mkdtemp(prefix="pyblish_tmp_")
)
instance.data['stagingDir'] = staging_dir
self.log.debug("creating staging dir: `{}`".format(staging_dir))
transfers = list()
if "transfers" not in instance.data:
instance.data["transfers"] = list()
name = "".join(subset_split)
ext = "json"
file = name + "." + ext
# create new instance and inherit data
data = {}
for key, value in instance.data.iteritems():
data[key] = value
# change names
data["subset"] = name
data["family"] = "lut"
data["families"] = []
data["name"] = data["subset"] + "_" + data["asset"]
data["label"] = "{} - {} - ({})".format(
data['asset'], data["subset"], os.path.splitext(file)[1]
)
data["source"] = data["sourcePath"]
# WARNING instance should not be created in Extractor!
# create new instance
instance = instance.context.create_instance(**data)
# TODO replace line below with `instance.data["resourcesDir"]`
# when instance is created during collection part
dst_dir = self.resource_destination_dir(instance)
# change paths in effects to files
for k, effect in effects["effects"].items():
trn = self.copy_linked_files(effect, dst_dir)
if trn:
transfers.append((trn[0], trn[1]))
instance.data["transfers"].extend(transfers)
self.log.debug("_ transfers: `{}`".format(
instance.data["transfers"]))
# create representations
instance.data["representations"] = list()
transfer_data = [
"handleStart", "handleEnd", "sourceIn", "sourceOut",
"frameStart", "frameEnd", "sourceInH", "sourceOutH",
"clipIn", "clipOut", "clipInH", "clipOutH", "asset", "track",
"version"
]
# pass data to version
version_data = dict()
version_data.update({k: instance.data[k] for k in transfer_data})
# add to data of representation
version_data.update({
"colorspace": item.sourceMediaColourTransform(),
"colorspaceScript": instance.context.data["colorspace"],
"families": ["plate", "lut"],
"subset": name,
"fps": instance.context.data["fps"]
})
instance.data["versionData"] = version_data
representation = {
'files': file,
'stagingDir': staging_dir,
'name': "lut" + ext.title(),
'ext': ext
}
instance.data["representations"].append(representation)
self.log.debug("_ representations: `{}`".format(
instance.data["representations"]))
self.log.debug("_ version_data: `{}`".format(
instance.data["versionData"]))
with open(os.path.join(staging_dir, file), "w") as outfile:
outfile.write(json.dumps(effects, indent=4, sort_keys=True))
return
def copy_linked_files(self, effect, dst_dir):
for k, v in effect["node"].items():
if k in "file" and v != '':
base_name = os.path.basename(v)
dst = os.path.join(dst_dir, base_name).replace("\\", "/")
# add it to the json
effect["node"][k] = dst
return (v, dst)
def resource_destination_dir(self, instance):
# WARNING this is from `collect_instance_anatomy_data.py`
anatomy_data = copy.deepcopy(instance.context.data["anatomyData"])
project_entity = instance.context.data["projectEntity"]
context_asset_entity = instance.context.data["assetEntity"]
asset_name = instance.data["asset"]
if context_asset_entity["name"] == asset_name:
asset_entity = context_asset_entity
else:
asset_entity = io.find_one({
"type": "asset",
"name": asset_name,
"parent": project_entity["_id"]
})
subset_name = instance.data["subset"]
version_number = instance.data.get("version")
latest_version = None
if asset_entity:
subset_entity = io.find_one({
"type": "subset",
"name": subset_name,
"parent": asset_entity["_id"]
})
if subset_entity is None:
self.log.debug("Subset entity does not exist yet.")
else:
version_entity = io.find_one(
{
"type": "version",
"parent": subset_entity["_id"]
},
sort=[("name", -1)]
)
if version_entity:
latest_version = version_entity["name"]
if version_number is None:
version_number = 1
if latest_version is not None:
version_number += int(latest_version)
anatomy_data.update({
"asset": asset_name,
"family": instance.data["family"],
"subset": subset_name,
"version": version_number,
"hierarchy": instance.data["hierarchy"]
})
resolution_width = instance.data.get("resolutionWidth")
if resolution_width:
anatomy_data["resolution_width"] = resolution_width
resolution_height = instance.data.get("resolutionHeight")
if resolution_height:
anatomy_data["resolution_height"] = resolution_height
pixel_aspect = instance.data.get("pixelAspect")
if pixel_aspect:
anatomy_data["pixel_aspect"] = float("{:0.2f}".format(pixel_aspect))
fps = instance.data.get("fps")
if resolution_height:
anatomy_data["fps"] = float("{:0.2f}".format(fps))
instance.data["projectEntity"] = project_entity
instance.data["assetEntity"] = asset_entity
instance.data["anatomyData"] = anatomy_data
instance.data["latestVersion"] = latest_version
instance.data["version"] = version_number
# WARNING this is from `collect_resources_path.py`
anatomy = instance.context.data["anatomy"]
template_data = copy.deepcopy(instance.data["anatomyData"])
# This is for cases of Deprecated anatomy without `folder`
# TODO remove when all clients have solved this issue
template_data.update({
"frame": "FRAME_TEMP",
"representation": "TEMP"
})
anatomy_filled = anatomy.format(template_data)
if "folder" in anatomy.templates["publish"]:
publish_folder = anatomy_filled["publish"]["folder"]
else:
# solve deprecated situation when `folder` key is not underneath
# `publish` anatomy
project_name = api.Session["AVALON_PROJECT"]
self.log.warning((
"Deprecation warning: Anatomy does not have set `folder`"
" key underneath `publish` (in global of for project `{}`)."
).format(project_name))
file_path = anatomy_filled["publish"]["path"]
# Directory
publish_folder = os.path.dirname(file_path)
publish_folder = os.path.normpath(publish_folder)
resources_folder = os.path.join(publish_folder, "resources")
instance.data["publishDir"] = publish_folder
instance.data["resourcesDir"] = resources_folder
return resources_folder

View file

@ -8,12 +8,11 @@ import clique
from avalon.vendor import filelink
class ExtractReviewCutUp(pype.api.Extractor):
class ExtractReviewPreparation(pype.api.Extractor):
"""Cut up clips from long video file"""
order = api.ExtractorOrder
# order = api.CollectorOrder + 0.1023
label = "Extract Review CutUp"
label = "Extract Review Preparation"
hosts = ["hiero"]
families = ["review"]
@ -22,22 +21,18 @@ class ExtractReviewCutUp(pype.api.Extractor):
def process(self, instance):
inst_data = instance.data
asset = inst_data['asset']
item = inst_data['item']
event_number = int(item.eventNumber())
asset = inst_data["asset"]
review_item_data = instance.data.get("reviewItemData")
# get representation and loop them
representations = inst_data["representations"]
# check if sequence
is_sequence = inst_data["isSequence"]
# get resolution default
resolution_width = inst_data["resolutionWidth"]
resolution_height = inst_data["resolutionHeight"]
# frame range data
media_duration = inst_data["mediaDuration"]
media_duration = review_item_data["mediaDuration"]
ffmpeg_path = pype.lib.get_ffmpeg_tool_path("ffmpeg")
ffprobe_path = pype.lib.get_ffmpeg_tool_path("ffprobe")
@ -52,7 +47,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
# check if supported tags are in representation for activation
filter_tag = False
for tag in ["_cut-bigger", "_cut-smaller"]:
for tag in ["_cut-bigger", "prep"]:
if tag in tags:
filter_tag = True
break
@ -70,7 +65,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
full_output_dir = os.path.join(
staging_dir, "cuts")
if is_sequence:
if isinstance(files, list):
new_files = list()
# frame range delivery included handles
@ -99,12 +94,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
index = 0
for image in collection:
dst_file_num = frame_start + index
dst_file_name = "".join([
str(event_number),
head,
str(padding % dst_file_num),
tail
])
dst_file_name = head + str(padding % dst_file_num) + tail
src = os.path.join(staging_dir, image)
dst = os.path.join(full_output_dir, dst_file_name)
self.log.info("Creating temp hardlinks: {}".format(dst))
@ -142,7 +132,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
).format(**locals())
self.log.debug("ffprob_cmd: {}".format(ffprob_cmd))
audio_check_output = pype.api.run_subprocess(ffprob_cmd)
audio_check_output = pype.api.subprocess(ffprob_cmd)
self.log.debug(
"audio_check_output: {}".format(audio_check_output))
@ -177,7 +167,7 @@ class ExtractReviewCutUp(pype.api.Extractor):
# try to get video native resolution data
try:
resolution_output = pype.api.run_subprocess((
resolution_output = pype.api.subprocess((
"\"{ffprobe_path}\" -i \"{full_input_path}\""
" -v error "
"-select_streams v:0 -show_entries "
@ -290,7 +280,8 @@ class ExtractReviewCutUp(pype.api.Extractor):
# run subprocess
self.log.debug("Executing: {}".format(subprcs_cmd))
pype.api.run_subprocess(subprcs_cmd, logger=self.log)
output = pype.api.subprocess(subprcs_cmd)
self.log.debug("Output: {}".format(output))
repre_new = {
"files": new_files,
@ -302,7 +293,8 @@ class ExtractReviewCutUp(pype.api.Extractor):
"step": 1,
"fps": fps,
"name": "cut_up_preview",
"tags": ["review"] + self.tags_addition,
"tags": [
"review", "ftrackreview", "delete"] + self.tags_addition,
"ext": ext,
"anatomy_template": "publish"
}

View file

@ -0,0 +1,25 @@
import pyblish
from pype.hosts.hiero.api import is_overlapping
class ValidateAudioFile(pyblish.api.InstancePlugin):
    """Validate audio subset has available audio track clips.

    Fails when no clip on any of the context's audio tracks overlaps
    the instance's track item in time.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Audio Tracks"
    hosts = ["hiero"]
    families = ["audio"]

    def process(self, instance):
        """Fail unless some audio track clip overlaps the instance's clip.

        Args:
            instance: pyblish instance providing ``data["item"]`` (the
                track item being published) and
                ``context.data["audioTracks"]`` (iterable of audio
                tracks whose ``.items()`` yields clips).

        Raises:
            AssertionError: when no overlapping audio clip is found.
        """
        clip = instance.data["item"]
        audio_tracks = instance.context.data["audioTracks"]

        audio_clip = None
        for a_track in audio_tracks:
            for item in a_track.items():
                if is_overlapping(item, clip):
                    # One overlapping clip is enough; stop scanning.
                    audio_clip = item
                    break
            if audio_clip is not None:
                break

        # A bare `assert` would be stripped under `python -O`, silently
        # disabling this validator. Raise AssertionError explicitly so the
        # check always runs while keeping the exception type callers see.
        if not audio_clip:
            raise AssertionError(
                "Missing relative audio clip for clip {}".format(clip.name())
            )