moved hiero integratoin next to server codebase
10
server_addon/hiero/client/ayon_hiero/__init__.py
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
from .addon import (
|
||||
HIERO_ROOT_DIR,
|
||||
HieroAddon,
|
||||
)
|
||||
|
||||
|
||||
__all__ = (
|
||||
"HIERO_ROOT_DIR",
|
||||
"HieroAddon",
|
||||
)
|
||||
64
server_addon/hiero/client/ayon_hiero/addon.py
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
import os
|
||||
import platform
|
||||
from ayon_core.addon import AYONAddon, IHostAddon
|
||||
|
||||
HIERO_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
|
||||
class HieroAddon(AYONAddon, IHostAddon):
    """AYON addon integrating Hiero as a host application."""

    name = "hiero"
    host_name = "hiero"

    def add_implementation_envs(self, env, _app):
        """Modify launch environment for a Hiero application.

        Args:
            env (dict): Environment variables, modified in-place.
            _app: Application object (unused).
        """
        # Add requirements to HIERO_PLUGIN_PATH
        new_hiero_paths = [
            os.path.join(HIERO_ROOT_DIR, "api", "startup")
        ]
        old_hiero_path = env.get("HIERO_PLUGIN_PATH") or ""
        for path in old_hiero_path.split(os.pathsep):
            if not path:
                continue

            norm_path = os.path.normpath(path)
            if norm_path not in new_hiero_paths:
                new_hiero_paths.append(norm_path)

        env["HIERO_PLUGIN_PATH"] = os.pathsep.join(new_hiero_paths)
        # Remove auto screen scale factor for Qt
        # - let Hiero decide it's value
        env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
        # Remove tkinter library paths if are set
        env.pop("TK_LIBRARY", None)
        env.pop("TCL_LIBRARY", None)

        # Add vendor to PYTHONPATH
        # NOTE: use '.get' - direct indexing raised KeyError when
        #   PYTHONPATH was not set at all
        python_path = env.get("PYTHONPATH") or ""
        python_path_parts = []
        if python_path:
            python_path_parts = python_path.split(os.pathsep)
        vendor_path = os.path.join(HIERO_ROOT_DIR, "vendor")
        python_path_parts.insert(0, vendor_path)
        env["PYTHONPATH"] = os.pathsep.join(python_path_parts)

        # Set default values if are not already set via settings
        defaults = {
            "LOGLEVEL": "DEBUG"
        }
        for key, value in defaults.items():
            if not env.get(key):
                env[key] = value

        # Try to add QuickTime to PATH
        quick_time_path = "C:/Program Files (x86)/QuickTime/QTSystem"
        # NOTE: 'platform.system()' returns "Windows" (capitalized);
        #   the original lowercase "windows" comparison could never match
        if platform.system() == "Windows" and os.path.exists(quick_time_path):
            path_value = env.get("PATH") or ""
            path_paths = [
                path
                for path in path_value.split(os.pathsep)
                if path
            ]
            path_paths.append(quick_time_path)
            env["PATH"] = os.pathsep.join(path_paths)

    def get_workfile_extensions(self):
        """Return workfile extensions Hiero can open/save."""
        return [".hrox"]
|
||||
131
server_addon/hiero/client/ayon_hiero/api/__init__.py
Normal file
|
|
@ -0,0 +1,131 @@
|
|||
from .workio import (
|
||||
open_file,
|
||||
save_file,
|
||||
current_file,
|
||||
has_unsaved_changes,
|
||||
file_extensions,
|
||||
work_root
|
||||
)
|
||||
|
||||
from .pipeline import (
|
||||
launch_workfiles_app,
|
||||
ls,
|
||||
install,
|
||||
uninstall,
|
||||
reload_config,
|
||||
containerise,
|
||||
publish,
|
||||
maintained_selection,
|
||||
parse_container,
|
||||
update_container,
|
||||
reset_selection
|
||||
)
|
||||
|
||||
from .constants import (
|
||||
OPENPYPE_TAG_NAME,
|
||||
DEFAULT_SEQUENCE_NAME,
|
||||
DEFAULT_BIN_NAME
|
||||
)
|
||||
|
||||
from .lib import (
|
||||
flatten,
|
||||
get_track_items,
|
||||
get_current_project,
|
||||
get_current_sequence,
|
||||
get_timeline_selection,
|
||||
get_current_track,
|
||||
get_track_item_tags,
|
||||
get_track_openpype_tag,
|
||||
set_track_openpype_tag,
|
||||
get_track_openpype_data,
|
||||
get_track_item_pype_tag,
|
||||
set_track_item_pype_tag,
|
||||
get_track_item_pype_data,
|
||||
get_trackitem_openpype_tag,
|
||||
set_trackitem_openpype_tag,
|
||||
get_trackitem_openpype_data,
|
||||
set_publish_attribute,
|
||||
get_publish_attribute,
|
||||
imprint,
|
||||
get_selected_track_items,
|
||||
set_selected_track_items,
|
||||
create_nuke_workfile_clips,
|
||||
create_bin,
|
||||
apply_colorspace_project,
|
||||
apply_colorspace_clips,
|
||||
is_overlapping,
|
||||
get_sequence_pattern_and_padding
|
||||
)
|
||||
|
||||
from .plugin import (
|
||||
CreatorWidget,
|
||||
Creator,
|
||||
PublishClip,
|
||||
SequenceLoader,
|
||||
ClipLoader
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
# avalon pipeline module
|
||||
"launch_workfiles_app",
|
||||
"ls",
|
||||
"install",
|
||||
"uninstall",
|
||||
"reload_config",
|
||||
"containerise",
|
||||
"publish",
|
||||
"maintained_selection",
|
||||
"parse_container",
|
||||
"update_container",
|
||||
"reset_selection",
|
||||
|
||||
# Workfiles API
|
||||
"open_file",
|
||||
"save_file",
|
||||
"current_file",
|
||||
"has_unsaved_changes",
|
||||
"file_extensions",
|
||||
"work_root",
|
||||
|
||||
# Constants
|
||||
"OPENPYPE_TAG_NAME",
|
||||
"DEFAULT_SEQUENCE_NAME",
|
||||
"DEFAULT_BIN_NAME",
|
||||
|
||||
# Lib functions
|
||||
"flatten",
|
||||
"get_track_items",
|
||||
"get_current_project",
|
||||
"get_current_sequence",
|
||||
"get_timeline_selection",
|
||||
"get_current_track",
|
||||
"get_track_item_tags",
|
||||
"get_track_openpype_tag",
|
||||
"set_track_openpype_tag",
|
||||
"get_track_openpype_data",
|
||||
"get_trackitem_openpype_tag",
|
||||
"set_trackitem_openpype_tag",
|
||||
"get_trackitem_openpype_data",
|
||||
"set_publish_attribute",
|
||||
"get_publish_attribute",
|
||||
"imprint",
|
||||
"get_selected_track_items",
|
||||
"set_selected_track_items",
|
||||
"create_nuke_workfile_clips",
|
||||
"create_bin",
|
||||
"is_overlapping",
|
||||
"apply_colorspace_project",
|
||||
"apply_colorspace_clips",
|
||||
"get_sequence_pattern_and_padding",
|
||||
# deprecated
|
||||
"get_track_item_pype_tag",
|
||||
"set_track_item_pype_tag",
|
||||
"get_track_item_pype_data",
|
||||
|
||||
# plugins
|
||||
"CreatorWidget",
|
||||
"Creator",
|
||||
"PublishClip",
|
||||
"SequenceLoader",
|
||||
"ClipLoader"
|
||||
]
|
||||
3
server_addon/hiero/client/ayon_hiero/api/constants.py
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
# Name of the Hiero tag under which pipeline metadata is stored on
# tracks/track items (see api.lib tag helpers).
OPENPYPE_TAG_NAME = "openpypeData"
# Default name used when a new sequence is created by the pipeline.
DEFAULT_SEQUENCE_NAME = "openpypeSequence"
# Default name used when a new bin is created by the pipeline.
DEFAULT_BIN_NAME = "openpypeBin"
|
||||
136
server_addon/hiero/client/ayon_hiero/api/events.py
Normal file
|
|
@ -0,0 +1,136 @@
|
|||
import os
|
||||
|
||||
import hiero.core.events
|
||||
|
||||
from ayon_core.lib import Logger, register_event_callback
|
||||
|
||||
from .lib import (
|
||||
sync_avalon_data_to_workfile,
|
||||
launch_workfiles_app,
|
||||
before_project_save,
|
||||
apply_colorspace_project
|
||||
)
|
||||
from .tags import add_tags_to_workfile
|
||||
from .menu import update_menu_task_label
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
def startupCompleted(event):
    """Log that Hiero startup has completed (no other action)."""
    # Fixed typo in the log message: "competed" -> "completed"
    log.info("startup completed event...")
    return
|
||||
|
||||
|
||||
def shutDown(event):
    """Log that Hiero is shutting down (no other action)."""
    log.info("shut down event...")
|
||||
|
||||
|
||||
def beforeNewProjectCreated(event):
|
||||
log.info("before new project created event...")
|
||||
return
|
||||
|
||||
|
||||
def afterNewProjectCreated(event):
    """Initialize a freshly created project.

    Syncs pipeline data into the project, creates preset tags, optionally
    opens the Workfiles tool and applies project colorspace settings.
    """
    log.info("after new project created event...")
    # sync avalon data to project properties
    sync_avalon_data_to_workfile()

    # add tags from preset
    add_tags_to_workfile()

    # Workfiles.
    # WORKFILES_STARTUP is expected to hold "0"/"1" - truthy value opens
    # the Workfiles tool once per session
    if int(os.environ.get("WORKFILES_STARTUP", "0")):
        hiero.core.events.sendEvent("kStartWorkfiles", None)
        # reset workfiles startup not to open any more in session
        os.environ["WORKFILES_STARTUP"] = "0"

    apply_colorspace_project()
|
||||
|
||||
|
||||
def beforeProjectLoad(event):
|
||||
log.info("before project load event...")
|
||||
return
|
||||
|
||||
|
||||
def afterProjectLoad(event):
    """Re-sync pipeline data and preset tags after a project is loaded."""
    log.info("after project load event...")
    # sync avalon data to project properties
    sync_avalon_data_to_workfile()

    # add tags from preset
    add_tags_to_workfile()
|
||||
|
||||
|
||||
def beforeProjectClosed(event):
|
||||
log.info("before project closed event...")
|
||||
return
|
||||
|
||||
|
||||
def afterProjectClosed(event):
|
||||
log.info("after project closed event...")
|
||||
return
|
||||
|
||||
|
||||
def beforeProjectSaved(event):
|
||||
log.info("before project saved event...")
|
||||
return
|
||||
|
||||
|
||||
def afterProjectSaved(event):
|
||||
log.info("after project saved event...")
|
||||
return
|
||||
|
||||
|
||||
def register_hiero_events():
    """Register callbacks for Hiero's native application events.

    Only "kAfterNewProjectCreated", "kAfterProjectLoad" and
    "kBeforeProjectSave" are actively registered; the remaining
    registrations are kept commented out for reference. A custom
    "kStartWorkfiles" event is also registered to open the Workfiles tool.
    """
    log.info(
        "Registering events for: kBeforeNewProjectCreated, "
        "kAfterNewProjectCreated, kBeforeProjectLoad, kAfterProjectLoad, "
        "kBeforeProjectSave, kAfterProjectSave, kBeforeProjectClose, "
        "kAfterProjectClose, kShutdown, kStartup, kSelectionChanged"
    )

    # hiero.core.events.registerInterest(
    #     "kBeforeNewProjectCreated", beforeNewProjectCreated)
    hiero.core.events.registerInterest(
        "kAfterNewProjectCreated", afterNewProjectCreated)

    # hiero.core.events.registerInterest(
    #     "kBeforeProjectLoad", beforeProjectLoad)
    hiero.core.events.registerInterest(
        "kAfterProjectLoad", afterProjectLoad)

    hiero.core.events.registerInterest(
        "kBeforeProjectSave", before_project_save)
    # hiero.core.events.registerInterest(
    #     "kAfterProjectSave", afterProjectSaved)
    #
    # hiero.core.events.registerInterest(
    #     "kBeforeProjectClose", beforeProjectClosed)
    # hiero.core.events.registerInterest(
    #     "kAfterProjectClose", afterProjectClosed)
    #
    # hiero.core.events.registerInterest("kShutdown", shutDown)
    # hiero.core.events.registerInterest("kStartup", startupCompleted)

    # INFO: was disabled because it was slowing down timeline operations
    # hiero.core.events.registerInterest(
    #     ("kSelectionChanged", "kTimeline"), selection_changed_timeline)

    # workfiles
    # registerEventType raises RuntimeError when the event type already
    # exists (e.g. on pipeline reload) - safe to ignore
    try:
        hiero.core.events.registerEventType("kStartWorkfiles")
        hiero.core.events.registerInterest(
            "kStartWorkfiles", launch_workfiles_app)
    except RuntimeError:
        pass
|
||||
|
||||
|
||||
def register_events():
    """Register AYON event callbacks used by the Hiero integration."""
    # Keep the menu's context label in sync with the active task.
    register_event_callback("taskChanged", update_menu_task_label)
    log.info("Installed event callback for 'taskChanged'..")
|
||||
85
server_addon/hiero/client/ayon_hiero/api/launchforhiero.py
Normal file
|
|
@ -0,0 +1,85 @@
|
|||
import logging
|
||||
|
||||
from scriptsmenu import scriptsmenu
|
||||
from qtpy import QtWidgets
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _hiero_main_window():
    """Return Hiero's main window"""
    for widget in QtWidgets.QApplication.topLevelWidgets():
        is_hiero_main = (
            widget.inherits('QMainWindow')
            and widget.metaObject().className()
                == 'Foundry::UI::DockMainWindow'
        )
        if is_hiero_main:
            return widget
    raise RuntimeError('Could not find HieroWindow instance')
|
||||
|
||||
|
||||
def _hiero_main_menubar():
    """Retrieve the main menubar of the Hiero window"""
    main_window = _hiero_main_window()
    menu_bars = [
        child for child in main_window.children()
        if isinstance(child, QtWidgets.QMenuBar)
    ]
    assert len(menu_bars) == 1, "Error, could not find menu bar!"
    return menu_bars[0]
|
||||
|
||||
|
||||
def find_scripts_menu(title, parent):
    """Return existing scripts menu with ``title`` under ``parent``.

    Args:
        title (str): the title name of the scripts menu
        parent (QtWidgets.QMenuBar): the menubar to check

    Returns:
        QtWidgets.QMenu or None
    """
    matches = [
        child for child in parent.children()
        if isinstance(child, scriptsmenu.ScriptsMenu)
        and child.title() == title
    ]
    if not matches:
        return None

    assert len(matches) < 2, (
        "Multiple instances of menu '{}' "
        "in menu bar".format(title)
    )
    return matches[0]
|
||||
|
||||
|
||||
def main(title="Scripts", parent=None, objectName=None):
    """Build the main scripts menu in Hiero

    Args:
        title (str): name of the menu in the application

        parent (QtWidgets.QtMenuBar): the parent object for the menu

        objectName (str): custom objectName for scripts menu

    Returns:
        scriptsmenu.ScriptsMenu instance

    """
    menubar = parent or _hiero_main_menubar()
    try:
        # Reuse an existing menu with the same title if present.
        menu = find_scripts_menu(title, menubar)
        if not menu:
            log.info("Attempting to build menu ...")
            menu = scriptsmenu.ScriptsMenu(
                title=title,
                parent=menubar,
                objectName=objectName or title.lower()
            )
    except Exception as exc:
        log.error(exc)
        return

    return menu
|
||||
1381
server_addon/hiero/client/ayon_hiero/api/lib.py
Normal file
175
server_addon/hiero/client/ayon_hiero/api/menu.py
Normal file
|
|
@ -0,0 +1,175 @@
|
|||
import os
|
||||
import sys
|
||||
|
||||
import hiero.core
|
||||
from hiero.ui import findMenuAction
|
||||
|
||||
from qtpy import QtGui
|
||||
|
||||
from ayon_core.lib import Logger, is_dev_mode_enabled
|
||||
from ayon_core.tools.utils import host_tools
|
||||
from ayon_core.settings import get_project_settings
|
||||
from ayon_core.pipeline import (
|
||||
get_current_project_name,
|
||||
get_current_folder_path,
|
||||
get_current_task_name
|
||||
)
|
||||
|
||||
from . import tags
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
self = sys.modules[__name__]
|
||||
self._change_context_menu = None
|
||||
|
||||
|
||||
def get_context_label():
    """Return '<folder path>, <task name>' for the current context."""
    folder_path = get_current_folder_path()
    task_name = get_current_task_name()
    return "{}, {}".format(folder_path, task_name)
|
||||
|
||||
|
||||
def update_menu_task_label():
    """Update the task label in Avalon menu to current session"""

    # '_change_context_menu' holds the label text the menu action was
    # created with - it is used as the lookup key here
    object_name = self._change_context_menu
    found_menu = findMenuAction(object_name)

    if not found_menu:
        log.warning("Can't find menuItem: {}".format(object_name))
        return

    label = get_context_label()

    menu = found_menu.menu()
    # remember the new label so the next lookup finds this menu again
    self._change_context_menu = label
    menu.setTitle(label)
|
||||
|
||||
|
||||
def menu_install():
|
||||
"""
|
||||
Installing menu into Hiero
|
||||
|
||||
"""
|
||||
|
||||
from . import (
|
||||
publish, launch_workfiles_app, reload_config,
|
||||
apply_colorspace_project, apply_colorspace_clips
|
||||
)
|
||||
from .lib import get_main_window
|
||||
|
||||
main_window = get_main_window()
|
||||
|
||||
# here is the best place to add menu
|
||||
|
||||
menu_name = os.environ['AYON_MENU_LABEL']
|
||||
|
||||
context_label = get_context_label()
|
||||
|
||||
self._change_context_menu = context_label
|
||||
|
||||
try:
|
||||
check_made_menu = findMenuAction(menu_name)
|
||||
except Exception:
|
||||
check_made_menu = None
|
||||
|
||||
if not check_made_menu:
|
||||
# Grab Hiero's MenuBar
|
||||
menu = hiero.ui.menuBar().addMenu(menu_name)
|
||||
else:
|
||||
menu = check_made_menu.menu()
|
||||
|
||||
context_label_action = menu.addAction(context_label)
|
||||
context_label_action.setEnabled(False)
|
||||
|
||||
menu.addSeparator()
|
||||
|
||||
workfiles_action = menu.addAction("Work Files...")
|
||||
workfiles_action.setIcon(QtGui.QIcon("icons:Position.png"))
|
||||
workfiles_action.triggered.connect(launch_workfiles_app)
|
||||
|
||||
default_tags_action = menu.addAction("Create Default Tags")
|
||||
default_tags_action.setIcon(QtGui.QIcon("icons:Position.png"))
|
||||
default_tags_action.triggered.connect(tags.add_tags_to_workfile)
|
||||
|
||||
menu.addSeparator()
|
||||
|
||||
creator_action = menu.addAction("Create...")
|
||||
creator_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
|
||||
creator_action.triggered.connect(
|
||||
lambda: host_tools.show_creator(parent=main_window)
|
||||
)
|
||||
|
||||
publish_action = menu.addAction("Publish...")
|
||||
publish_action.setIcon(QtGui.QIcon("icons:Output.png"))
|
||||
publish_action.triggered.connect(
|
||||
lambda *args: publish(hiero.ui.mainWindow())
|
||||
)
|
||||
|
||||
loader_action = menu.addAction("Load...")
|
||||
loader_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
|
||||
loader_action.triggered.connect(
|
||||
lambda: host_tools.show_loader(parent=main_window)
|
||||
)
|
||||
|
||||
sceneinventory_action = menu.addAction("Manage...")
|
||||
sceneinventory_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
|
||||
sceneinventory_action.triggered.connect(
|
||||
lambda: host_tools.show_scene_inventory(parent=main_window)
|
||||
)
|
||||
|
||||
library_action = menu.addAction("Library...")
|
||||
library_action.setIcon(QtGui.QIcon("icons:CopyRectangle.png"))
|
||||
library_action.triggered.connect(
|
||||
lambda: host_tools.show_library_loader(parent=main_window)
|
||||
)
|
||||
|
||||
if is_dev_mode_enabled():
|
||||
menu.addSeparator()
|
||||
reload_action = menu.addAction("Reload pipeline")
|
||||
reload_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
|
||||
reload_action.triggered.connect(reload_config)
|
||||
|
||||
menu.addSeparator()
|
||||
apply_colorspace_p_action = menu.addAction("Apply Colorspace Project")
|
||||
apply_colorspace_p_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
|
||||
apply_colorspace_p_action.triggered.connect(apply_colorspace_project)
|
||||
|
||||
apply_colorspace_c_action = menu.addAction("Apply Colorspace Clips")
|
||||
apply_colorspace_c_action.setIcon(QtGui.QIcon("icons:ColorAdd.png"))
|
||||
apply_colorspace_c_action.triggered.connect(apply_colorspace_clips)
|
||||
|
||||
menu.addSeparator()
|
||||
|
||||
exeprimental_action = menu.addAction("Experimental tools...")
|
||||
exeprimental_action.triggered.connect(
|
||||
lambda: host_tools.show_experimental_tools_dialog(parent=main_window)
|
||||
)
|
||||
|
||||
|
||||
def add_scripts_menu():
    """Install the studio 'scriptsmenu' into Hiero if configured."""
    try:
        from . import launchforhiero
    except ImportError:

        log.warning(
            "Skipping studio.menu install, because "
            "'scriptsmenu' module seems unavailable."
        )
        return

    # load configuration of custom menu
    project_settings = get_project_settings(get_current_project_name())
    config = project_settings["hiero"]["scriptsmenu"]["definition"]
    _menu = project_settings["hiero"]["scriptsmenu"]["name"]

    if not config:
        log.warning("Skipping studio menu, no definition found.")
        return

    # run the launcher for Hiero menu
    studio_menu = launchforhiero.main(title=_menu.title())

    # apply configuration
    # NOTE(review): the menu instance is passed as its own first argument
    # here - confirm against scriptsmenu's 'build_from_configuration'
    # signature (looks like it may expect (parent, configuration))
    studio_menu.build_from_configuration(studio_menu, config)
|
||||
443
server_addon/hiero/client/ayon_hiero/api/otio/hiero_export.py
Normal file
|
|
@ -0,0 +1,443 @@
|
|||
""" compatibility OpenTimelineIO 0.12.0 and newer
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
import ast
|
||||
import opentimelineio as otio
|
||||
from . import utils
|
||||
import hiero.core
|
||||
import hiero.ui
|
||||
|
||||
|
||||
TRACK_TYPE_MAP = {
|
||||
hiero.core.VideoTrack: otio.schema.TrackKind.Video,
|
||||
hiero.core.AudioTrack: otio.schema.TrackKind.Audio
|
||||
}
|
||||
MARKER_COLOR_MAP = {
|
||||
"magenta": otio.schema.MarkerColor.MAGENTA,
|
||||
"red": otio.schema.MarkerColor.RED,
|
||||
"yellow": otio.schema.MarkerColor.YELLOW,
|
||||
"green": otio.schema.MarkerColor.GREEN,
|
||||
"cyan": otio.schema.MarkerColor.CYAN,
|
||||
"blue": otio.schema.MarkerColor.BLUE,
|
||||
}
|
||||
|
||||
|
||||
class CTX:
|
||||
project_fps = None
|
||||
timeline = None
|
||||
include_tags = True
|
||||
|
||||
|
||||
def flatten(list_):
    """Recursively yield leaf items from arbitrarily nested lists/tuples."""
    for element in list_:
        if isinstance(element, (list, tuple)):
            yield from flatten(element)
        else:
            yield element
|
||||
|
||||
|
||||
def create_otio_rational_time(frame, fps):
    """Build an otio RationalTime from a frame number and frame rate."""
    return otio.opentime.RationalTime(float(frame), float(fps))
|
||||
|
||||
|
||||
def create_otio_time_range(start_frame, frame_duration, fps):
    """Build an otio TimeRange of ``frame_duration`` frames from ``start_frame``."""
    start = create_otio_rational_time(start_frame, fps)
    duration = create_otio_rational_time(frame_duration, fps)
    return otio.opentime.TimeRange(start_time=start, duration=duration)
|
||||
|
||||
|
||||
def _get_metadata(item):
|
||||
if hasattr(item, 'metadata'):
|
||||
return {key: value for key, value in dict(item.metadata()).items()}
|
||||
return {}
|
||||
|
||||
|
||||
def create_time_effects(otio_clip, track_item):
|
||||
# get all subtrack items
|
||||
subTrackItems = flatten(track_item.parent().subTrackItems())
|
||||
speed = track_item.playbackSpeed()
|
||||
|
||||
otio_effect = None
|
||||
# retime on track item
|
||||
if speed != 1.:
|
||||
# make effect
|
||||
otio_effect = otio.schema.LinearTimeWarp()
|
||||
otio_effect.name = "Speed"
|
||||
otio_effect.time_scalar = speed
|
||||
|
||||
# freeze frame effect
|
||||
if speed == 0.:
|
||||
otio_effect = otio.schema.FreezeFrame()
|
||||
otio_effect.name = "FreezeFrame"
|
||||
|
||||
if otio_effect:
|
||||
# add otio effect to clip effects
|
||||
otio_clip.effects.append(otio_effect)
|
||||
|
||||
# loop through and get all Timewarps
|
||||
for effect in subTrackItems:
|
||||
if ((track_item not in effect.linkedItems())
|
||||
and (len(effect.linkedItems()) > 0)):
|
||||
continue
|
||||
# avoid all effect which are not TimeWarp and disabled
|
||||
if "TimeWarp" not in effect.name():
|
||||
continue
|
||||
|
||||
if not effect.isEnabled():
|
||||
continue
|
||||
|
||||
node = effect.node()
|
||||
name = node["name"].value()
|
||||
|
||||
# solve effect class as effect name
|
||||
_name = effect.name()
|
||||
if "_" in _name:
|
||||
effect_name = re.sub(r"(?:_)[_0-9]+", "", _name) # more numbers
|
||||
else:
|
||||
effect_name = re.sub(r"\d+", "", _name) # one number
|
||||
|
||||
metadata = {}
|
||||
# add knob to metadata
|
||||
for knob in ["lookup", "length"]:
|
||||
value = node[knob].value()
|
||||
animated = node[knob].isAnimated()
|
||||
if animated:
|
||||
value = [
|
||||
((node[knob].getValueAt(i)) - i)
|
||||
for i in range(
|
||||
track_item.timelineIn(), track_item.timelineOut() + 1)
|
||||
]
|
||||
|
||||
metadata[knob] = value
|
||||
|
||||
# make effect
|
||||
otio_effect = otio.schema.TimeEffect()
|
||||
otio_effect.name = name
|
||||
otio_effect.effect_name = effect_name
|
||||
otio_effect.metadata.update(metadata)
|
||||
|
||||
# add otio effect to clip effects
|
||||
otio_clip.effects.append(otio_effect)
|
||||
|
||||
|
||||
def create_otio_reference(clip):
|
||||
metadata = _get_metadata(clip)
|
||||
media_source = clip.mediaSource()
|
||||
|
||||
# get file info for path and start frame
|
||||
file_info = media_source.fileinfos().pop()
|
||||
frame_start = file_info.startFrame()
|
||||
path = file_info.filename()
|
||||
|
||||
# get padding and other file infos
|
||||
padding = media_source.filenamePadding()
|
||||
file_head = media_source.filenameHead()
|
||||
is_sequence = not media_source.singleFile()
|
||||
frame_duration = media_source.duration()
|
||||
fps = utils.get_rate(clip) or CTX.project_fps
|
||||
extension = os.path.splitext(path)[-1]
|
||||
|
||||
if is_sequence:
|
||||
metadata.update({
|
||||
"isSequence": True,
|
||||
"padding": padding
|
||||
})
|
||||
|
||||
# add resolution metadata
|
||||
metadata.update({
|
||||
"openpype.source.colourtransform": clip.sourceMediaColourTransform(),
|
||||
"openpype.source.width": int(media_source.width()),
|
||||
"openpype.source.height": int(media_source.height()),
|
||||
"openpype.source.pixelAspect": float(media_source.pixelAspect())
|
||||
})
|
||||
|
||||
otio_ex_ref_item = None
|
||||
|
||||
if is_sequence:
|
||||
# if it is file sequence try to create `ImageSequenceReference`
|
||||
# the OTIO might not be compatible so return nothing and do it old way
|
||||
try:
|
||||
dirname = os.path.dirname(path)
|
||||
otio_ex_ref_item = otio.schema.ImageSequenceReference(
|
||||
target_url_base=dirname + os.sep,
|
||||
name_prefix=file_head,
|
||||
name_suffix=extension,
|
||||
start_frame=frame_start,
|
||||
frame_zero_padding=padding,
|
||||
rate=fps,
|
||||
available_range=create_otio_time_range(
|
||||
frame_start,
|
||||
frame_duration,
|
||||
fps
|
||||
)
|
||||
)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
if not otio_ex_ref_item:
|
||||
reformat_path = utils.get_reformated_path(path, padded=False)
|
||||
# in case old OTIO or video file create `ExternalReference`
|
||||
otio_ex_ref_item = otio.schema.ExternalReference(
|
||||
target_url=reformat_path,
|
||||
available_range=create_otio_time_range(
|
||||
frame_start,
|
||||
frame_duration,
|
||||
fps
|
||||
)
|
||||
)
|
||||
|
||||
# add metadata to otio item
|
||||
add_otio_metadata(otio_ex_ref_item, media_source, **metadata)
|
||||
|
||||
return otio_ex_ref_item
|
||||
|
||||
|
||||
def get_marker_color(tag):
    """Map a tag's icon color to an otio MarkerColor, defaulting to RED."""
    match = re.search(r'icons:Tag(?P<color>\w+)\.\w+', tag.icon())
    if match:
        color_name = match.group('color').lower()
        if color_name in MARKER_COLOR_MAP:
            return MARKER_COLOR_MAP[color_name]

    return otio.schema.MarkerColor.RED
|
||||
|
||||
|
||||
def create_otio_markers(otio_item, item):
|
||||
for tag in item.tags():
|
||||
if not tag.visible():
|
||||
continue
|
||||
|
||||
if tag.name() == 'Copy':
|
||||
# Hiero adds this tag to a lot of clips
|
||||
continue
|
||||
|
||||
frame_rate = utils.get_rate(item) or CTX.project_fps
|
||||
|
||||
marked_range = otio.opentime.TimeRange(
|
||||
start_time=otio.opentime.RationalTime(
|
||||
tag.inTime(),
|
||||
frame_rate
|
||||
),
|
||||
duration=otio.opentime.RationalTime(
|
||||
int(tag.metadata().dict().get('tag.length', '0')),
|
||||
frame_rate
|
||||
)
|
||||
)
|
||||
# add tag metadata but remove "tag." string
|
||||
metadata = {}
|
||||
|
||||
for key, value in tag.metadata().dict().items():
|
||||
_key = key.replace("tag.", "")
|
||||
|
||||
try:
|
||||
# capture exceptions which are related to strings only
|
||||
_value = ast.literal_eval(value)
|
||||
except (ValueError, SyntaxError):
|
||||
_value = value
|
||||
|
||||
metadata.update({_key: _value})
|
||||
|
||||
# Store the source item for future import assignment
|
||||
metadata['hiero_source_type'] = item.__class__.__name__
|
||||
|
||||
marker = otio.schema.Marker(
|
||||
name=tag.name(),
|
||||
color=get_marker_color(tag),
|
||||
marked_range=marked_range,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
otio_item.markers.append(marker)
|
||||
|
||||
|
||||
def create_otio_clip(track_item):
|
||||
clip = track_item.source()
|
||||
speed = track_item.playbackSpeed()
|
||||
# flip if speed is in minus
|
||||
source_in = track_item.sourceIn() if speed > 0 else track_item.sourceOut()
|
||||
|
||||
duration = int(track_item.duration())
|
||||
|
||||
fps = utils.get_rate(track_item) or CTX.project_fps
|
||||
name = track_item.name()
|
||||
|
||||
media_reference = create_otio_reference(clip)
|
||||
source_range = create_otio_time_range(
|
||||
int(source_in),
|
||||
int(duration),
|
||||
fps
|
||||
)
|
||||
|
||||
otio_clip = otio.schema.Clip(
|
||||
name=name,
|
||||
source_range=source_range,
|
||||
media_reference=media_reference
|
||||
)
|
||||
|
||||
# Add tags as markers
|
||||
if CTX.include_tags:
|
||||
create_otio_markers(otio_clip, track_item)
|
||||
create_otio_markers(otio_clip, track_item.source())
|
||||
|
||||
# only if video
|
||||
if not clip.mediaSource().hasAudio():
|
||||
# Add effects to clips
|
||||
create_time_effects(otio_clip, track_item)
|
||||
|
||||
return otio_clip
|
||||
|
||||
|
||||
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
    """Create an otio Gap spanning from ``gap_start`` to the clip start."""
    gap_duration = (clip_start - tl_start_frame) - gap_start
    gap_range = create_otio_time_range(gap_start, gap_duration, fps)
    return otio.schema.Gap(source_range=gap_range)
|
||||
|
||||
|
||||
def _create_otio_timeline():
|
||||
project = CTX.timeline.project()
|
||||
metadata = _get_metadata(CTX.timeline)
|
||||
|
||||
metadata.update({
|
||||
"openpype.timeline.width": int(CTX.timeline.format().width()),
|
||||
"openpype.timeline.height": int(CTX.timeline.format().height()),
|
||||
"openpype.timeline.pixelAspect": int(CTX.timeline.format().pixelAspect()), # noqa
|
||||
"openpype.project.useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(), # noqa
|
||||
"openpype.project.lutSetting16Bit": project.lutSetting16Bit(),
|
||||
"openpype.project.lutSetting8Bit": project.lutSetting8Bit(),
|
||||
"openpype.project.lutSettingFloat": project.lutSettingFloat(),
|
||||
"openpype.project.lutSettingLog": project.lutSettingLog(),
|
||||
"openpype.project.lutSettingViewer": project.lutSettingViewer(),
|
||||
"openpype.project.lutSettingWorkingSpace": project.lutSettingWorkingSpace(), # noqa
|
||||
"openpype.project.lutUseOCIOForExport": project.lutUseOCIOForExport(),
|
||||
"openpype.project.ocioConfigName": project.ocioConfigName(),
|
||||
"openpype.project.ocioConfigPath": project.ocioConfigPath()
|
||||
})
|
||||
|
||||
start_time = create_otio_rational_time(
|
||||
CTX.timeline.timecodeStart(), CTX.project_fps)
|
||||
|
||||
return otio.schema.Timeline(
|
||||
name=CTX.timeline.name(),
|
||||
global_start_time=start_time,
|
||||
metadata=metadata
|
||||
)
|
||||
|
||||
|
||||
def create_otio_track(track_type, track_name):
    """Create an otio Track whose kind matches the Hiero track class."""
    kind = TRACK_TYPE_MAP[track_type]
    return otio.schema.Track(name=track_name, kind=kind)
|
||||
|
||||
|
||||
def add_otio_gap(track_item, otio_track, prev_out):
    """Append a Gap to ``otio_track`` filling space before ``track_item``."""
    gap_length = track_item.timelineIn() - prev_out
    # Gaps after the first item start right after the previous out frame.
    if prev_out != 0:
        gap_length -= 1

    gap_range = otio.opentime.TimeRange(
        duration=otio.opentime.RationalTime(
            gap_length,
            CTX.project_fps
        )
    )
    otio_track.append(otio.schema.Gap(source_range=gap_range))
|
||||
|
||||
|
||||
def add_otio_metadata(otio_item, media_source, **kwargs):
    """Copy ``media_source`` metadata (plus ``kwargs`` overrides) onto ``otio_item``.

    Args:
        otio_item: otio object exposing a ``metadata`` mapping.
        media_source: Hiero media source providing the base metadata.
        **kwargs: Extra key/value pairs merged over the source metadata.
    """
    metadata = _get_metadata(media_source)

    # add additional metadata from kwargs
    if kwargs:
        metadata.update(kwargs)

    # one bulk update instead of the original per-key update() loop
    otio_item.metadata.update(metadata)
|
||||
|
||||
|
||||
def create_otio_timeline():
    """Convert the active Hiero sequence into an otio Timeline.

    Stores the active sequence and its fps in ``CTX``, then walks every
    enabled track, converting track items to otio clips and inserting
    gaps where the timeline has holes.

    Returns:
        otio.schema.Timeline: the converted timeline.
    """

    def get_prev_item(itemindex, track_item):
        # First item on a track has no predecessor - use itself so the
        # gap logic below computes a zero difference; otherwise take the
        # item directly before it.
        # NOTE: the original defined this helper ('set_prev_item') but
        # never called it, duplicating the logic inline - now deduplicated.
        if itemindex == 0:
            return track_item
        return track_item.parent().items()[itemindex - 1]

    # get current timeline
    CTX.timeline = hiero.ui.activeSequence()
    CTX.project_fps = CTX.timeline.framerate().toFloat()

    # convert timeline to otio
    otio_timeline = _create_otio_timeline()

    # loop all defined track types
    for track in CTX.timeline.items():
        # skip if track is disabled
        if not track.isEnabled():
            continue

        # convert track to otio
        otio_track = create_otio_track(
            type(track), track.name())

        for itemindex, track_item in enumerate(track):
            prev_item = get_prev_item(itemindex, track_item)

            # calculate clip frame range difference from each other
            clip_diff = track_item.timelineIn() - prev_item.timelineOut()

            # add gap if first track item is not starting
            # at first timeline frame
            if itemindex == 0 and track_item.timelineIn() > 0:
                add_otio_gap(track_item, otio_track, 0)

            # or add gap if following track items are having
            # frame range differences from each other
            elif itemindex and clip_diff != 1:
                add_otio_gap(track_item, otio_track, prev_item.timelineOut())

            # create otio clip and add it to track
            otio_clip = create_otio_clip(track_item)
            otio_track.append(otio_clip)

        # Add tags as markers
        if CTX.include_tags:
            create_otio_markers(otio_track, track)

        # add track to otio timeline
        otio_timeline.tracks.append(otio_track)

    return otio_timeline
|
||||
|
||||
|
||||
def write_to_file(otio_timeline, path):
    """Serialize *otio_timeline* to *path* via the OTIO adapter matching the extension."""
    otio.adapters.write_to_file(otio_timeline, path)
|
||||
535
server_addon/hiero/client/ayon_hiero/api/otio/hiero_import.py
Normal file
|
|
@ -0,0 +1,535 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
__author__ = "Daniel Flehner Heen"
|
||||
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]
|
||||
|
||||
|
||||
import os
|
||||
import hiero.core
|
||||
import hiero.ui
|
||||
|
||||
import PySide2.QtWidgets as qw
|
||||
|
||||
try:
|
||||
from urllib import unquote
|
||||
|
||||
except ImportError:
|
||||
from urllib.parse import unquote # lint:ok
|
||||
|
||||
import opentimelineio as otio
|
||||
|
||||
_otio_old = False
|
||||
|
||||
|
||||
def inform(messages):
    """Pop up a modal information dialog listing *messages*.

    Accepts either a single string or a list of strings; each entry is
    shown on its own line.
    """
    if isinstance(messages, str):
        messages = [messages]

    text = '\n'.join(messages)
    qw.QMessageBox.information(
        hiero.ui.mainWindow(),
        'OTIO Import',
        text,
        qw.QMessageBox.StandardButton.Ok
    )
|
||||
|
||||
|
||||
def get_transition_type(otio_item, otio_track):
    """Classify a transition by which neighbors it has on the track.

    Gap neighbors are treated the same as missing neighbors. Returns one
    of 'dissolve', 'fade_out', 'fade_in' or 'unknown'.
    """
    prev_item, next_item = otio_track.neighbors_of(otio_item)

    if isinstance(prev_item, otio.schema.Gap):
        prev_item = None

    if isinstance(next_item, otio.schema.Gap):
        next_item = None

    has_prev = bool(prev_item)
    has_next = bool(next_item)

    if has_prev and has_next:
        return 'dissolve'
    if has_prev:
        return 'fade_out'
    if has_next:
        return 'fade_in'
    return 'unknown'
|
||||
|
||||
|
||||
def find_trackitem(otio_clip, hiero_track):
    """Return the TrackItem on *hiero_track* matching *otio_clip*.

    A match requires both the same timeline-in frame and the same name.
    Returns None when no item matches.
    """
    target_start = otio_clip.range_in_parent().start_time.value
    for candidate in hiero_track.items():
        if (candidate.timelineIn() == target_start
                and candidate.name() == otio_clip.name):
            return candidate

    return None
|
||||
|
||||
|
||||
def get_neighboring_trackitems(otio_item, otio_track, hiero_track):
    """Resolve the Hiero TrackItems on either side of *otio_item*.

    Returns an ``(item_in, item_out)`` tuple; either entry is None when
    there is no matching neighbor on *hiero_track*.
    """
    prev_clip, next_clip = otio_track.neighbors_of(otio_item)

    item_in = find_trackitem(prev_clip, hiero_track) if prev_clip else None
    item_out = find_trackitem(next_clip, hiero_track) if next_clip else None

    return item_in, item_out
|
||||
|
||||
|
||||
def apply_transition(otio_track, otio_item, track):
    """Create and attach a Hiero transition for an OTIO transition item.

    Args:
        otio_track: OTIO track holding *otio_item*.
        otio_item: the OTIO Transition to translate.
        track (hiero.core.VideoTrack or hiero.core.AudioTrack): Hiero
            track already populated with the surrounding TrackItems.

    Returns:
        str or None: a warning message when the transition was adjusted
        or could not be applied, otherwise None.

    Fix: the fade-in warning message previously read "Fist half" —
    corrected to "First half".
    """
    warning = None

    # Figure out type of transition
    transition_type = get_transition_type(otio_item, otio_track)

    # Figure out track kind for getattr below (Audio tracks use the
    # createAudio* factory variants; video uses the unprefixed ones)
    kind = ''
    if isinstance(track, hiero.core.AudioTrack):
        kind = 'Audio'

    # Gather TrackItems involved in transition
    item_in, item_out = get_neighboring_trackitems(
        otio_item,
        otio_track,
        track
    )

    # Create transition object
    if transition_type == 'dissolve':
        transition_func = getattr(
            hiero.core.Transition,
            "create{kind}DissolveTransition".format(kind=kind)
        )

        try:
            transition = transition_func(
                item_in,
                item_out,
                otio_item.in_offset.value,
                otio_item.out_offset.value,
            )

        # Catch error raised if transition is bigger than TrackItem source
        except RuntimeError as e:
            transition = None
            warning = (
                "Unable to apply transition \"{t.name}\": {e} "
                "Ignoring the transition.").format(t=otio_item, e=str(e))

    elif transition_type == 'fade_in':
        transition_func = getattr(
            hiero.core.Transition,
            'create{kind}FadeInTransition'.format(kind=kind)
        )

        # Warn user if part of fade is outside of clip
        if otio_item.in_offset.value:
            warning = \
                'First half of transition "{t.name}" is outside of clip and ' \
                'not valid in Hiero. Only applied second half.' \
                .format(t=otio_item)

        transition = transition_func(
            item_out,
            otio_item.out_offset.value,
        )

    elif transition_type == 'fade_out':
        transition_func = getattr(
            hiero.core.Transition,
            'create{kind}FadeOutTransition'.format(kind=kind)
        )
        transition = transition_func(
            item_in,
            otio_item.in_offset.value
        )

        # Warn user if part of fade is outside of clip
        if otio_item.out_offset.value:
            warning = \
                'Second half of transition "{t.name}" is outside of clip ' \
                'and not valid in Hiero. Only applied first half.' \
                .format(t=otio_item)

    else:
        # Unknown transition
        return

    # Apply transition to track
    if transition:
        track.addTransition(transition)

    # Inform user about missing or adjusted transitions
    return warning
|
||||
|
||||
|
||||
def prep_url(url_in):
    """Normalize a path or URL into Hiero's ``file://localhost`` form.

    Percent-escapes are decoded first; already-prefixed URLs are
    returned untouched.

    NOTE: the original ``cond and '' or os.sep`` expressions always
    yielded ``os.sep`` (because ``''`` is falsy), so the behavior is:
    a separator is always inserted after the prefix, and one leading
    separator on the input path is dropped first.
    """
    url = unquote(url_in)

    if url.startswith('file://localhost/'):
        return url

    if url.startswith(os.sep):
        url = url[1:]

    return 'file://localhost{sep}{url}'.format(sep=os.sep, url=url)
|
||||
|
||||
|
||||
def create_offline_mediasource(otio_clip, path=None):
    """Build an offline (placeholder) MediaSource for *otio_clip*.

    Used when the clip's media reference cannot be resolved to real
    media on disk. Falls back to the clip name when *path* is None.
    Sets the module-level ``_otio_old`` flag when the installed OTIO
    predates ImageSequenceReference.
    """
    global _otio_old

    hiero_rate = hiero.core.TimeBase(otio_clip.source_range.start_time.rate)

    try:
        legal_media_refs = (
            otio.schema.ExternalReference,
            otio.schema.ImageSequenceReference
        )
    except AttributeError:
        # Old OTIO without ImageSequenceReference.
        # NOTE(review): this is a parenthesized single class, not a
        # 1-tuple (missing trailing comma) — isinstance() accepts a bare
        # class too, so it still works; confirm intent.
        _otio_old = True
        legal_media_refs = (
            otio.schema.ExternalReference
        )

    if isinstance(otio_clip.media_reference, legal_media_refs):
        source_range = otio_clip.available_range()

    else:
        source_range = otio_clip.source_range

    if path is None:
        path = otio_clip.name

    media = hiero.core.MediaSource.createOfflineVideoMediaSource(
        prep_url(path),
        source_range.start_time.value,
        source_range.duration.value,
        hiero_rate,
        source_range.start_time.value,
    )

    return media
|
||||
|
||||
|
||||
def load_otio(otio_file, project=None, sequence=None):
    """Read *otio_file* and rebuild its timeline as a Hiero sequence."""
    timeline = otio.adapters.read_from_file(otio_file)
    build_sequence(timeline, project=project, sequence=sequence)
|
||||
|
||||
|
||||
# Maps OTIO marker colors to the nearest built-in Hiero tag name; used
# when importing markers as tags (see add_markers).
marker_color_map = {
    "PINK": "Magenta",
    "RED": "Red",
    "ORANGE": "Yellow",
    "YELLOW": "Yellow",
    "GREEN": "Green",
    "CYAN": "Cyan",
    "BLUE": "Blue",
    "PURPLE": "Magenta",
    "MAGENTA": "Magenta",
    "BLACK": "Blue",
    "WHITE": "Green"
}
|
||||
|
||||
|
||||
def get_tag(tagname, tagsbin):
    """Depth-first search of *tagsbin* for a tag named *tagname*.

    Sub-bins are searched recursively. Returns the tag, or None when no
    tag with that name exists anywhere under the bin.
    """
    for entry in tagsbin.items():
        if entry.name() == tagname:
            return entry

        if isinstance(entry, hiero.core.Bin):
            found = get_tag(tagname, entry)
            if found is not None:
                return found

    return None
|
||||
|
||||
|
||||
def add_metadata(metadata, hiero_item):
    """Copy the 'Hiero' sub-dict of *metadata* onto *hiero_item* metadata.

    Keys get a 'tag.' prefix when missing and values are stringified.
    Nested dict values are recursed into; the internal 'source_type'
    key and None values are skipped.
    """
    hiero_meta = metadata.get('Hiero', {})
    for key, value in hiero_meta.items():
        # Only used internally to reassign tag to correct Hiero item
        if key == 'source_type':
            continue

        if isinstance(value, dict):
            add_metadata(value, hiero_item)
            continue

        if value is None:
            continue

        if not key.startswith('tag.'):
            key = 'tag.' + key

        hiero_item.metadata().setValue(key, str(value))
|
||||
|
||||
|
||||
def add_markers(otio_item, hiero_item, tagsbin):
    """Translate OTIO markers on *otio_item* into Hiero tags on *hiero_item*.

    Tag lookup order: existing tag named after the marker, then an
    existing tag named after the marker's mapped color, then a freshly
    created Tag. Range-aware items get the tag over the marked range.
    """
    if isinstance(otio_item, (otio.schema.Stack, otio.schema.Clip)):
        markers = otio_item.markers

    elif isinstance(otio_item, otio.schema.Timeline):
        markers = otio_item.tracks.markers

    else:
        markers = []

    for marker in markers:
        meta = marker.metadata.get('Hiero', dict())
        # Markers exported from Hiero remember which item type owned
        # them; skip markers meant for a different item type.
        if 'source_type' in meta:
            if hiero_item.__class__.__name__ != meta.get('source_type'):
                continue

        marker_color = marker.color

        _tag = get_tag(marker.name, tagsbin)
        if _tag is None:
            # NOTE(review): marker_color_map lookup raises KeyError for
            # colors outside the mapped set — confirm all inputs are
            # standard OTIO MarkerColor values.
            _tag = get_tag(marker_color_map[marker_color], tagsbin)

        if _tag is None:
            _tag = hiero.core.Tag(marker_color_map[marker.color])

        start = marker.marked_range.start_time.value
        end = (
            marker.marked_range.start_time.value +
            marker.marked_range.duration.value
        )

        # Prefer a ranged tag when the item supports it (e.g. sequences)
        if hasattr(hiero_item, 'addTagToRange'):
            tag = hiero_item.addTagToRange(_tag, start, end)

        else:
            tag = hiero_item.addTag(_tag)

        tag.setName(marker.name or marker_color_map[marker_color])
        # tag.setNote(meta.get('tag.note', ''))

        # Add metadata
        add_metadata(marker.metadata, tag)
|
||||
|
||||
|
||||
def create_track(otio_track, tracknum, track_kind):
    """Create a Hiero video or audio track mirroring *otio_track*.

    *track_kind* overrides the kind read from the OTIO track; when the
    OTIO track is unnamed, a fallback name is derived from *tracknum*.
    """
    if track_kind is None and hasattr(otio_track, 'kind'):
        track_kind = otio_track.kind

    if track_kind == otio.schema.TrackKind.Video:
        factory = hiero.core.VideoTrack
        fallback = 'Video{n}'.format(n=tracknum)
    else:
        factory = hiero.core.AudioTrack
        fallback = 'Audio{n}'.format(n=tracknum)

    return factory(otio_track.name or fallback)
|
||||
|
||||
|
||||
def create_clip(otio_clip, tagsbin, sequencebin):
    """Create (or reuse) a Hiero Clip for *otio_clip* inside *sequencebin*.

    Resolves the media reference to a MediaSource, falling back to an
    offline placeholder when the media is missing, then reuses any bin
    clip already pointing at the same media. Markers are copied onto the
    clip as tags.
    """
    # Create MediaSource
    url = None
    media = None
    otio_media = otio_clip.media_reference

    if isinstance(otio_media, otio.schema.ExternalReference):
        url = prep_url(otio_media.target_url)
        media = hiero.core.MediaSource(url)

    elif not _otio_old:
        # ImageSequenceReference only exists on newer OTIO versions
        if isinstance(otio_media, otio.schema.ImageSequenceReference):
            url = prep_url(otio_media.abstract_target_url('#'))
            media = hiero.core.MediaSource(url)

    if media is None or media.isOffline():
        media = create_offline_mediasource(otio_clip, url)

    # Reuse previous clip if possible
    clip = None
    for item in sequencebin.clips():
        if item.activeItem().mediaSource() == media:
            clip = item.activeItem()
            break

    if not clip:
        # Create new Clip
        clip = hiero.core.Clip(media)

        # Add Clip to a Bin
        sequencebin.addItem(hiero.core.BinItem(clip))

    # Add markers
    add_markers(otio_clip, clip, tagsbin)

    return clip
|
||||
|
||||
|
||||
def create_trackitem(playhead, track, otio_clip, clip):
    """Create a TrackItem on *track* for *otio_clip*, placed at *playhead*.

    Handles LinearTimeWarp / FreezeFrame effects, reversed playback
    (source in/out swapped), and linking audio items to matching video
    items at the same timeline position.
    """
    source_range = otio_clip.source_range

    trackitem = track.createTrackItem(otio_clip.name)
    trackitem.setPlaybackSpeed(source_range.start_time.rate)
    trackitem.setSource(clip)

    time_scalar = 1.

    # Check for speed effects and adjust playback speed accordingly
    for effect in otio_clip.effects:
        if isinstance(effect, otio.schema.LinearTimeWarp):
            time_scalar = effect.time_scalar
            # Only reverse effect can be applied here
            if abs(time_scalar) == 1.:
                trackitem.setPlaybackSpeed(
                    trackitem.playbackSpeed() * time_scalar
                )

        elif isinstance(effect, otio.schema.FreezeFrame):
            # For freeze frame, playback speed must be set after range
            time_scalar = 0.

    # If reverse playback speed swap source in and out
    if trackitem.playbackSpeed() < 0:
        source_out = source_range.start_time.value
        source_in = source_range.end_time_inclusive().value

        timeline_in = playhead + source_out
        timeline_out = (timeline_in + source_range.duration.value) - 1
    else:
        # Normal playback speed
        source_in = source_range.start_time.value
        source_out = source_range.end_time_inclusive().value

        timeline_in = playhead
        timeline_out = (timeline_in + source_range.duration.value) - 1

    # Set source and timeline in/out points
    trackitem.setTimes(
        timeline_in,
        timeline_out,
        source_in,
        source_out,
    )

    # Apply playback speed for freeze frames
    # (must happen after setTimes: a zero speed would break the range)
    if abs(time_scalar) != 1.:
        trackitem.setPlaybackSpeed(trackitem.playbackSpeed() * time_scalar)

    # Link audio to video when possible
    if isinstance(track, hiero.core.AudioTrack):
        for other in track.parent().trackItemsAt(playhead):
            if other.source() == clip:
                trackitem.link(other)

    return trackitem
|
||||
|
||||
|
||||
def build_sequence(
    otio_timeline, project=None, sequence=None, track_kind=None
):
    """Rebuild an OTIO timeline (or single track) as a Hiero sequence.

    Args:
        otio_timeline: otio.schema.Timeline, or a single Track/Stack
            when called recursively for nested sequences.
        project (hiero.core.Project, optional): target project; derived
            from *sequence*, else the last user project.
        sequence (hiero.core.Sequence, optional): existing sequence to
            build into; a new one is created when omitted.
        track_kind (optional): forces the created tracks' kind; used
            when recursing into nested tracks.
    """
    if project is None:
        if sequence:
            project = sequence.project()

        else:
            # Per version 12.1v2 there is no way of getting active project
            project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1]

    projectbin = project.clipsBin()

    if not sequence:
        # Create a Sequence
        sequence = hiero.core.Sequence(otio_timeline.name or 'OTIOSequence')

        # Set sequence settings from otio timeline if available
        if (
            hasattr(otio_timeline, 'global_start_time')
            and otio_timeline.global_start_time
        ):
            start_time = otio_timeline.global_start_time
            sequence.setFramerate(start_time.rate)
            sequence.setTimecodeStart(start_time.value)

        # Create a Bin to hold clips
        projectbin.addItem(hiero.core.BinItem(sequence))

        sequencebin = hiero.core.Bin(sequence.name())
        projectbin.addItem(sequencebin)

    else:
        sequencebin = projectbin

    # Get tagsBin
    tagsbin = hiero.core.project("Tag Presets").tagsBin()

    # Add timeline markers
    add_markers(otio_timeline, sequence, tagsbin)

    if isinstance(otio_timeline, otio.schema.Timeline):
        tracks = otio_timeline.tracks

    else:
        tracks = [otio_timeline]

    for tracknum, otio_track in enumerate(tracks):
        playhead = 0
        _transitions = []

        # Add track to sequence
        track = create_track(otio_track, tracknum, track_kind)
        sequence.addTrack(track)

        # iterate over items in track
        for _itemnum, otio_clip in enumerate(otio_track):
            if isinstance(otio_clip, (otio.schema.Track, otio.schema.Stack)):
                inform('Nested sequences/tracks are created separately.')

                # Add gap where the nested sequence would have been
                playhead += otio_clip.source_range.duration.value

                # Process nested sequence
                build_sequence(
                    otio_clip,
                    project=project,
                    track_kind=otio_track.kind
                )

            elif isinstance(otio_clip, otio.schema.Clip):
                # Create a Clip
                clip = create_clip(otio_clip, tagsbin, sequencebin)

                # Create TrackItem
                trackitem = create_trackitem(
                    playhead, track, otio_clip, clip
                )

                # Add markers
                add_markers(otio_clip, trackitem, tagsbin)

                # Add trackitem to track
                track.addTrackItem(trackitem)

                # Update playhead
                playhead = trackitem.timelineOut() + 1

            elif isinstance(otio_clip, otio.schema.Transition):
                # Store transitions for when all clips in the track are created
                _transitions.append((otio_track, otio_clip))

            elif isinstance(otio_clip, otio.schema.Gap):
                # Hiero has no fillers, slugs or blanks at the moment
                playhead += otio_clip.source_range.duration.value

        # Apply transitions we stored earlier now that all clips are present
        warnings = []
        for otio_track, otio_item in _transitions:
            # Catch warnings from transitions in case
            # of unsupported transitions
            warning = apply_transition(otio_track, otio_item, track)
            if warning:
                warnings.append(warning)

        if warnings:
            inform(warnings)
|
||||
80
server_addon/hiero/client/ayon_hiero/api/otio/utils.py
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
import re
|
||||
import opentimelineio as otio
|
||||
|
||||
|
||||
def timecode_to_frames(timecode, framerate):
    """Convert a timecode string to an integer frame count at *framerate*.

    Args:
        timecode (str): SMPTE timecode, e.g. "00:00:01:00".
        framerate (float): frame rate to interpret the timecode at.

    Returns:
        int: frame count.

    Fix: the previous implementation ignored *framerate* and always
    converted at a hardcoded 24 fps.
    """
    rt = otio.opentime.from_timecode(timecode, framerate)
    return int(otio.opentime.to_frames(rt))
|
||||
|
||||
|
||||
def frames_to_timecode(frames, framerate):
    """Format an integer frame count as a timecode string at *framerate*."""
    return otio.opentime.to_timecode(
        otio.opentime.from_frames(frames, framerate)
    )
|
||||
|
||||
|
||||
def frames_to_secons(frames, framerate):
    """Convert a frame count to seconds at *framerate*.

    NOTE: the name keeps its historical misspelling ('secons') because
    external callers may depend on it.
    """
    return otio.opentime.to_seconds(
        otio.opentime.from_frames(frames, framerate)
    )
|
||||
|
||||
|
||||
def get_reformated_path(path, padded=True):
    """Normalize printf-style frame tokens in *path*.

    When *path* contains a ``%`` token, every ``%<n>d`` expression is
    rewritten: the padded form uses the last run of digits found in the
    path as the pad width, the unpadded form becomes plain ``%d``.
    Paths without ``%`` are returned unchanged.

    Args:
        path (str): path url or simple file name
        padded (bool): keep zero-padding in the rewritten token

    Returns:
        str: path with reformatted frame token

    Example:
        get_reformated_path("plate.%04d.exr", padded=False) > plate.%d.exr
    """
    if "%" not in path:
        return path

    digit_runs = re.findall(r"(\d+)", path)
    pad_width = int(digit_runs.pop())

    replacement = "%0{}d".format(pad_width) if padded else "%d"
    return re.sub(r"(%\d+d)", replacement, path)
|
||||
|
||||
|
||||
def get_padding_from_path(path):
    """Return frame-padding width from a bracketed sequence path.

    Recognizes DaVinci Resolve style ranges such as ``[0001-1008]`` and
    returns the digit count of the range start. Paths without a ``[``
    yield None.

    Args:
        path (str): path url or simple file name

    Returns:
        int or None: padding number

    Example:
        get_padding_from_path("plate.[0001-1008].exr") > 4
    """
    if "[" not in path:
        return None

    range_starts = re.findall(r"(\d+)(?=-)", path)
    return len(range_starts.pop())
|
||||
|
||||
|
||||
def get_rate(item):
    """Return *item*'s frame rate as a float.

    Whole-number rates come back as-is (e.g. ``24.0``); fractional rates
    are rounded to 4 decimals. Items without a ``framerate`` attribute,
    or with a zero denominator, yield None.
    """
    if not hasattr(item, 'framerate'):
        return None

    numerator, denominator = item.framerate().toRational()

    try:
        rate = float(numerator) / float(denominator)
    except ZeroDivisionError:
        return None

    return rate if rate.is_integer() else round(rate, 4)
|
||||
339
server_addon/hiero/client/ayon_hiero/api/pipeline.py
Normal file
|
|
@ -0,0 +1,339 @@
|
|||
"""
|
||||
Basic avalon integration
|
||||
"""
|
||||
from copy import deepcopy
|
||||
import os
|
||||
import contextlib
|
||||
from collections import OrderedDict
|
||||
|
||||
from pyblish import api as pyblish
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_core.pipeline import (
|
||||
schema,
|
||||
register_creator_plugin_path,
|
||||
register_loader_plugin_path,
|
||||
deregister_creator_plugin_path,
|
||||
deregister_loader_plugin_path,
|
||||
AVALON_CONTAINER_ID,
|
||||
AYON_CONTAINER_ID,
|
||||
)
|
||||
from ayon_core.tools.utils import host_tools
|
||||
from . import lib, menu, events
|
||||
import hiero
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
# plugin paths
|
||||
API_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
HOST_DIR = os.path.dirname(API_DIR)
|
||||
PLUGINS_DIR = os.path.join(HOST_DIR, "plugins")
|
||||
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish").replace("\\", "/")
|
||||
LOAD_PATH = os.path.join(PLUGINS_DIR, "load").replace("\\", "/")
|
||||
CREATE_PATH = os.path.join(PLUGINS_DIR, "create").replace("\\", "/")
|
||||
|
||||
AVALON_CONTAINERS = ":AVALON_CONTAINERS"
|
||||
|
||||
|
||||
def install():
    """Install the Hiero host integration.

    Registers pyblish host/plugins, loader and creator plugin paths,
    the publish-toggle callback, the AYON menu and Hiero event handlers.
    Called once when the host starts up.
    """

    # adding all events
    events.register_events()

    log.info("Registering Hiero plug-ins..")
    pyblish.register_host("hiero")
    pyblish.register_plugin_path(PUBLISH_PATH)
    register_loader_plugin_path(LOAD_PATH)
    register_creator_plugin_path(CREATE_PATH)

    # register callback for switching publishable
    pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)

    # install menu
    menu.menu_install()
    menu.add_scripts_menu()

    # register hiero events
    events.register_hiero_events()
|
||||
|
||||
|
||||
def uninstall():
    """Uninstall the Hiero host integration.

    Reverses everything ``install()`` registered: pyblish host and
    plugin paths, loader/creator plugin paths and the publish-toggle
    callback.
    """
    log.info("Deregistering Hiero plug-ins..")
    pyblish.deregister_host("hiero")
    pyblish.deregister_plugin_path(PUBLISH_PATH)
    deregister_loader_plugin_path(LOAD_PATH)
    deregister_creator_plugin_path(CREATE_PATH)

    # deregister callback for switching publishable
    pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
|
||||
|
||||
|
||||
def containerise(track_item,
                 name,
                 namespace,
                 context,
                 loader=None,
                 data=None):
    """Bundle Hiero's object into an assembly and imprint it with metadata

    Containerisation enables a tracking of version, author and origin
    for loaded assets.

    Arguments:
        track_item (hiero.core.TrackItem): object to imprint as container
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of node used to produce this container.
        data (dict, optional): extra key/values merged into the imprint.

    Returns:
        track_item (hiero.core.TrackItem): containerised object

    """

    # Base imprint; OrderedDict keeps the key order stable in the tag.
    data_imprint = OrderedDict({
        "schema": "openpype:container-2.0",
        "id": AVALON_CONTAINER_ID,
        "name": str(name),
        "namespace": str(namespace),
        "loader": str(loader),
        "representation": context["representation"]["id"],
    })

    if data:
        for k, v in data.items():
            data_imprint.update({k: v})

    log.debug("_ data_imprint: {}".format(data_imprint))
    lib.set_trackitem_openpype_tag(track_item, data_imprint)

    return track_item
|
||||
|
||||
|
||||
def ls():
    """List available containers.

    This function is used by the Container Manager in Nuke. You'll
    need to implement a for-loop that then *yields* one Container at
    a time.

    See the `container.json` schema for details on how it should look,
    and the Maya equivalent, which is in `avalon.maya.pipeline`

    Yields:
        dict: container data for each containerised track item or
        video track in the current timeline.
    """

    # get all track items from current timeline
    all_items = lib.get_track_items()

    # append all video tracks
    for track in lib.get_current_sequence():
        if type(track) != hiero.core.VideoTrack:
            continue
        all_items.append(track)

    for item in all_items:
        container_data = parse_container(item)

        # a video track can hold several containers at once
        if isinstance(container_data, list):
            for _c in container_data:
                yield _c
        elif container_data:
            yield container_data
|
||||
|
||||
|
||||
def parse_container(item, validate=True):
    """Return container data from track_item's pype tag.

    Args:
        item (hiero.core.TrackItem or hiero.core.VideoTrack):
            A containerised track item.
        validate (bool)[optional]: validating with avalon scheme

    Returns:
        dict: The container schema data for input containerized track item.
        For a VideoTrack a list of such dicts is returned instead
        (one per stored container); None when the item holds no
        container data.

    """
    def data_to_container(item, data):
        # Reject anything that is not an AYON/AVALON container imprint.
        if (
            not data
            or data.get("id") not in {
                AYON_CONTAINER_ID, AVALON_CONTAINER_ID
            }
        ):
            return

        if validate and data and data.get("schema"):
            schema.validate(data)

        if not isinstance(data, dict):
            return

        # If not all required data return the empty container
        required = ['schema', 'id', 'name',
                    'namespace', 'loader', 'representation']

        if any(key not in data for key in required):
            return

        container = {key: data[key] for key in required}

        container["objectName"] = item.name()

        # Store reference to the node object
        container["_item"] = item

        return container

    # convert tag metadata to normal keys names
    if type(item) == hiero.core.VideoTrack:
        return_list = []
        _data = lib.get_track_openpype_data(item)

        if not _data:
            return
        # convert the data to list and validate them
        for _, obj_data in _data.items():
            container = data_to_container(item, obj_data)
            return_list.append(container)
        return return_list
    else:
        _data = lib.get_trackitem_openpype_data(item)
        return data_to_container(item, _data)
|
||||
|
||||
|
||||
def _update_container_data(container, data):
|
||||
for key in container:
|
||||
try:
|
||||
container[key] = data[key]
|
||||
except KeyError:
|
||||
pass
|
||||
return container
|
||||
|
||||
|
||||
def update_container(item, data=None):
    """Update container data to input track_item or track's
    openpype tag.

    Args:
        item (hiero.core.TrackItem or hiero.core.VideoTrack):
            A containerised track item.
        data (dict)[optional]: dictionary with data to be updated

    Returns:
        bool: True if container was updated correctly

    """

    data = data or {}
    # deepcopy so the caller's dict is never mutated
    data = deepcopy(data)

    if type(item) == hiero.core.VideoTrack:
        # form object data for test
        object_name = data["objectName"]

        # get all available containers
        containers = lib.get_track_openpype_data(item)
        container = lib.get_track_openpype_data(item, object_name)

        # copies so the cached tag data is not modified in place
        containers = deepcopy(containers)
        container = deepcopy(container)

        # update data in container
        updated_container = _update_container_data(container, data)
        # merge updated container back to containers
        containers.update({object_name: updated_container})

        return bool(lib.set_track_openpype_tag(item, containers))
    else:
        container = lib.get_trackitem_openpype_data(item)
        updated_container = _update_container_data(container, data)

        log.info("Updating container: `{}`".format(item.name()))
        return bool(lib.set_trackitem_openpype_tag(item, updated_container))
|
||||
|
||||
|
||||
def launch_workfiles_app(*args):
    """Open the Workfiles tool parented under Hiero's main window.

    Extra positional arguments from menu callbacks are accepted and
    ignored.
    """
    # Deferred import keeps module import light and avoids cycles.
    from .lib import get_main_window

    host_tools.show_workfiles(parent=get_main_window())
|
||||
|
||||
|
||||
def publish(parent):
    """Shorthand to open the publish tool from within the host.

    Args:
        parent: widget to parent the publish UI under.
    """
    return host_tools.show_publish(parent)
|
||||
|
||||
|
||||
@contextlib.contextmanager
def maintained_selection():
    """Maintain selection during context

    The current track-item selection is cleared for the duration of the
    block and restored afterwards, even when the block raises.

    Example:
        >>> with maintained_selection():
        ...     for track_item in track_items:
        ...         < do some stuff >
    """
    from .lib import (
        set_selected_track_items,
        get_selected_track_items
    )
    previous_selection = get_selected_track_items()
    reset_selection()
    try:
        # do the operation
        yield
    finally:
        # restore even on exceptions inside the block
        reset_selection()
        set_selected_track_items(previous_selection)
|
||||
|
||||
|
||||
def reset_selection():
    """Deselect all selected track items in the active timeline."""
    from .lib import set_selected_track_items
    set_selected_track_items([])
|
||||
|
||||
|
||||
def reload_config():
    """Attempt to reload pipeline at run-time.

    CAUTION: This is primarily for development and debugging purposes.

    Fixes: replaces the deprecated ``imp.reload`` (the ``imp`` module
    was removed in Python 3.12) with ``importlib.reload``, and drops
    the broken fallback that called ``importlib.reload`` on a module
    *name* (a str) when the import itself had failed.
    """
    import importlib

    for module_name in (
        "ayon_core.hosts.hiero.lib",
        "ayon_core.hosts.hiero.menu",
        "ayon_core.hosts.hiero.tags"
    ):
        log.info("Reloading module: {}...".format(module_name))
        try:
            module = importlib.import_module(module_name)
            importlib.reload(module)
        except Exception as e:
            log.warning("Cannot reload module: {}".format(e))
|
||||
|
||||
|
||||
def on_pyblish_instance_toggled(instance, old_value, new_value):
    """Toggle node passthrough states on instance toggles.

    Registered as a pyblish "instanceToggled" callback; mirrors the
    UI toggle onto the track item's publish attribute.
    """

    log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
        instance, old_value, new_value))

    # Deferred import to avoid a circular import at module load time.
    from ayon_core.hosts.hiero.api import (
        get_trackitem_openpype_tag,
        set_publish_attribute
    )

    # Whether instances should be passthrough based on new value
    track_item = instance.data["item"]
    tag = get_trackitem_openpype_tag(track_item)
    set_publish_attribute(tag, new_value)
|
||||
945
server_addon/hiero/client/ayon_hiero/api/plugin.py
Normal file
|
|
@ -0,0 +1,945 @@
|
|||
import os
|
||||
from pprint import pformat
|
||||
import re
|
||||
from copy import deepcopy
|
||||
|
||||
import hiero
|
||||
|
||||
from qtpy import QtWidgets, QtCore
|
||||
import qargparse
|
||||
|
||||
from ayon_core.settings import get_current_project_settings
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_core.pipeline import LoaderPlugin, LegacyCreator
|
||||
from ayon_core.pipeline.load import get_representation_path_from_context
|
||||
from . import lib
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
def load_stylesheet():
    """Return the contents of style.css next to this module.

    Logs a warning and returns an empty string when the file is absent.
    """
    css_path = os.path.join(os.path.dirname(__file__), "style.css")
    if not os.path.exists(css_path):
        log.warning("Unable to load stylesheet, file not found in resources")
        return ""

    with open(css_path, "r") as css_file:
        return css_file.read()
|
||||
|
||||
|
||||
class CreatorWidget(QtWidgets.QDialog):
|
||||
|
||||
# output items
|
||||
items = {}
|
||||
|
||||
    def __init__(self, name, info, ui_inputs, parent=None):
        """Build the creator input dialog.

        Args:
            name (str): window title and object name.
            info (str): informational text shown above the inputs.
            ui_inputs (dict): preset definitions rendered into widgets
                via ``populate_widgets``.
            parent (QtWidgets.QWidget, optional): parent widget.
        """
        super(CreatorWidget, self).__init__(parent)

        self.setObjectName(name)

        # Frameless-style always-on-top dialog with a close button only
        self.setWindowFlags(
            QtCore.Qt.Window
            | QtCore.Qt.CustomizeWindowHint
            | QtCore.Qt.WindowTitleHint
            | QtCore.Qt.WindowCloseButtonHint
            | QtCore.Qt.WindowStaysOnTopHint
        )
        self.setWindowTitle(name or "AYON Creator Input")
        self.resize(500, 700)

        # Where inputs and labels are set
        self.content_widget = [QtWidgets.QWidget(self)]
        top_layout = QtWidgets.QFormLayout(self.content_widget[0])
        top_layout.setObjectName("ContentLayout")
        top_layout.addWidget(Spacer(5, self))

        # first add widget tag line
        top_layout.addWidget(QtWidgets.QLabel(info))

        # main dynamic layout
        self.scroll_area = QtWidgets.QScrollArea(self, widgetResizable=True)
        # NOTE(review): the second setVerticalScrollBarPolicy call
        # overrides the first (AlwaysOn wins over AsNeeded) — the first
        # call appears redundant.
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAsNeeded)
        self.scroll_area.setVerticalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOn)
        self.scroll_area.setHorizontalScrollBarPolicy(
            QtCore.Qt.ScrollBarAlwaysOff)
        self.scroll_area.setWidgetResizable(True)

        self.content_widget.append(self.scroll_area)

        scroll_widget = QtWidgets.QWidget(self)
        in_scroll_area = QtWidgets.QVBoxLayout(scroll_widget)
        self.content_layout = [in_scroll_area]

        # add preset data into input widget layout
        self.items = self.populate_widgets(ui_inputs)
        self.scroll_area.setWidget(scroll_widget)

        # Confirmation buttons
        btns_widget = QtWidgets.QWidget(self)
        btns_layout = QtWidgets.QHBoxLayout(btns_widget)

        cancel_btn = QtWidgets.QPushButton("Cancel")
        btns_layout.addWidget(cancel_btn)

        ok_btn = QtWidgets.QPushButton("Ok")
        btns_layout.addWidget(ok_btn)

        # Main layout of the dialog
        main_layout = QtWidgets.QVBoxLayout(self)
        main_layout.setContentsMargins(10, 10, 10, 10)
        main_layout.setSpacing(0)

        # adding content widget
        for w in self.content_widget:
            main_layout.addWidget(w)

        main_layout.addWidget(btns_widget)

        ok_btn.clicked.connect(self._on_ok_clicked)
        cancel_btn.clicked.connect(self._on_cancel_clicked)

        stylesheet = load_stylesheet()
        self.setStyleSheet(stylesheet)
|
||||
|
||||
def _on_ok_clicked(self):
|
||||
self.result = self.value(self.items)
|
||||
self.close()
|
||||
|
||||
def _on_cancel_clicked(self):
|
||||
self.result = None
|
||||
self.close()
|
||||
|
||||
def value(self, data, new_data=None):
|
||||
new_data = new_data or dict()
|
||||
for k, v in data.items():
|
||||
new_data[k] = {
|
||||
"target": None,
|
||||
"value": None
|
||||
}
|
||||
if v["type"] == "dict":
|
||||
new_data[k]["target"] = v["target"]
|
||||
new_data[k]["value"] = self.value(v["value"])
|
||||
if v["type"] == "section":
|
||||
new_data.pop(k)
|
||||
new_data = self.value(v["value"], new_data)
|
||||
elif getattr(v["value"], "currentText", None):
|
||||
new_data[k]["target"] = v["target"]
|
||||
new_data[k]["value"] = v["value"].currentText()
|
||||
elif getattr(v["value"], "isChecked", None):
|
||||
new_data[k]["target"] = v["target"]
|
||||
new_data[k]["value"] = v["value"].isChecked()
|
||||
elif getattr(v["value"], "value", None):
|
||||
new_data[k]["target"] = v["target"]
|
||||
new_data[k]["value"] = v["value"].value()
|
||||
elif getattr(v["value"], "text", None):
|
||||
new_data[k]["target"] = v["target"]
|
||||
new_data[k]["value"] = v["value"].text()
|
||||
|
||||
return new_data
|
||||
|
||||
def camel_case_split(self, text):
    """Split a camelCase string into space separated, capitalized words."""
    # Word boundaries: lower->Upper transition, an Upper followed by
    # Upper+lower (end of an acronym), or end of string.
    pattern = '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)'
    words = (match.group(0) for match in re.finditer(pattern, text))
    return " ".join(str(word).capitalize() for word in words)
|
||||
|
||||
def create_row(self, layout, type, text, **kwargs):
    """Create a single labeled input row and add it to *layout*.

    Args:
        layout (QtWidgets.QFormLayout): layout the row is added to.
        type (str): name of a ``QtWidgets`` class, e.g. ``"QLineEdit"``.
        text (str): label text; stripped of spaces it is also the
            attribute name the widget is stored under on ``self``.
        **kwargs: widget method names mapped to the value(s) they are
            called with; value setters are applied after all others.

    Returns:
        QtWidgets.QWidget: the created input widget.
    """
    # setters assigning the widget's value - these are applied last
    value_keys = ["setText", "setCheckState", "setValue", "setChecked"]

    # resolve the widget class from its name
    widget_cls = getattr(QtWidgets, type)

    # "clipName" -> "Clip Name" for the visible label
    label = QtWidgets.QLabel(self.camel_case_split(text))
    label.setObjectName("LineLabel")

    # keep the widget reachable as an attribute on the dialog
    attr_name = text.replace(" ", "")
    setattr(
        self,
        attr_name,
        widget_cls(parent=self))
    item = getattr(self, attr_name)

    # apply non-value configuration first (addItems, setToolTip, ...)
    for func, val in kwargs.items():
        if func in value_keys:
            continue

        if getattr(item, func):
            log.debug("Setting {} to {}".format(func, val))
            func_attr = getattr(item, func)
            if isinstance(val, tuple):
                func_attr(*val)
            else:
                func_attr(val)

    # apply the value setters last so configuration cannot clobber them
    for value_item in value_keys:
        if value_item not in kwargs:
            continue
        if getattr(item, value_item):
            getattr(item, value_item)(kwargs[value_item])

    # add the finished row to the form layout
    layout.addRow(label, item)

    return item
|
||||
|
||||
def populate_widgets(self, data, content_layout=None):
    """
    Populate widget from input dict.

    Each plugin has its own set of widget rows defined in dictionary
    each row values should have following keys: `type`, `target`,
    `label`, `order`, `value` and optionally also `toolTip`.

    Args:
        data (dict): widget rows or organized groups defined
            by types `dict` or `section`
        content_layout (QtWidgets.QFormLayout)[optional]: used when nesting

    Returns:
        dict: redefined data dict updated with created widgets

    """
    content_layout = content_layout or self.content_layout[-1]

    # Reorder keys by each row's `order` value: pop the index a key is
    # about to occupy (keeps list length stable), then insert the key.
    # NOTE(review): with sparse or duplicate `order` values this is only
    # best-effort ordering - confirm inputs always use 0..n-1.
    ordered_keys = list(data.keys())
    for k, v in data.items():
        try:
            # try removing a key from index which should
            # be filled with new
            ordered_keys.pop(v["order"])
        except IndexError:
            pass
        # add key into correct order
        ordered_keys.insert(v["order"], k)

    # process ordered
    for k in ordered_keys:
        v = data[k]
        tool_tip = v.get("toolTip", "")
        # `dict` and `section` groups previously used two duplicated,
        # byte-identical code paths; they are merged here.
        if v["type"] in ("dict", "section"):
            # group of rows: create a headline widget with a spacer and
            # label, then a nested form layout, and recurse into children
            self.content_layout.append(QtWidgets.QWidget(self))
            content_layout.addWidget(self.content_layout[-1])
            self.content_layout[-1].setObjectName("sectionHeadline")

            headline = QtWidgets.QVBoxLayout(self.content_layout[-1])
            headline.addWidget(Spacer(20, self))
            headline.addWidget(QtWidgets.QLabel(v["label"]))

            # adding nested layout with label
            self.content_layout.append(QtWidgets.QWidget(self))
            self.content_layout[-1].setObjectName("sectionContent")

            nested_content_layout = QtWidgets.QFormLayout(
                self.content_layout[-1])
            nested_content_layout.setObjectName("NestedContentLayout")
            content_layout.addWidget(self.content_layout[-1])

            # add nested key as label
            data[k]["value"] = self.populate_widgets(
                v["value"], nested_content_layout)

        elif v["type"] == "QLineEdit":
            data[k]["value"] = self.create_row(
                content_layout, "QLineEdit", v["label"],
                setText=v["value"], setToolTip=tool_tip)
        elif v["type"] == "QComboBox":
            data[k]["value"] = self.create_row(
                content_layout, "QComboBox", v["label"],
                addItems=v["value"], setToolTip=tool_tip)
        elif v["type"] == "QCheckBox":
            data[k]["value"] = self.create_row(
                content_layout, "QCheckBox", v["label"],
                setChecked=v["value"], setToolTip=tool_tip)
        elif v["type"] == "QSpinBox":
            data[k]["value"] = self.create_row(
                content_layout, "QSpinBox", v["label"],
                setValue=v["value"],
                setDisplayIntegerBase=10000,
                setRange=(0, 99999), setMinimum=0,
                setMaximum=100000, setToolTip=tool_tip)

    return data
|
||||
|
||||
|
||||
class Spacer(QtWidgets.QWidget):
    """Fixed-height empty widget used as vertical spacing in layouts."""

    def __init__(self, height, *args, **kwargs):
        # BUGFIX: ``super(self.__class__, self)`` recurses infinitely when
        # this class is subclassed; reference the class explicitly.
        super(Spacer, self).__init__(*args, **kwargs)

        self.setFixedHeight(height)

        # inner widget carries the object name so stylesheets can target it
        real_spacer = QtWidgets.QWidget(self)
        real_spacer.setObjectName("Spacer")
        real_spacer.setFixedHeight(height)

        layout = QtWidgets.QVBoxLayout(self)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(real_spacer)

        self.setLayout(layout)
|
||||
|
||||
|
||||
class SequenceLoader(LoaderPlugin):
    """A basic SequenceLoader for Hiero

    This will implement the basic behavior for a loader to inherit from that
    will containerize the reference and will implement the `remove` and
    `update` logic.

    """

    # qargparse options presented to the artist in the loader UI
    options = [
        qargparse.Boolean(
            "handles",
            label="Include handles",
            default=0,
            help="Load with handles or without?"
        ),
        qargparse.Choice(
            "load_to",
            label="Where to load clips",
            items=[
                "Current timeline",
                "New timeline"
            ],
            default="Current timeline",
            help="Where do you want clips to be loaded?"
        ),
        qargparse.Choice(
            "load_how",
            label="How to load clips",
            items=[
                "Original timing",
                "Sequentially in order"
            ],
            default="Original timing",
            help="Would you like to place it at original timing?"
        )
    ]

    def load(
        self,
        context,
        name=None,
        namespace=None,
        options=None
    ):
        """Load a representation - to be implemented by subclasses."""
        pass

    def update(self, container, context):
        """Update an existing `container`
        """
        pass

    def remove(self, container):
        """Remove an existing `container`
        """
        pass
|
||||
|
||||
|
||||
class ClipLoader:
    """Loads a published representation into Hiero as a track item."""

    # NOTE(review): class-level attributes are shared between instances;
    # ``data`` is mutated in ``__init__`` and would leak state across
    # concurrently alive loaders - confirm this sharing is intended.
    active_bin = None
    data = dict()

    def __init__(self, cls, context, path, **options):
        """ Initialize object

        Arguments:
            cls (avalon.api.Loader): plugin object
            context (dict): loader plugin context
            options (dict)[optional]: possible keys:
                projectBinPath: "path/to/binItem"

        """
        # adopt all attributes of the calling loader plugin
        self.__dict__.update(cls.__dict__)
        self.context = context
        self.active_project = lib.get_current_project()
        self.fname = path

        # try to get value from options or evaluate key value for `handles`
        self.with_handles = options.get("handles") or bool(
            options.get("handles") is True)
        # try to get value from options or evaluate key value for `load_how`
        self.sequencial_load = options.get("sequentially") or bool(
            "Sequentially in order" in options.get("load_how", ""))
        # try to get value from options or evaluate key value for `load_to`
        self.new_sequence = options.get("newSequence") or bool(
            "New timeline" in options.get("load_to", ""))
        self.clip_name_template = options.get(
            "clipNameTemplate") or "{asset}_{subset}_{representation}"
        # fail loudly when the context cannot be resolved to loadable data
        assert self._populate_data(), str(
            "Cannot Load selected data, look into database "
            "or call your supervisor")

        # inject folder data to representation dict
        folder_entity = self.context["folder"]
        self.data["folderAttributes"] = folder_entity["attrib"]
        log.info("__init__ self.data: `{}`".format(pformat(self.data)))
        log.info("__init__ options: `{}`".format(pformat(options)))

        # add active components to class
        if self.new_sequence:
            if options.get("sequence"):
                # if multiselection is set then use options sequence
                self.active_sequence = options["sequence"]
            else:
                # create new sequence and match its fps to the folder's fps
                self.active_sequence = lib.get_current_sequence(new=True)
                self.active_sequence.setFramerate(
                    hiero.core.TimeBase.fromString(
                        str(self.data["folderAttributes"]["fps"])))
        else:
            self.active_sequence = lib.get_current_sequence()

        if options.get("track"):
            # if multiselection is set then use options track
            self.active_track = options["track"]
        else:
            self.active_track = lib.get_current_track(
                self.active_sequence, self.data["track_name"])

    def _populate_data(self):
        """ Gets context and convert it to self.data
        data structure:
            {
                "name": "assetName_productName_representationName"
                "path": "path/to/file/created/by/get_repr..",
                "binPath": "projectBinPath",
            }
        """
        # create name
        # NOTE(review): ``repr`` shadows the builtin of the same name
        repr = self.context["representation"]
        repr_cntx = repr["context"]
        folder_path = self.context["folder"]["path"]
        product_name = self.context["product"]["name"]
        representation = repr["name"]
        self.data["clip_name"] = self.clip_name_template.format(**repr_cntx)
        self.data["track_name"] = "_".join([product_name, representation])
        self.data["versionAttributes"] = self.context["version"]["attrib"]
        # gets file path
        file = get_representation_path_from_context(self.context)
        if not file:
            repr_id = repr["id"]
            log.warning(
                "Representation id `{}` is failing to load".format(repr_id))
            return None
        # normalize to forward slashes for hiero
        self.data["path"] = file.replace("\\", "/")

        # convert to hashed path
        if repr_cntx.get("frame"):
            self._fix_path_hashes()

        # solve project bin structure path
        hierarchy = "Loader{}".format(folder_path)

        self.data["binPath"] = hierarchy

        return True

    def _fix_path_hashes(self):
        """ Convert file path where it is needed padding with hashes
        """
        file = self.data["path"]
        if "#" not in file:
            # replace the literal frame number with "#" padding of the
            # same width, e.g. "1001" -> "####"
            frame = self.context["representation"]["context"].get("frame")
            padding = len(frame)
            file = file.replace(frame, "#" * padding)
        self.data["path"] = file

    def _make_track_item(self, source_bin_item, audio=False):
        """Create a video (or audio) track item from *source_bin_item*."""

        clip = source_bin_item.activeItem()

        # add to track as clip item
        if not audio:
            track_item = hiero.core.TrackItem(
                self.data["clip_name"], hiero.core.TrackItem.kVideo)
        else:
            track_item = hiero.core.TrackItem(
                self.data["clip_name"], hiero.core.TrackItem.kAudio)

        # source range trims handles; timeline range places the clip
        track_item.setSource(clip)
        track_item.setSourceIn(self.handle_start)
        track_item.setTimelineIn(self.timeline_in)
        track_item.setSourceOut((self.media_duration) - self.handle_end)
        track_item.setTimelineOut(self.timeline_out)
        track_item.setPlaybackSpeed(1)
        self.active_track.addTrackItem(track_item)

        return track_item

    def load(self):
        """Import the media, create the clip and place it on the track.

        Returns:
            hiero.core.TrackItem: the created track item.
        """
        # create project bin for the media to be imported into
        self.active_bin = lib.create_bin(self.data["binPath"])

        # create mediaItem in active project bin
        # create clip media
        self.media = hiero.core.MediaSource(self.data["path"])
        self.media_duration = int(self.media.duration())

        # get handles - version attributes win, folder attributes are
        # the fallback
        version_attributes = self.data["versionAttributes"]
        self.handle_start = version_attributes.get("handleStart")
        self.handle_end = version_attributes.get("handleEnd")
        if self.handle_start is None:
            self.handle_start = self.data["folderAttributes"]["handleStart"]
        if self.handle_end is None:
            self.handle_end = self.data["folderAttributes"]["handleEnd"]

        self.handle_start = int(self.handle_start)
        self.handle_end = int(self.handle_end)

        if self.sequencial_load:
            # append after the last existing item on the target track
            last_track_item = lib.get_track_items(
                sequence_name=self.active_sequence.name(),
                track_name=self.active_track.name()
            )
            if len(last_track_item) == 0:
                last_timeline_out = 0
            else:
                last_track_item = last_track_item[-1]
                last_timeline_out = int(last_track_item.timelineOut()) + 1
            self.timeline_in = last_timeline_out
            self.timeline_out = last_timeline_out + int(
                self.data["folderAttributes"]["clipOut"]
                - self.data["folderAttributes"]["clipIn"])
        else:
            # place at the clip's original timing
            self.timeline_in = int(self.data["folderAttributes"]["clipIn"])
            self.timeline_out = int(self.data["folderAttributes"]["clipOut"])

        log.debug("__ self.timeline_in: {}".format(self.timeline_in))
        log.debug("__ self.timeline_out: {}".format(self.timeline_out))

        # check if slate is included
        slate_on = "slate" in self.context["version"]["data"]["families"]
        log.debug("__ slate_on: {}".format(slate_on))

        # if slate is on then remove the slate frame from beginning
        if slate_on:
            self.media_duration -= 1
            self.handle_start += 1

        # create Clip from Media
        clip = hiero.core.Clip(self.media)
        clip.setName(self.data["clip_name"])

        # add Clip to bin if not there yet
        if self.data["clip_name"] not in [
                b.name() for b in self.active_bin.items()]:
            bin_item = hiero.core.BinItem(clip)
            self.active_bin.addItem(bin_item)

        # just make sure the clip is created
        # there were some cases were hiero was not creating it
        source_bin_item = None
        for item in self.active_bin.items():
            if self.data["clip_name"] == item.name():
                source_bin_item = item
        if not source_bin_item:
            log.warning("Problem with created Source clip: `{}`".format(
                self.data["clip_name"]))

        # include handles: widen the timeline range and zero the source
        # trims so the handles become part of the visible clip
        if self.with_handles:
            self.timeline_in -= self.handle_start
            self.timeline_out += self.handle_end
            self.handle_start = 0
            self.handle_end = 0

        # make track item from source in bin as item
        track_item = self._make_track_item(source_bin_item)

        log.info("Loading clips: `{}`".format(self.data["clip_name"]))
        return track_item
|
||||
|
||||
|
||||
class Creator(LegacyCreator):
    """Creator class wrapper

    Gathers current Hiero context (project, sequence, selected track
    items) and the per-plugin presets from project settings.
    """
    # tag/clip color used for created instances
    clip_color = "Purple"
    # index of the rename iteration; set externally during bulk renaming
    rename_index = None

    def __init__(self, *args, **kwargs):
        super(Creator, self).__init__(*args, **kwargs)
        # local import to avoid a circular dependency at module load time
        import ayon_core.hosts.hiero.api as phiero
        # per-plugin presets from project settings ("hiero" -> "create")
        self.presets = get_current_project_settings()[
            "hiero"]["create"].get(self.__class__.__name__, {})

        # adding basic current context resolve objects
        self.project = phiero.get_current_project()
        self.sequence = phiero.get_current_sequence()

        if (self.options or {}).get("useSelection"):
            # operate only on the artist's current timeline selection
            timeline_selection = phiero.get_timeline_selection()
            self.selected = phiero.get_track_items(
                selection=timeline_selection
            )
        else:
            self.selected = phiero.get_track_items()

        # dialog class used to gather creator attributes from the artist
        self.widget = CreatorWidget
|
||||
|
||||
|
||||
class PublishClip:
    """
    Convert a track item to publishable instance

    Args:
        track_item (hiero.core.TrackItem): hiero track item object
        kwargs (optional): additional data needed for rename=True (presets)

    Returns:
        hiero.core.TrackItem: hiero track item object with pype tag
    """
    # NOTE(review): class-level mutable dicts are shared across instances;
    # ``vertical_clip_match`` intentionally carries hero-clip data between
    # instances during vertical sync - confirm ``tag_data`` sharing is wanted.
    vertical_clip_match = {}
    tag_data = {}
    # mapping of hierarchy template token -> AYON folder type
    types = {
        "shot": "shot",
        "folder": "folder",
        "episode": "episode",
        "sequence": "sequence",
        "track": "sequence",
    }

    # parents search pattern
    parents_search_pattern = r"\{([a-z]*?)\}"

    # default templates for non-ui use
    rename_default = False
    hierarchy_default = "{_folder_}/{_sequence_}/{_track_}"
    clip_name_default = "shot_{_trackIndex_:0>3}_{_clipIndex_:0>4}"
    base_product_name_default = "<track_name>"
    review_track_default = "< none >"
    product_type_default = "plate"
    count_from_default = 10
    count_steps_default = 10
    vertical_sync_default = False
    driving_layer_default = ""

    def __init__(self, cls, track_item, **kwargs):
        """Initialize from a creator plugin and a hiero track item.

        Args:
            cls: creator plugin whose attributes are copied onto self.
            track_item (hiero.core.TrackItem): processed track item.
            **kwargs: optional ``ui_inputs`` and ``avalon`` tag data.
        """
        # populate input cls attribute onto self.[attr]
        self.__dict__.update(cls.__dict__)

        # get main parent objects
        self.track_item = track_item
        sequence_name = lib.get_current_sequence().name()
        self.sequence_name = str(sequence_name).replace(" ", "_")

        # track item (clip) main attributes
        self.ti_name = track_item.name()
        self.ti_index = int(track_item.eventNumber())

        # get track name and index
        track_name = track_item.parent().name()
        self.track_name = str(track_name).replace(" ", "_")
        self.track_index = int(track_item.parent().trackIndex())

        # adding tag.family into tag
        if kwargs.get("avalon"):
            self.tag_data.update(kwargs["avalon"])

        # add publish attribute to tag data
        self.tag_data.update({"publish": True})

        # adding ui inputs if any
        self.ui_inputs = kwargs.get("ui_inputs", {})

        # populate default data before we get other attributes
        self._populate_track_item_default_data()

        # use all populated default data to create all important attributes
        self._populate_attributes()

        # create parents with correct types
        self._create_parents()

    def convert(self):
        """Resolve tag data, optionally rename the clip and imprint the tag.

        Returns:
            hiero.core.TrackItem or None: the track item with the tag
            imprinted, or None when tag creation is skipped.
        """
        # solve track item data and add them to tag data
        tag_hierarchy_data = self._convert_to_tag_data()

        self.tag_data.update(tag_hierarchy_data)

        # if track name is in review track name and also if driving track name
        # is not in review track name: skip tag creation
        if (self.track_name in self.review_layer) and (
                self.driving_layer not in self.review_layer):
            return

        # deal with clip name
        new_name = self.tag_data.pop("newClipName")

        if self.rename:
            # rename track item
            self.track_item.setName(new_name)
            self.tag_data["asset_name"] = new_name
        else:
            # keep the original clip name as the asset/shot name
            self.tag_data["asset_name"] = self.ti_name
            self.tag_data["hierarchyData"]["shot"] = self.ti_name

        # AYON unique identifier
        folder_path = "/{}/{}".format(
            tag_hierarchy_data["hierarchy"],
            self.tag_data["asset_name"]
        )
        self.tag_data["folderPath"] = folder_path
        if self.tag_data["heroTrack"] and self.review_layer:
            self.tag_data.update({"reviewTrack": self.review_layer})
        else:
            self.tag_data.update({"reviewTrack": None})

        # TODO: remove debug print
        log.debug("___ self.tag_data: {}".format(
            pformat(self.tag_data)
        ))

        # create pype tag on track_item and add data
        lib.imprint(self.track_item, self.tag_data)

        return self.track_item

    def _populate_track_item_default_data(self):
        """ Populate default formatting data from track item. """

        self.track_item_default_data = {
            "_folder_": "shots",
            "_sequence_": self.sequence_name,
            "_track_": self.track_name,
            "_clip_": self.ti_name,
            "_trackIndex_": self.track_index,
            "_clipIndex_": self.ti_index
        }

    def _populate_attributes(self):
        """ Populate main object attributes. """
        # track item frame range and parent track name for vertical sync check
        self.clip_in = int(self.track_item.timelineIn())
        self.clip_out = int(self.track_item.timelineOut())

        # define ui inputs if non gui mode was used
        self.shot_num = self.ti_index
        log.debug(
            "____ self.shot_num: {}".format(self.shot_num))

        # ui_inputs data or default values if gui was not used
        self.rename = self.ui_inputs.get(
            "clipRename", {}).get("value") or self.rename_default
        self.clip_name = self.ui_inputs.get(
            "clipName", {}).get("value") or self.clip_name_default
        self.hierarchy = self.ui_inputs.get(
            "hierarchy", {}).get("value") or self.hierarchy_default
        self.hierarchy_data = self.ui_inputs.get(
            "hierarchyData", {}).get("value") or \
            self.track_item_default_data.copy()
        self.count_from = self.ui_inputs.get(
            "countFrom", {}).get("value") or self.count_from_default
        self.count_steps = self.ui_inputs.get(
            "countSteps", {}).get("value") or self.count_steps_default
        self.base_product_name = self.ui_inputs.get(
            "productName", {}).get("value") or self.base_product_name_default
        self.product_type = self.ui_inputs.get(
            "productType", {}).get("value") or self.product_type_default
        self.vertical_sync = self.ui_inputs.get(
            "vSyncOn", {}).get("value") or self.vertical_sync_default
        self.driving_layer = self.ui_inputs.get(
            "vSyncTrack", {}).get("value") or self.driving_layer_default
        self.review_track = self.ui_inputs.get(
            "reviewTrack", {}).get("value") or self.review_track_default
        self.audio = self.ui_inputs.get(
            "audio", {}).get("value") or False

        # build product name from layer name
        if self.base_product_name == "<track_name>":
            self.base_product_name = self.track_name

        # create product for publishing
        self.product_name = (
            self.product_type + self.base_product_name.capitalize()
        )

    def _replace_hash_to_expression(self, name, text):
        """ Replace hash with number in correct padding. """
        # e.g. text "sh###" with name "shot" -> "sh{shot:0>3}"
        _spl = text.split("#")
        _len = (len(_spl) - 1)
        _repl = "{{{0}:0>{1}}}".format(name, _len)
        return text.replace(("#" * _len), _repl)


    def _convert_to_tag_data(self):
        """ Convert internal data to tag data.

        Populating the tag data into internal variable self.tag_data
        """
        # define vertical sync attributes
        hero_track = True
        self.review_layer = ""
        if self.vertical_sync:
            # check if track name is not in driving layer
            if self.track_name not in self.driving_layer:
                # if it is not then define vertical sync as None
                hero_track = False

        # increasing steps by index of rename iteration
        self.count_steps *= self.rename_index

        hierarchy_formatting_data = {}
        hierarchy_data = deepcopy(self.hierarchy_data)
        _data = self.track_item_default_data.copy()
        if self.ui_inputs:
            # adding tag metadata from ui
            for _k, _v in self.ui_inputs.items():
                if _v["target"] == "tag":
                    self.tag_data[_k] = _v["value"]

            # driving layer is set as positive match
            if hero_track or self.vertical_sync:
                # mark review layer
                if self.review_track and (
                        self.review_track not in self.review_track_default):
                    # if review layer is defined and not the same as default
                    self.review_layer = self.review_track
                # shot num calculate
                if self.rename_index == 0:
                    self.shot_num = self.count_from
                else:
                    self.shot_num = self.count_from + self.count_steps

            # clip name sequence number
            _data.update({"shot": self.shot_num})

            # solve # in text to pythonic expression
            for _k, _v in hierarchy_data.items():
                if "#" not in _v["value"]:
                    continue
                hierarchy_data[
                    _k]["value"] = self._replace_hash_to_expression(
                        _k, _v["value"])

            # fill up pythonic expressions in hierarchy data
            for k, _v in hierarchy_data.items():
                hierarchy_formatting_data[k] = _v["value"].format(**_data)
        else:
            # if no gui mode then just pass default data
            hierarchy_formatting_data = hierarchy_data

        tag_hierarchy_data = self._solve_tag_hierarchy_data(
            hierarchy_formatting_data
        )

        # NOTE(review): heroTrack is set True here and may be flipped to
        # False below when this clip matches a stored hero clip range
        tag_hierarchy_data.update({"heroTrack": True})
        if hero_track and self.vertical_sync:
            self.vertical_clip_match.update({
                (self.clip_in, self.clip_out): tag_hierarchy_data
            })

        if not hero_track and self.vertical_sync:
            # driving layer is set as negative match
            for (_in, _out), hero_data in self.vertical_clip_match.items():
                hero_data.update({"heroTrack": False})
                if _in == self.clip_in and _out == self.clip_out:
                    data_product_name = hero_data["productName"]
                    # add track index in case duplicity of names in hero data
                    if self.product_name in data_product_name:
                        hero_data["productName"] = self.product_name + str(
                            self.track_index)
                    # in case track name and product name is the same then add
                    if self.base_product_name == self.track_name:
                        hero_data["productName"] = self.product_name
                    # assign data to return hierarchy data to tag
                    tag_hierarchy_data = hero_data

        # add data to return data dict
        return tag_hierarchy_data

    def _solve_tag_hierarchy_data(self, hierarchy_formatting_data):
        """ Solve tag data from hierarchy data and templates. """
        # fill up clip name and hierarchy keys
        hierarchy_filled = self.hierarchy.format(**hierarchy_formatting_data)
        clip_name_filled = self.clip_name.format(**hierarchy_formatting_data)

        # remove shot from hierarchy data: is not needed anymore
        hierarchy_formatting_data.pop("shot")

        return {
            "newClipName": clip_name_filled,
            "hierarchy": hierarchy_filled,
            "parents": self.parents,
            "hierarchyData": hierarchy_formatting_data,
            "productName": self.product_name,
            "productType": self.product_type,
            "families": [self.product_type, self.data["productType"]]
        }

    def _convert_to_entity(self, src_type, template):
        """ Converting input key to key with type. """
        # convert to entity type
        folder_type = self.types.get(src_type, None)

        assert folder_type, "Missing folder type for `{}`".format(
            src_type
        )

        # first collect formatting data to use for formatting template
        formatting_data = {}
        for _k, _v in self.hierarchy_data.items():
            value = _v["value"].format(
                **self.track_item_default_data)
            formatting_data[_k] = value

        return {
            "folder_type": folder_type,
            "entity_name": template.format(
                **formatting_data
            )
        }

    def _create_parents(self):
        """ Create parents and return it in list. """
        self.parents = []

        pattern = re.compile(self.parents_search_pattern)

        # pair each hierarchy level's token (e.g. "sequence") with its
        # template segment
        # NOTE(review): ``type`` shadows the builtin of the same name
        par_split = [(pattern.findall(t).pop(), t)
                     for t in self.hierarchy.split("/")]

        for type, template in par_split:
            parent = self._convert_to_entity(type, template)
            self.parents.append(parent)
|
||||
|
After Width: | Height: | Size: 5.9 KiB |
|
After Width: | Height: | Size: 6.1 KiB |
BIN
server_addon/hiero/client/ayon_hiero/api/startup/Icons/3D.png
Normal file
|
After Width: | Height: | Size: 10 KiB |
|
After Width: | Height: | Size: 5.7 KiB |
BIN
server_addon/hiero/client/ayon_hiero/api/startup/Icons/4_2D.png
Normal file
|
After Width: | Height: | Size: 7.8 KiB |
BIN
server_addon/hiero/client/ayon_hiero/api/startup/Icons/edit.png
Normal file
|
After Width: | Height: | Size: 8.2 KiB |
|
After Width: | Height: | Size: 8 KiB |
|
After Width: | Height: | Size: 5.6 KiB |
|
After Width: | Height: | Size: 5.1 KiB |
BIN
server_addon/hiero/client/ayon_hiero/api/startup/Icons/lense.png
Normal file
|
After Width: | Height: | Size: 12 KiB |
|
After Width: | Height: | Size: 13 KiB |
BIN
server_addon/hiero/client/ayon_hiero/api/startup/Icons/maya.png
Normal file
|
After Width: | Height: | Size: 4.5 KiB |
BIN
server_addon/hiero/client/ayon_hiero/api/startup/Icons/nuke.png
Normal file
|
After Width: | Height: | Size: 7.5 KiB |
|
After Width: | Height: | Size: 3.7 KiB |
|
After Width: | Height: | Size: 3.8 KiB |
|
After Width: | Height: | Size: 8.1 KiB |
|
After Width: | Height: | Size: 5.5 KiB |
|
After Width: | Height: | Size: 4.7 KiB |
|
After Width: | Height: | Size: 7.9 KiB |
|
After Width: | Height: | Size: 7.8 KiB |
|
After Width: | Height: | Size: 7.4 KiB |
|
|
@ -0,0 +1,142 @@
|
|||
# This action adds itself to the Spreadsheet View context menu allowing the contents of the Spreadsheet be exported as a CSV file.
|
||||
# Usage: Right-click in Spreadsheet > "Export as .CSV"
|
||||
# Note: This only prints the text data that is visible in the active Spreadsheet View.
|
||||
# If you've filtered text, only the visible text will be printed to the CSV file
|
||||
# Usage: Copy to ~/.hiero/Python/StartupUI
|
||||
import os
|
||||
import csv
|
||||
|
||||
import hiero.core.events
|
||||
import hiero.ui
|
||||
try:
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtCore import *
|
||||
except:
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtWidgets import *
|
||||
from PySide2.QtCore import *
|
||||
|
||||
|
||||
### Magic Widget Finding Methods - This stuff crawls all the PySide widgets, looking for an answer
|
||||
def findWidget(w):
    """Recursively collect all Foundry widgets under *w*.

    Appends matches to the module-global ``foundryWidgets`` list (the
    caller is expected to reset that list first) and returns it.
    """
    global foundryWidgets
    if "Foundry" in w.metaObject().className():
        foundryWidgets.append(w)

    for child in w.children():
        findWidget(child)
    return foundryWidgets
|
||||
|
||||
|
||||
def getFoundryWidgetsWithClassName(filter=None):
    """Return all Foundry widgets, optionally filtered by class name.

    @param filter: optional substring matched against each widget's
        metaObject class name (e.g. "SpreadsheetTreeView"). The parameter
        name shadows the builtin but is kept for caller compatibility.
    @return: list of matching QWidget instances.
    """
    global foundryWidgets
    foundryWidgets = []
    app = QApplication.instance()
    for w in app.topLevelWidgets():
        findWidget(w)

    # FIX: removed the unused local "widgets" from the original; the
    # filtering loop is expressed as a comprehension.
    if filter:
        return [widget for widget in foundryWidgets
                if filter in widget.metaObject().className()]
    return foundryWidgets
|
||||
|
||||
|
||||
# When right click, get the Sequence Name
|
||||
def activeSpreadsheetTreeView():
    """Return the SpreadsheetTreeView that currently has focus, or None.

    Does some PySide widget magic to detect the active Spreadsheet view.
    """
    for view in getFoundryWidgetsWithClassName(filter="SpreadsheetTreeView"):
        if view.hasFocus():
            return view
    return None
|
||||
|
||||
|
||||
#### Adds "Export .CSV" action to the Spreadsheet Context menu ####
|
||||
class SpreadsheetExportCSVAction(QAction):
    """Adds an "Export as .CSV" action to the Spreadsheet context menu.

    Only the text visible in the active Spreadsheet view is exported:
    hidden columns are skipped, and filtered rows are whatever the model
    currently exposes.
    """

    def __init__(self):
        QAction.__init__(self, "Export as .CSV", None)
        self.triggered.connect(self.exportCSVFromActiveSpreadsheetView)
        hiero.core.events.registerInterest("kShowContextMenu/kSpreadsheet",
                                           self.eventHandler)
        self.setIcon(QIcon("icons:FBGridView.png"))

    def eventHandler(self, event):
        # Insert the action into the Spreadsheet context menu.
        event.menu.addAction(self)

    def exportCSVFromActiveSpreadsheetView(self):
        """Write the visible contents of the active Spreadsheet to CSV."""

        # Get the active QTreeView from the active Spreadsheet.
        spreadsheetTreeView = activeSpreadsheetTreeView()

        if not spreadsheetTreeView:
            return "Unable to detect the active TreeView."
        seq = hiero.ui.activeView().sequence()
        if not seq:
            print("Unable to detect the active Sequence from the activeView.")
            return

        # The data model of the QTreeView.
        model = spreadsheetTreeView.model()

        csvSavePath = os.path.join(QDir.homePath(), "Desktop",
                                   seq.name() + ".csv")
        savePath, filter = QFileDialog.getSaveFileName(
            None,
            caption="Export Spreadsheet to .CSV as...",
            dir=csvSavePath,
            filter="*.csv")
        print("Saving To: {}".format(savePath))

        # Saving was cancelled...
        if len(savePath) == 0:
            return

        # FIX: use a context manager so the file is closed even if a write
        # fails (the original leaked the handle on any exception).
        with open(savePath, "w") as f:
            csvWriter = csv.writer(
                f, delimiter=',', quotechar="|", quoting=csv.QUOTE_MINIMAL)

            # Header row: titles of the currently visible columns only.
            csvHeader = []
            for col in range(0, model.columnCount()):
                if not spreadsheetTreeView.isColumnHidden(col):
                    csvHeader += [model.headerData(col, Qt.Horizontal)]

            # Write the Header row to the CSV file.
            csvWriter.writerow(csvHeader)

            # One CSV row per model row, visible columns only.
            for row in range(model.rowCount()):
                row_data = []
                for col in range(model.columnCount()):
                    if not spreadsheetTreeView.isColumnHidden(col):
                        row_data.append(
                            model.index(row, col, QModelIndex()).data(
                                Qt.DisplayRole))
                csvWriter.writerow(row_data)

        # Conveniently show the CSV file in the native file browser...
        QDesktopServices.openUrl(
            QUrl('file:///%s' % (os.path.dirname(savePath))))
|
||||
|
||||
|
||||
# Add the action...
|
||||
# Instantiate once at import time so the action registers its
# Spreadsheet context-menu hook.
csvActions = SpreadsheetExportCSVAction()
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
import traceback

# Activate the Hiero host integration for AYON.
from ayon_core.pipeline import install_host
import ayon_core.hosts.hiero.api as phiero

install_host(phiero)

try:
    __import__("ayon_core.hosts.hiero.api")
    __import__("pyblish")

except ImportError as e:
    print(traceback.format_exc())
    print("pyblish: Could not load integration: %s " % e)

else:
    # Setup integration.
    # FIX: removed the redundant re-import of ayon_core.hosts.hiero.api;
    # "phiero" is already bound at the top of this script.
    phiero.lib.setup()
|
||||
|
|
@ -0,0 +1,79 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
__author__ = "Daniel Flehner Heen"
|
||||
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]
|
||||
|
||||
import os
|
||||
import hiero.core
|
||||
from hiero.core import util
|
||||
|
||||
import opentimelineio as otio
|
||||
from ayon_core.hosts.hiero.api.otio import hiero_export
|
||||
|
||||
class OTIOExportTask(hiero.core.TaskBase):
    """Export task that writes the current sequence to an .otio file."""

    def __init__(self, initDict):
        """Initialize"""
        hiero.core.TaskBase.__init__(self, initDict)
        self.otio_timeline = None

    def name(self):
        return str(type(self))

    def startTask(self):
        self.otio_timeline = hiero_export.create_otio_timeline()

    def taskStep(self):
        # No incremental work; everything happens in finishTask().
        return False

    def finishTask(self):
        # FIX: pre-bind exportPath so the error handler below cannot raise
        # UnboundLocalError if resolvedExportPath() itself fails.
        exportPath = None
        try:
            exportPath = self.resolvedExportPath()

            # Check file extension
            if not exportPath.lower().endswith(".otio"):
                exportPath += ".otio"

            # check export root exists
            dirname = os.path.dirname(exportPath)
            util.filesystem.makeDirs(dirname)

            # write otio file
            hiero_export.write_to_file(self.otio_timeline, exportPath)

        # Catch all exceptions and log error
        except Exception as e:
            self.setError("failed to write file {f}\n{e}".format(
                f=exportPath,
                e=e)
            )

        hiero.core.TaskBase.finishTask(self)

    def forcedAbort(self):
        pass
|
||||
|
||||
|
||||
class OTIOExportPreset(hiero.core.TaskPresetBase):
    """Preset holding the settings for the OTIO export task."""

    def __init__(self, name, properties):
        """Initialise the preset to default values."""
        hiero.core.TaskPresetBase.__init__(self, OTIOExportTask, name)

        # Default to including tags; this also primes the module flag
        # used by the exporter.
        self.properties()["includeTags"] = hiero_export.include_tags = True
        self.properties().update(properties)

    def supportedItems(self):
        # Sequences only.
        return hiero.core.TaskPresetBase.kSequence

    def addCustomResolveEntries(self, resolver):
        # {ext} always resolves to the otio extension.
        resolver.addResolver(
            "{ext}",
            "Extension of the file to be output",
            lambda keyword, task: "otio"
        )

    def supportsAudio(self):
        return True
|
||||
|
||||
|
||||
# Register the exporter with Hiero's export task registry.
hiero.core.taskRegistry.registerTask(OTIOExportPreset, OTIOExportTask)
|
||||
|
|
@ -0,0 +1,74 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
__author__ = "Daniel Flehner Heen"
|
||||
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]
|
||||
|
||||
import hiero.ui
|
||||
from .OTIOExportTask import (
|
||||
OTIOExportTask,
|
||||
OTIOExportPreset
|
||||
)
|
||||
|
||||
try:
|
||||
# Hiero >= 11.x
|
||||
from PySide2 import QtCore
|
||||
from PySide2.QtWidgets import QCheckBox
|
||||
from hiero.ui.FnTaskUIFormLayout import TaskUIFormLayout as FormLayout
|
||||
|
||||
except ImportError:
|
||||
# Hiero <= 10.x
|
||||
from PySide import QtCore # lint:ok
|
||||
from PySide.QtGui import QCheckBox, QFormLayout # lint:ok
|
||||
|
||||
FormLayout = QFormLayout # lint:ok
|
||||
|
||||
from ayon_core.hosts.hiero.api.otio import hiero_export
|
||||
|
||||
class OTIOExportUI(hiero.ui.TaskUIBase):
    """UI panel for the OTIO export preset."""

    def __init__(self, preset):
        """Initialize"""
        hiero.ui.TaskUIBase.__init__(
            self,
            OTIOExportTask,
            preset,
            "OTIO Exporter"
        )

    def includeMarkersCheckboxChanged(self, state):
        # Mirror the checkbox state into the exporter module flag.
        hiero_export.include_tags = state == QtCore.Qt.Checked

    def populateUI(self, widget, exportTemplate):
        layout = widget.layout()
        formLayout = FormLayout()

        # Hiero ~= 10.0v4 ships the widget without a layout.
        if layout is None:
            layout = formLayout
            widget.setLayout(layout)
        else:
            layout.addLayout(formLayout)

        # Checkbox controlling whether Tags become markers in the OTIO.
        self.includeMarkersCheckbox = QCheckBox()
        self.includeMarkersCheckbox.setToolTip(
            "Enable to include Tags as markers in the exported OTIO file."
        )
        if self._preset.properties()["includeTags"]:
            self.includeMarkersCheckbox.setCheckState(QtCore.Qt.Checked)
        else:
            self.includeMarkersCheckbox.setCheckState(QtCore.Qt.Unchecked)

        self.includeMarkersCheckbox.stateChanged.connect(
            self.includeMarkersCheckboxChanged
        )

        # Add the checkbox to the layout.
        formLayout.addRow("Include Tags:", self.includeMarkersCheckbox)
|
||||
|
||||
|
||||
# Register the UI so it appears alongside the OTIO export preset.
hiero.ui.taskUIRegistry.registerTaskUI(OTIOExportPreset, OTIOExportUI)
|
||||
|
|
@ -0,0 +1,7 @@
|
|||
from .OTIOExportTask import OTIOExportTask
|
||||
from .OTIOExportUI import OTIOExportUI
|
||||
|
||||
# Public API of this package.
__all__ = [
    "OTIOExportTask",
    "OTIOExportUI",
]
|
||||
|
|
@ -0,0 +1,246 @@
|
|||
try:
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtCore import *
|
||||
except:
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtWidgets import *
|
||||
from PySide2.QtCore import *
|
||||
|
||||
from hiero.core.util import uniquify, version_get, version_set
|
||||
import hiero.core
|
||||
import hiero.ui
|
||||
import nuke
|
||||
|
||||
# A globally variable for storing the current Project
|
||||
gTrackedActiveProject = None
|
||||
|
||||
# This selection handler will track changes in items selected/deselected in the Bin/Timeline/Spreadsheet Views
|
||||
|
||||
|
||||
def __trackActiveProjectHandler(event):
    """Remember the project of the most recently selected item."""
    global gTrackedActiveProject
    binSelection = event.sender.selection()
    if binSelection and hasattr(binSelection[0], "project"):
        proj = binSelection[0].project()

        # Only remember valid, open user projects.
        if proj in hiero.core.projects(hiero.core.Project.kUserProjects):
            gTrackedActiveProject = proj
|
||||
|
||||
|
||||
# Track selection changes in the Bin, Timeline and Spreadsheet views so
# the "active" project can be inferred later.
for _eventType in ("kSelectionChanged/kBin",
                   "kSelectionChanged/kTimeline",
                   "kSelectionChanged/Spreadsheet"):
    hiero.core.events.registerInterest(_eventType,
                                       __trackActiveProjectHandler)
|
||||
|
||||
|
||||
def activeProject():
    """hiero.ui.activeProject() -> returns the current Project

    Note: There is not technically a notion of an "active" Project in
    Hiero/NukeStudio, as it is a multi-project App. This method determines
    what is "active" by the following rules:

    1 - If the current Viewer (hiero.ui.currentViewer) contains a Clip or
        Sequence, this item is assumed to give the active Project.
    2 - If nothing is currently in the Viewer, look to the active View and
        determine the project from the active selection.
    3 - If no current selection can be determined, fall back to a globally
        tracked last selection from __trackActiveProjectHandler.
    4 - If all those rules fail, fall back to the last project in the list
        of hiero.core.projects().

    @return: hiero.core.Project"""
    global gTrackedActiveProject
    activeProject = None

    # Case 1: infer the project from whatever the current Viewer shows.
    # Relies on hiero.ui.currentViewer() being robust.
    cv = hiero.ui.currentViewer().player().sequence()
    if hasattr(cv, "project"):
        activeProject = cv.project()
    else:
        # Case 2: derive it from the selection in the activeView.
        # Note: from the Script Editor the activeView is usually None, so
        # this branch is rarely taken there.
        activeView = hiero.ui.activeView()
        if activeView:
            selection = activeView.selection()

            # Handle the case where nothing is selected in the active view.
            if len(selection) == 0:
                # Timeline/Spreadsheet views expose a sequence even with no
                # selection, so try that...
                if isinstance(activeView, (hiero.ui.TimelineEditor,
                                           hiero.ui.SpreadsheetView)):
                    activeSequence = activeView.sequence()
                    # FIX: the original tested the undefined name
                    # "currentItem" here, which raised NameError.
                    if hasattr(activeSequence, "project"):
                        activeProject = activeSequence.project()
            else:
                # Assume the first selected item carries the project.
                currentItem = selection[0]
                if hasattr(currentItem, "project"):
                    activeProject = currentItem.project()

    # Finally, Cases 3 and 4...
    if not activeProject:
        activeProjects = hiero.core.projects(hiero.core.Project.kUserProjects)
        if gTrackedActiveProject in activeProjects:
            activeProject = gTrackedActiveProject
        elif activeProjects:
            # FIX: guard against an empty project list (the original
            # raised IndexError on activeProjects[-1]).
            activeProject = activeProjects[-1]

    return activeProject
|
||||
|
||||
# Method to get all recent projects
|
||||
|
||||
|
||||
def recentProjects():
    """hiero.core.recentProjects() -> Returns a list of paths to recently opened projects

    Hiero stores up to 5 recent projects in uistate.ini with the [recentFile]/# key.

    @return: list of paths to .hrox Projects"""
    appSettings = hiero.core.ApplicationSettings()
    paths = [appSettings.value('recentFile/%i' % i) for i in range(5)]
    # Drop empty slots (unused recent-file entries come back as "").
    return [p for p in paths if len(p) > 0]
|
||||
|
||||
# Method to get recent project by index
|
||||
|
||||
|
||||
def recentProject(k=0):
    """hiero.core.recentProject(k) -> Returns the recent project path, specified by integer k (0-4)

    @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects.

    @return: hiero.core.Project"""
    settings = hiero.core.ApplicationSettings()
    return settings.value('recentFile/%i' % int(k), None)
|
||||
|
||||
# Method to get open project by index
|
||||
|
||||
|
||||
def openRecentProject(k=0):
    """hiero.core.openRecentProject(k) -> Opens the recent project as listed in the Open Recent list.

    @param: k (optional, default = 0) - an integer from 0-4, relating to the index of recent projects.
    @return: hiero.core.Project"""
    settings = hiero.core.ApplicationSettings()
    recentPath = settings.value('recentFile/%i' % int(k), None)
    return hiero.core.openProject(recentPath)
|
||||
|
||||
|
||||
# Duck-punch these helpers into the relevant ui/core namespaces so they
# are reachable as hiero.ui.* / hiero.core.* like the built-ins.
hiero.ui.activeProject = activeProject
hiero.core.recentProjects = recentProjects
hiero.core.recentProject = recentProject
hiero.core.openRecentProject = openRecentProject
|
||||
|
||||
|
||||
# Method to Save a new Version of the activeHrox Project
|
||||
class SaveAllProjects(QAction):
    """Bin-view context-menu action that saves every open project."""

    def __init__(self):
        QAction.__init__(self, "Save All Projects", None)
        self.triggered.connect(self.projectSaveAll)
        hiero.core.events.registerInterest(
            "kShowContextMenu/kBin", self.eventHandler)

    def projectSaveAll(self):
        """Save every open project, reporting failures per project."""
        allProjects = hiero.core.projects()
        for proj in allProjects:
            # FIX: catch Exception rather than a bare except, so
            # KeyboardInterrupt/SystemExit are not swallowed.
            try:
                proj.save()
                print("Saved Project: {} to: {} ".format(
                    proj.name(), proj.path()
                ))
            except Exception:
                print((
                    "Unable to save Project: {} to: {}. "
                    "Check file permissions.").format(
                        proj.name(), proj.path()))

    def eventHandler(self, event):
        event.menu.addAction(self)
|
||||
|
||||
# For projects with v# in the path name, saves out a new Project with v#+1
|
||||
|
||||
|
||||
class SaveNewProjectVersion(QAction):
    """Bin-view action that saves a v#+1 copy of the selected project(s).

    For projects whose path contains a "v#" token, the version number is
    bumped; otherwise the user is asked whether to save as "_v01".
    """

    def __init__(self):
        QAction.__init__(self, "Save New Version...", None)
        self.triggered.connect(self.saveNewVersion)
        hiero.core.events.registerInterest(
            "kShowContextMenu/kBin", self.eventHandler)
        self.selectedProjects = []

    def _trySaveAs(self, proj, oldName, newPath):
        # Shared save/report helper; never raises.
        # FIX: the original duplicated this block twice with bare excepts.
        try:
            proj.saveAs(newPath)
            print("Saved new project version: {} to: {} ".format(
                oldName, newPath))
        except Exception:
            print((
                "Unable to save Project: {}. Check file permissions."
            ).format(oldName))

    def saveNewVersion(self):
        """Save a bumped-version copy of each targeted project."""
        if len(self.selectedProjects) > 0:
            projects = self.selectedProjects
        else:
            projects = [hiero.ui.activeProject()]

        if len(projects) < 1:
            return

        for proj in projects:
            oldName = proj.name()
            path = proj.path()
            v = None
            prefix = None
            try:
                (prefix, v) = version_get(path, "v")
            except ValueError as msg:
                print(msg)

            if (prefix is not None) and (v is not None):
                # Bump the existing version token.
                v = int(v)
                newPath = version_set(path, prefix, v, v + 1)
                self._trySaveAs(proj, oldName, newPath)
            else:
                # No version token - offer to start one at v01.
                newPath = path.replace(".hrox", "_v01.hrox")
                answer = nuke.ask(
                    "%s does not contain a version number.\n"
                    "Do you want to save as %s?" % (proj, newPath))
                if answer:
                    self._trySaveAs(proj, oldName, newPath)

    def eventHandler(self, event):
        self.selectedProjects = []
        sender = event.sender
        if hasattr(sender, "selection"):
            selection = sender.selection()
            if selection:
                self.selectedProjects = uniquify(
                    [item.project() for item in selection])
        event.menu.addAction(self)
|
||||
|
||||
|
||||
# Instantiate the actions so they hook into the Bin context menu.
saveAllAct = SaveAllProjects()
saveNewAct = SaveNewProjectVersion()

# Also expose them in the File menu, just above "Import File(s)...".
fileMenu = hiero.ui.findMenuAction("foundry.menu.file")
importAct = hiero.ui.findMenuAction("foundry.project.importFiles")
hiero.ui.insertMenuAction(saveNewAct, fileMenu.menu(),
                          before="Import File(s)...")
hiero.ui.insertMenuAction(saveAllAct, fileMenu.menu(),
                          before="Import File(s)...")
fileMenu.menu().insertSeparator(importAct)
|
||||
|
|
@ -0,0 +1,9 @@
|
|||
"""Puts the selection project into "hiero.selection"""
|
||||
|
||||
import hiero
|
||||
|
||||
|
||||
def selectionChanged(event):
|
||||
hiero.selection = event.sender.selection()
|
||||
|
||||
hiero.core.events.registerInterest("kSelectionChanged", selectionChanged)
|
||||
|
|
@ -0,0 +1,166 @@
|
|||
# setFrameRate - adds a Right-click menu to the Project Bin view, allowing multiple BinItems (Clips/Sequences) to have their frame rates set.
|
||||
# Install in: ~/.hiero/Python/StartupUI
|
||||
# Requires 1.5v1 or later
|
||||
|
||||
import hiero.core
|
||||
import hiero.ui
|
||||
try:
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtCore import *
|
||||
except:
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtCore import *
|
||||
from PySide2.QtWidgets import *
|
||||
|
||||
# Dialog for setting a Custom frame rate.
|
||||
class SetFrameRateDialog(QDialog):
    """Dialog that applies a user-entered frame rate to a Bin selection."""

    def __init__(self, itemSelection=None, parent=None):
        super(SetFrameRateDialog, self).__init__(parent)
        self.setWindowTitle("Set Custom Frame Rate")
        self.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        layout = QFormLayout()
        self._itemSelection = itemSelection

        self._frameRateField = QLineEdit()
        self._frameRateField.setToolTip("Enter custom frame rate here.")
        self._frameRateField.setValidator(QDoubleValidator(1, 99, 3, self))
        self._frameRateField.textChanged.connect(self._textChanged)
        layout.addRow("Enter fps: ", self._frameRateField)

        # Standard buttons for Ok/Cancel.
        self._buttonbox = QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        self._buttonbox.accepted.connect(self.accept)
        self._buttonbox.rejected.connect(self.reject)
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(False)
        layout.addRow("", self._buttonbox)
        self.setLayout(layout)

    def _updateOkButtonState(self):
        # Cancel is always available; only enable Ok for a usable rate.
        # FIX: float() raised ValueError for empty/partial input (e.g. ""
        # or "."), which QDoubleValidator allows as intermediate states
        # and which textChanged delivers on every keystroke.
        try:
            currentFramerate = float(self.currentFramerateString())
        except ValueError:
            currentFramerate = 0.0
        enableOk = (currentFramerate > 0.0) and (currentFramerate <= 250.0)
        self._buttonbox.button(QDialogButtonBox.Ok).setEnabled(enableOk)

    def _textChanged(self, newText):
        self._updateOkButtonState()

    def currentFramerateString(self):
        """Return the current contents of the fps field as a string."""
        return str(self._frameRateField.text())

    def showDialogAndSetFrameRateFromSelection(self):
        """Show the dialog and, if accepted, apply the rate to the selection."""
        if self._itemSelection is not None:
            if self.exec_():
                # Construct a TimeBase object for the entered rate (fps).
                fps = hiero.core.TimeBase().fromString(
                    self.currentFramerateString())

                # Set the frame rate for the selected BinItems.
                for item in self._itemSelection:
                    item.setFramerate(fps)
        return
|
||||
|
||||
# This is just a convenience method for returning QActions with a title, triggered method and icon.
|
||||
def makeAction(title, method, icon=None):
    """Return a QAction that calls ``method(title)`` when triggered."""
    action = QAction(title, None)
    action.setIcon(QIcon(icon))

    # Capture the title so the callback knows which entry was picked.
    def methodWrapper():
        method(title)

    action.triggered.connect(methodWrapper)
    return action
|
||||
|
||||
# Menu which adds a Set Frame Rate Menu to Project Bin view
|
||||
class SetFrameRateMenu:
    """Adds a "Set Frame Rate" submenu to the Project Bin context menu."""

    def __init__(self):
        self._frameRateMenu = None
        self._frameRatesDialog = None
        # FIX: initialize _selection so setFrameRateFromMenuSelection
        # cannot hit an AttributeError before the first menu event.
        self._selection = None

        # Note: hiero.core.defaultFrameRates() is not used here because
        # its decimal-point formatting breaks the string matching below.
        self.frameRates = ["8", "12", "12.50", "15", "23.98", "24", "25",
                           "29.97", "30", "48", "50", "59.94", "60"]
        hiero.core.events.registerInterest(
            "kShowContextMenu/kBin", self.binViewEventHandler)

        self.menuActions = []

    def createFrameRateMenus(self, selection):
        """Build the submenu, marking rates present in the selection."""
        selectedClipFPS = [
            str(bi.activeItem().framerate()) for bi in selection
            if isinstance(bi, hiero.core.BinItem) and hasattr(bi, "activeItem")
        ]
        selectedClipFPS = hiero.core.util.uniquify(selectedClipFPS)
        sameFrameRate = len(selectedClipFPS) == 1
        self.menuActions = []
        for fps in self.frameRates:
            if fps in selectedClipFPS:
                # Tick when every item shares the rate, half-mark otherwise.
                icon = ("icons:Ticked.png" if sameFrameRate
                        else "icons:remove active.png")
            else:
                icon = None
            self.menuActions.append(
                makeAction(fps, self.setFrameRateFromMenuSelection,
                           icon=icon))

        # Now add the Custom... entry.
        self.menuActions.append(makeAction(
            "Custom...", self.setFrameRateFromMenuSelection, icon=None))

        frameRateMenu = QMenu("Set Frame Rate")
        for action in self.menuActions:
            frameRateMenu.addAction(action)

        return frameRateMenu

    def setFrameRateFromMenuSelection(self, menuSelectionFPS):
        """Apply the picked rate (or open the Custom dialog) to the selection."""
        selectedBinItems = [
            bi.activeItem() for bi in self._selection
            if isinstance(bi, hiero.core.BinItem) and hasattr(bi, "activeItem")
        ]
        # FIX: guard against an empty filtered list before indexing [0].
        if not selectedBinItems:
            return
        currentProject = selectedBinItems[0].project()

        with currentProject.beginUndo("Set Frame Rate"):
            if menuSelectionFPS == "Custom...":
                self._frameRatesDialog = SetFrameRateDialog(
                    itemSelection=selectedBinItems)
                self._frameRatesDialog.showDialogAndSetFrameRateFromSelection()
            else:
                fps = hiero.core.TimeBase().fromString(menuSelectionFPS)
                for binItem in selectedBinItems:
                    binItem.setFramerate(fps)

        return

    def binViewEventHandler(self, event):
        """Insert the submenu when the Bin context menu is shown."""
        if not hasattr(event.sender, "selection"):
            # Something has gone wrong; we should only be here if raised
            # by the Bin view, which provides a selection.
            return

        # Reset the selection to None...
        self._selection = None
        s = event.sender.selection()

        # Return if there's no selection. We won't add the menu.
        # FIX: identity comparison with None ("== None" -> "is None").
        if s is None:
            return
        # Filter the selection down to BinItems.
        self._selection = [
            item for item in s if isinstance(item, hiero.core.BinItem)]
        if len(self._selection) == 0:
            return

        # Build the menu from the selection so current rates are marked.
        self._frameRateMenu = self.createFrameRateMenus(self._selection)

        # Insert before the "Set Media Colour Transform" action.
        for action in event.menu.actions():
            if str(action.text()) == "Set Media Colour Transform":
                event.menu.insertMenu(action, self._frameRateMenu)
                break
|
||||
|
||||
# Instantiate the Menu to get it to register itself.
|
||||
# Instantiate the menu so it registers itself.
# NOTE(review): rebinding the class name to its instance prevents any
# further instantiation; intentional here, but worth confirming.
SetFrameRateMenu = SetFrameRateMenu()
|
||||
|
|
@ -0,0 +1,845 @@
|
|||
# PimpMySpreadsheet 1.0, Antony Nasce, 23/05/13.
|
||||
# Adds custom spreadsheet columns and right-click menu for setting the Shot Status, and Artist Shot Assignment.
|
||||
# gStatusTags is a global dictionary of key(status)-value(icon) pairs, which can be overridden with custom icons if required
|
||||
# Requires Hiero 1.7v2 or later.
|
||||
# Install Instructions: Copy to ~/.hiero/Python/StartupUI
|
||||
|
||||
import hiero.core
|
||||
import hiero.ui
|
||||
|
||||
try:
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtCore import *
|
||||
except:
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtWidgets import *
|
||||
from PySide2.QtCore import *
|
||||
|
||||
# Set to True if you want the "Set Status" right-click menu, False if not.
kAddStatusMenu = True

# Set to True if you want the "Assign Artist" right-click menu, False if not.
kAssignArtistMenu = True
|
||||
|
||||
# Global list of Artist Name Dictionaries
|
||||
# Note: Override this to add different names, icons, department, IDs.
|
||||
# Global list of artist record dictionaries.
# Note: Override this to add different names, icons, departments, IDs.
gArtistList = [{
    "artistName": "John Smith",
    "artistIcon": "icons:TagActor.png",
    "artistDepartment": "3D",
    "artistID": 0
}, {
    # FIX: corrected the typo "Savlvador" -> "Salvador".
    "artistName": "Salvador Dali",
    "artistIcon": "icons:TagActor.png",
    "artistDepartment": "Roto",
    "artistID": 1
}, {
    "artistName": "Leonardo Da Vinci",
    "artistIcon": "icons:TagActor.png",
    "artistDepartment": "Paint",
    "artistID": 2
}, {
    "artistName": "Claude Monet",
    "artistIcon": "icons:TagActor.png",
    "artistDepartment": "Comp",
    "artistID": 3
}, {
    "artistName": "Pablo Picasso",
    "artistIcon": "icons:TagActor.png",
    "artistDepartment": "Animation",
    "artistID": 4
}]
|
||||
|
||||
# Global Dictionary of Status Tags.
|
||||
# Note: This can be overwritten if you want to add a new status cellType or custom icon
|
||||
# Override the gStatusTags dictionary by adding your own "Status":"Icon.png" key-value pairs.
|
||||
# Add new custom keys like so: gStatusTags["For Client"] = "forClient.png"
|
||||
# Global dictionary of status tags: "Status name" -> icon resource path.
# Override by adding your own pairs, e.g.:
#     gStatusTags["For Client"] = "forClient.png"
gStatusTags = {
    "Approved": "icons:status/TagApproved.png",
    "Unapproved": "icons:status/TagUnapproved.png",
    "Ready To Start": "icons:status/TagReadyToStart.png",
    "Blocked": "icons:status/TagBlocked.png",
    "On Hold": "icons:status/TagOnHold.png",
    "In Progress": "icons:status/TagInProgress.png",
    "Awaiting Approval": "icons:status/TagAwaitingApproval.png",
    "Omitted": "icons:status/TagOmitted.png",
    "Final": "icons:status/TagFinal.png",
}
|
||||
|
||||
|
||||
# The Custom Spreadsheet Columns
|
||||
class CustomSpreadsheetColumns(QObject):
|
||||
"""
|
||||
A class defining custom columns for Hiero's spreadsheet view. This has a similar, but
|
||||
slightly simplified, interface to the QAbstractItemModel and QItemDelegate classes.
|
||||
"""
|
||||
global gStatusTags
|
||||
global gArtistList
|
||||
|
||||
# Ideally, we'd set this list on a Per Item basis, but this is expensive for a large mixed selection
|
||||
# Ideally these would be set per item, but that is expensive for a large
# mixed selection, so one combined list is used for the dropdown.
standardColourSpaces = [
    "linear", "sRGB", "rec709", "Cineon", "Gamma1.8", "Gamma2.2",
    "Panalog", "REDLog", "ViperLog",
]
arriColourSpaces = [
    "Video - Rec709", "LogC - Camera Native", "Video - P3", "ACES",
    "LogC - Film", "LogC - Wide Gamut",
]
r3dColourSpaces = [
    "Linear", "Rec709", "REDspace", "REDlog", "PDlog685", "PDlog985",
    "CustomPDlog", "REDgamma", "SRGB", "REDlogFilm", "REDgamma2",
    "REDgamma3",
]
gColourSpaces = standardColourSpaces + arriColourSpaces + r3dColourSpaces
|
||||
|
||||
# NOTE(review): evaluated once at class-definition time, so this holds the
# view active at import; presumably unused leftover state - confirm.
currentView = hiero.ui.activeView()
|
||||
|
||||
# This is the list of Columns available
|
||||
# The list of custom columns exposed to the spreadsheet view.
# "cellType" is either "readonly" (plain text) or "dropdown" (editable).
gCustomColumnList = [
    {"name": "Tags", "cellType": "readonly"},
    {"name": "Colourspace", "cellType": "dropdown"},
    {"name": "Notes", "cellType": "readonly"},
    {"name": "FileType", "cellType": "readonly"},
    {"name": "Shot Status", "cellType": "dropdown"},
    {"name": "Thumbnail", "cellType": "readonly"},
    {"name": "MediaType", "cellType": "readonly"},
    {"name": "Width", "cellType": "readonly"},
    {"name": "Height", "cellType": "readonly"},
    {"name": "Pixel Aspect", "cellType": "readonly"},
    {"name": "Artist", "cellType": "dropdown"},
    {"name": "Department", "cellType": "readonly"},
]
|
||||
|
||||
def numColumns(self):
    """Return how many custom columns this provider adds to the view."""
    return len(self.gCustomColumnList)
|
||||
|
||||
def columnName(self, column):
    """Return the display name of the custom column at *column*."""
    return self.gCustomColumnList[column]["name"]
|
||||
|
||||
def getTagsString(self, item):
    """Return the names of all Tags on *item* as one comma-joined string."""
    return ','.join(tag.name() for tag in item.tags())
|
||||
|
||||
def getNotes(self, item):
    """Return the non-empty notes of all Tags on *item*, comma-joined."""
    nonEmptyNotes = [tag.note() for tag in item.tags() if len(tag.note()) > 0]
    return ', '.join(nonEmptyNotes)
|
||||
|
||||
def getData(self, row, column, item):
    """Return the display string for the cell at (row, column) for *item*.

    Dispatches on the column name from gCustomColumnList; unknown
    columns yield an empty string. All values are returned as strings.
    """
    currentColumn = self.gCustomColumnList[column]
    if currentColumn["name"] == "Tags":
        return self.getTagsString(item)

    if currentColumn["name"] == "Colourspace":
        try:
            colTransform = item.sourceMediaColourTransform()
        except Exception:
            # Best-effort: no transform available (e.g. offline media).
            colTransform = "--"
        return colTransform

    if currentColumn["name"] == "Notes":
        try:
            note = self.getNotes(item)
        except Exception:
            # Best-effort: fall back to an empty note string.
            note = ""
        return note

    if currentColumn["name"] == "FileType":
        fileType = "--"
        M = item.source().mediaSource().metadata()
        if M.hasKey("foundry.source.type"):
            fileType = M.value("foundry.source.type")
        elif M.hasKey("media.input.filereader"):
            fileType = M.value("media.input.filereader")
        return fileType

    if currentColumn["name"] == "Shot Status":
        status = item.status()
        if not status:
            status = "--"
        return str(status)

    if currentColumn["name"] == "MediaType":
        M = item.mediaType()
        # e.g. "MediaType.kVideo" -> "Video"
        return str(M).split("MediaType")[-1].replace(".k", "")

    if currentColumn["name"] == "Thumbnail":
        return str(item.eventNumber())

    if currentColumn["name"] == "Width":
        return str(item.source().format().width())

    if currentColumn["name"] == "Height":
        return str(item.source().format().height())

    if currentColumn["name"] == "Pixel Aspect":
        return str(item.source().format().pixelAspect())

    if currentColumn["name"] == "Artist":
        if item.artist():
            return item.artist()["artistName"]
        else:
            return "--"

    if currentColumn["name"] == "Department":
        if item.artist():
            return item.artist()["artistDepartment"]
        else:
            return "--"

    return ""
|
||||
|
||||
def setData(self, row, column, item, data):
    """No-op: edits are committed through the editor widgets' signals."""
    return None
|
||||
|
||||
def getTooltip(self, row, column, item):
    """Return hover text for a cell; only Tags and Notes have tooltips."""
    column_name = self.gCustomColumnList[column]["name"]
    if column_name == "Tags":
        return str([tag.name() for tag in item.tags()])
    if column_name == "Notes":
        return str(self.getNotes(item))
    return ""
|
||||
|
||||
def getFont(self, row, column, item):
    """Return the font for a cell; None means use the view's default."""
    return None
|
||||
|
||||
def getBackground(self, row, column, item):
    """Return a dark red background when the item's media is offline.

    None keeps the view's default background.
    """
    if item.source().mediaSource().isMediaPresent():
        return None
    return QColor(80, 20, 20)
|
||||
|
||||
def getForeground(self, row, column, item):
    """Return the text colour for a cell; None keeps the default.

    Per-column overrides could be returned here, e.g. a QColor for
    a specific column index.
    """
    return None
|
||||
|
||||
def getIcon(self, row, column, item):
    """Return the decoration icon for a cell, or None for no icon."""
    currentColumn = self.gCustomColumnList[column]
    if currentColumn["name"] == "Colourspace":
        return QIcon("icons:LUT.png")

    if currentColumn["name"] == "Shot Status":
        status = item.status()
        if status:
            return QIcon(gStatusTags[status])

    if currentColumn["name"] == "MediaType":
        mediaType = item.mediaType()
        if mediaType == hiero.core.TrackItem.kVideo:
            return QIcon("icons:VideoOnly.png")
        elif mediaType == hiero.core.TrackItem.kAudio:
            return QIcon("icons:AudioOnly.png")

    if currentColumn["name"] == "Artist":
        try:
            return QIcon(item.artist()["artistIcon"])
        except Exception:
            # No artist assigned (artist() returned None) - no icon.
            return None
    return None
|
||||
|
||||
def getSizeHint(self, row, column, item):
    """Return the preferred cell size; thumbnail cells are wider than
    the 50x50 default."""
    if self.gCustomColumnList[column]["name"] == "Thumbnail":
        return QSize(90, 50)
    return QSize(50, 50)
|
||||
|
||||
def paintCell(self, row, column, item, painter, option):
    """
    Paint a custom cell. Return True if the cell was painted, or False to continue
    with the default cell painting.

    Only the "Tags" and "Thumbnail" columns are custom painted here.
    """
    currentColumn = self.gCustomColumnList[column]
    if currentColumn["name"] == "Tags":
        if option.state & QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())
        # Lay the tag icons out left-to-right, vertically centred in the cell.
        iconSize = 20
        r = QRect(option.rect.x(),
                  option.rect.y() + (option.rect.height() - iconSize) / 2,
                  iconSize, iconSize)
        tags = item.tags()
        if len(tags) > 0:
            painter.save()
            painter.setClipRect(option.rect)
            for tag in item.tags():
                M = tag.metadata()
                # Skip the special status/artist tags; those are rendered
                # in their own columns.
                if not (M.hasKey("tag.status")
                        or M.hasKey("tag.artistID")):
                    QIcon(tag.icon()).paint(painter, r, Qt.AlignLeft)
                    r.translate(r.width() + 2, 0)
            painter.restore()
        return True

    if currentColumn["name"] == "Thumbnail":
        imageView = None
        pen = QPen()
        # 85x46 thumbnail rectangle, vertically centred in the cell.
        r = QRect(option.rect.x() + 2, (option.rect.y() +
                  (option.rect.height() - 46) / 2),
                  85, 46)
        if not item.source().mediaSource().isMediaPresent():
            imageView = QImage("icons:Offline.png")
            pen.setColor(QColor(Qt.red))

        if item.mediaType() == hiero.core.TrackItem.MediaType.kAudio:
            imageView = QImage("icons:AudioOnly.png")
            #pen.setColor(QColor(Qt.green))
            painter.fillRect(r, QColor(45, 59, 45))

        if option.state & QStyle.State_Selected:
            painter.fillRect(option.rect, option.palette.highlight())

        # NOTE(review): 'tags' is assigned here but never used.
        tags = item.tags()
        painter.save()
        painter.setClipRect(option.rect)

        # Prefer the track item's own thumbnail at its source-in frame.
        if not imageView:
            try:
                imageView = item.thumbnail(item.sourceIn())
                pen.setColor(QColor(20, 20, 20))
            # If we're here, we probably have a TC error, no thumbnail, so get it from the source Clip...
            except:
                pen.setColor(QColor(Qt.red))

        # Fall back to the source clip's thumbnail, then to the offline icon.
        if not imageView:
            try:
                imageView = item.source().thumbnail()
                pen.setColor(QColor(Qt.yellow))
            except:
                imageView = QImage("icons:Offline.png")
                pen.setColor(QColor(Qt.red))

        QIcon(QPixmap.fromImage(imageView)).paint(painter, r,
                                                  Qt.AlignCenter)
        # Pen colour encodes the thumbnail's provenance (ok/fallback/offline).
        painter.setPen(pen)
        painter.drawRoundedRect(r, 1, 1)
        painter.restore()
        return True

    return False
|
||||
|
||||
def createEditor(self, row, column, item, view):
    """Build and return an editor widget for an editable cell, or None.

    Read-only cells get a hidden, disabled QLabel so no editing occurs;
    dropdown cells get a QComboBox wired to the matching change handler.
    """
    self.currentView = view

    column_info = self.gCustomColumnList[column]
    column_name = column_info["name"]

    if column_info["cellType"] == "readonly":
        # A disabled, invisible widget effectively blocks editing.
        label = QLabel()
        label.setEnabled(False)
        label.setVisible(False)
        return label

    if column_name == "Colourspace":
        combo = QComboBox()
        for colourspace in self.gColourSpaces:
            combo.addItem(colourspace)
        combo.currentIndexChanged.connect(self.colourspaceChanged)
        return combo

    if column_name == "Shot Status":
        combo = QComboBox()
        combo.addItem("")
        for key in gStatusTags.keys():
            combo.addItem(QIcon(gStatusTags[key]), key)
        combo.addItem("--")
        combo.currentIndexChanged.connect(self.statusChanged)

        return combo

    if column_name == "Artist":
        combo = QComboBox()
        combo.addItem("")
        for artist in gArtistList:
            combo.addItem(artist["artistName"])
        combo.addItem("--")
        combo.currentIndexChanged.connect(self.artistNameChanged)
        return combo
    return None
|
||||
|
||||
def setModelData(self, row, column, item, editor):
    """No-op: editor widgets commit through their own change signals."""
    return False
|
||||
|
||||
def dropMimeData(self, row, column, item, data, items):
    """Handle a drag-and-drop onto a cell: add any dropped Tags to the shot."""
    for dropped in items:
        if isinstance(dropped, hiero.core.Tag):
            item.addTag(dropped)
    return None
|
||||
|
||||
def colourspaceChanged(self, index):
    """Apply the colourspace picked in the editor to every selected
    video track item, inside a single undo."""
    index = self.sender().currentIndex()
    colourspace = self.gColourSpaces[index]
    selection = self.currentView.selection()
    project = selection[0].project()
    with project.beginUndo("Set Colourspace"):
        for track_item in selection:
            if track_item.mediaType() == hiero.core.TrackItem.MediaType.kVideo:
                track_item.setSourceMediaColourTransform(colourspace)
|
||||
|
||||
def statusChanged(self, arg):
    """Apply the status picked in the editor to all selected shots.

    Picking "--" removes the status tag instead of setting one.
    """
    selection = hiero.ui.activeView().selection()
    status = self.sender().currentText()
    project = selection[0].project()
    with project.beginUndo("Set Status"):
        if status == "--":
            # "--" means clear: strip the special status tag.
            for track_item in selection:
                for tag in track_item.tags():
                    if tag.metadata().hasKey("tag.status"):
                        track_item.removeTag(tag)
                        break
        else:
            for track_item in selection:
                track_item.setStatus(status)
|
||||
|
||||
def artistNameChanged(self, arg):
    """Assign the artist picked in the editor to all selected shots.

    Picking "--" clears the assignment instead.
    """
    selection = hiero.ui.activeView().selection()
    name = self.sender().currentText()
    project = selection[0].project()
    with project.beginUndo("Assign Artist"):
        if name == "--":
            # "--" means clear: strip the special artist tag.
            for track_item in selection:
                for tag in track_item.tags():
                    if tag.metadata().hasKey("tag.artistID"):
                        track_item.removeTag(tag)
                        break
        else:
            for track_item in selection:
                track_item.setArtistByName(name)
|
||||
|
||||
|
||||
def _getArtistFromID(self, artistID):
    """Return the artist dict from gArtistList matching *artistID*, or None."""
    for candidate in gArtistList:
        if candidate["artistID"] == int(artistID):
            return candidate
    return None
|
||||
|
||||
|
||||
def _getArtistFromName(self, artistName):
    """Return the artist dict from gArtistList matching *artistName*, or None."""
    for candidate in gArtistList:
        if candidate["artistName"] == artistName:
            return candidate
    return None
|
||||
|
||||
|
||||
def _artist(self):
|
||||
"""_artist -> Returns the artist dictionary assigned to this shot"""
|
||||
artist = None
|
||||
tags = self.tags()
|
||||
for tag in tags:
|
||||
if tag.metadata().hasKey("tag.artistID"):
|
||||
artistID = tag.metadata().value("tag.artistID")
|
||||
artist = self.getArtistFromID(artistID)
|
||||
return artist
|
||||
|
||||
|
||||
def _updateArtistTag(self, artistDict):
|
||||
# A shot will only have one artist assigned. Check if one exists and set accordingly
|
||||
|
||||
artistTag = None
|
||||
tags = self.tags()
|
||||
for tag in tags:
|
||||
if tag.metadata().hasKey("tag.artistID"):
|
||||
artistTag = tag
|
||||
break
|
||||
|
||||
if not artistTag:
|
||||
artistTag = hiero.core.Tag("Artist")
|
||||
artistTag.setIcon(artistDict["artistIcon"])
|
||||
artistTag.metadata().setValue("tag.artistID",
|
||||
str(artistDict["artistID"]))
|
||||
artistTag.metadata().setValue("tag.artistName",
|
||||
str(artistDict["artistName"]))
|
||||
artistTag.metadata().setValue("tag.artistDepartment",
|
||||
str(artistDict["artistDepartment"]))
|
||||
self.sequence().editFinished()
|
||||
self.addTag(artistTag)
|
||||
self.sequence().editFinished()
|
||||
return
|
||||
|
||||
artistTag.setIcon(artistDict["artistIcon"])
|
||||
artistTag.metadata().setValue("tag.artistID", str(artistDict["artistID"]))
|
||||
artistTag.metadata().setValue("tag.artistName",
|
||||
str(artistDict["artistName"]))
|
||||
artistTag.metadata().setValue("tag.artistDepartment",
|
||||
str(artistDict["artistDepartment"]))
|
||||
self.sequence().editFinished()
|
||||
return
|
||||
|
||||
|
||||
def _setArtistByName(self, artistName):
|
||||
""" setArtistByName(artistName) -> sets the artist tag on a TrackItem by a given artistName string"""
|
||||
global gArtistList
|
||||
|
||||
artist = self.getArtistFromName(artistName)
|
||||
if not artist:
|
||||
print((
|
||||
"Artist name: {} was not found in "
|
||||
"the gArtistList.").format(artistName))
|
||||
return
|
||||
|
||||
# Do the update.
|
||||
self.updateArtistTag(artist)
|
||||
|
||||
|
||||
def _setArtistByID(self, artistID):
|
||||
""" setArtistByID(artistID) -> sets the artist tag on a TrackItem by a given artistID integer"""
|
||||
global gArtistList
|
||||
|
||||
artist = self.getArtistFromID(artistID)
|
||||
if not artist:
|
||||
print("Artist name: {} was not found in the gArtistList.".format(
|
||||
artistID))
|
||||
return
|
||||
|
||||
# Do the update.
|
||||
self.updateArtistTag(artist)
|
||||
|
||||
|
||||
# Inject artist getter/setter helpers into hiero.core.TrackItem so every
# track item gains an artist-assignment API (used by the custom columns
# and the context menus below).
hiero.core.TrackItem.artist = _artist
hiero.core.TrackItem.setArtistByName = _setArtistByName
hiero.core.TrackItem.setArtistByID = _setArtistByID
hiero.core.TrackItem.getArtistFromName = _getArtistFromName
hiero.core.TrackItem.getArtistFromID = _getArtistFromID
hiero.core.TrackItem.updateArtistTag = _updateArtistTag
|
||||
|
||||
|
||||
def _status(self):
|
||||
"""status -> Returns the Shot status. None if no Status is set."""
|
||||
|
||||
status = None
|
||||
tags = self.tags()
|
||||
for tag in tags:
|
||||
if tag.metadata().hasKey("tag.status"):
|
||||
status = tag.metadata().value("tag.status")
|
||||
return status
|
||||
|
||||
|
||||
def _setStatus(self, status):
    """setShotStatus(status) -> Method to set the Status of a Shot.
    Adds a special kind of status Tag to a TrackItem
    Example: myTrackItem.setStatus("Final")

    @param status - a string, corresponding to the Status name
    """
    global gStatusTags

    # Only accept statuses present in the global status table.
    if status not in gStatusTags:
        print("Status requested was not a valid Status string.")
        return

    # A shot should only have one status. Check if one exists and set accordingly
    statusTag = None
    for tag in self.tags():
        if tag.metadata().hasKey("tag.status"):
            statusTag = tag
            break

    if not statusTag:
        statusTag = hiero.core.Tag("Status")
        statusTag.setIcon(gStatusTags[status])
        statusTag.metadata().setValue("tag.status", status)
        self.addTag(statusTag)

    # NOTE(review): icon/metadata are re-applied to the local tag object
    # after addTag - presumably because addTag may hand the item its own
    # copy; confirm against hiero.core.TrackItem.addTag semantics.
    statusTag.setIcon(gStatusTags[status])
    statusTag.metadata().setValue("tag.status", status)

    self.sequence().editFinished()
    return
|
||||
|
||||
|
||||
# Inject status getter and setter methods into hiero.core.TrackItem so
# every track item exposes status(), used by the custom columns above.
hiero.core.TrackItem.setStatus = _setStatus
hiero.core.TrackItem.status = _status
|
||||
|
||||
|
||||
# This is a convenience method for returning QActions with a triggered method based on the title string
|
||||
def titleStringTriggeredAction(title, method, icon=None):
    """Return a QAction named *title* whose triggered handler calls
    method(title); *icon* is an optional icon path."""
    action = QAction(title, None)
    action.setIcon(QIcon(icon))

    # Close over the title so one callback shape serves every action.
    def _fire():
        method(title)

    action.triggered.connect(_fire)
    return action
|
||||
|
||||
|
||||
# Menu which adds a Set Status Menu to Timeline and Spreadsheet Views
|
||||
# Menu which adds a Set Status Menu to Timeline and Spreadsheet Views
class SetStatusMenu(QMenu):
    def __init__(self):
        QMenu.__init__(self, "Set Status", None)

        global gStatusTags
        self.statuses = gStatusTags
        # NOTE(review): createStatusMenuActions() returns None and fills
        # self.menuActions as a side effect, so _statusActions is always None.
        self._statusActions = self.createStatusMenuActions()

        # Add the Actions to the Menu.
        for act in self.menuActions:
            self.addAction(act)

        # Re-offer this menu whenever a context menu opens in these views.
        hiero.core.events.registerInterest("kShowContextMenu/kTimeline",
                                           self.eventHandler)
        hiero.core.events.registerInterest("kShowContextMenu/kSpreadsheet",
                                           self.eventHandler)

    def createStatusMenuActions(self):
        # Build one action per status; triggering it applies that status.
        self.menuActions = []
        for status in self.statuses:
            self.menuActions += [
                titleStringTriggeredAction(
                    status,
                    self.setStatusFromMenuSelection,
                    icon=gStatusTags[status])
            ]

    def setStatusFromMenuSelection(self, menuSelectionStatus):
        """Apply *menuSelectionStatus* to the selected shots (and to every
        shot on any selected tracks) inside one undo."""
        selectedShots = [
            item for item in self._selection
            if (isinstance(item, hiero.core.TrackItem))
        ]
        selectedTracks = [
            item for item in self._selection
            if (isinstance(item, (hiero.core.VideoTrack,
                                  hiero.core.AudioTrack)))
        ]

        # If we have a Track Header Selection, no shots could be selected, so create shotSelection list
        if len(selectedTracks) >= 1:
            for track in selectedTracks:
                selectedShots += [
                    item for item in track.items()
                    if (isinstance(item, hiero.core.TrackItem))
                ]

        # It's possible no shots exist on the Track, in which case nothing is required
        if len(selectedShots) == 0:
            return

        currentProject = selectedShots[0].project()

        with currentProject.beginUndo("Set Status"):
            # Shots selected
            for shot in selectedShots:
                shot.setStatus(menuSelectionStatus)

    # This handles context-menu events from the Timeline/Spreadsheet views
    def eventHandler(self, event):
        if not hasattr(event.sender, "selection"):
            # Something has gone wrong, we should only be here if raised
            # by the Timeline/Spreadsheet view which gives a selection.
            return

        # Set the current selection
        self._selection = event.sender.selection()

        # Return if there's no Selection. We won't add the Menu.
        if len(self._selection) == 0:
            return

        event.menu.addMenu(self)
|
||||
|
||||
|
||||
# Menu which adds a Set Status Menu to Timeline and Spreadsheet Views
|
||||
# Menu which adds an Assign Artist menu to Timeline and Spreadsheet Views
class AssignArtistMenu(QMenu):
    def __init__(self):
        QMenu.__init__(self, "Assign Artist", None)

        global gArtistList
        self.artists = gArtistList
        # NOTE(review): createAssignArtistMenuActions() returns None and
        # fills self.menuActions as a side effect, so _artistsActions is
        # always None.
        self._artistsActions = self.createAssignArtistMenuActions()

        # Add the Actions to the Menu.
        for act in self.menuActions:
            self.addAction(act)

        # Re-offer this menu whenever a context menu opens in these views.
        hiero.core.events.registerInterest("kShowContextMenu/kTimeline",
                                           self.eventHandler)
        hiero.core.events.registerInterest("kShowContextMenu/kSpreadsheet",
                                           self.eventHandler)

    def createAssignArtistMenuActions(self):
        # Build one action per artist; triggering it assigns that artist.
        self.menuActions = []
        for artist in self.artists:
            self.menuActions += [
                titleStringTriggeredAction(
                    artist["artistName"],
                    self.setArtistFromMenuSelection,
                    icon=artist["artistIcon"])
            ]

    def setArtistFromMenuSelection(self, menuSelectionArtist):
        """Assign *menuSelectionArtist* to the selected shots (and to every
        shot on any selected tracks) inside one undo."""
        selectedShots = [
            item for item in self._selection
            if (isinstance(item, hiero.core.TrackItem))
        ]
        selectedTracks = [
            item for item in self._selection
            if (isinstance(item, (hiero.core.VideoTrack,
                                  hiero.core.AudioTrack)))
        ]

        # If we have a Track Header Selection, no shots could be selected, so create shotSelection list
        if len(selectedTracks) >= 1:
            for track in selectedTracks:
                selectedShots += [
                    item for item in track.items()
                    if (isinstance(item, hiero.core.TrackItem))
                ]

        # It's possible no shots exist on the Track, in which case nothing is required
        if len(selectedShots) == 0:
            return

        currentProject = selectedShots[0].project()

        with currentProject.beginUndo("Assign Artist"):
            # Shots selected
            for shot in selectedShots:
                shot.setArtistByName(menuSelectionArtist)

    # This handles context-menu events from the Timeline/Spreadsheet views
    def eventHandler(self, event):
        if not hasattr(event.sender, "selection"):
            # Something has gone wrong, we should only be here if raised
            # by the Timeline/Spreadsheet view which gives a selection.
            return

        # Set the current selection
        self._selection = event.sender.selection()

        # Return if there's no Selection. We won't add the Menu.
        if len(self._selection) == 0:
            return

        event.menu.addMenu(self)
|
||||
|
||||
|
||||
# Add the "Set Status" context menu to Timeline and Spreadsheet
# (kAddStatusMenu / kAssignArtistMenu are feature switches defined earlier
# in this file).
if kAddStatusMenu:
    setStatusMenu = SetStatusMenu()

if kAssignArtistMenu:
    assignArtistMenu = AssignArtistMenu()

# Register our custom columns
hiero.ui.customColumn = CustomSpreadsheetColumns()
|
||||
|
|
@ -0,0 +1,142 @@
|
|||
# Purge Unused Clips - Removes any unused Clips from a Project
|
||||
# Usage: Copy to ~/.hiero/Python/StartupUI
|
||||
# Demonstrates the use of hiero.core.find_items module.
|
||||
# Usage: Right-click on an item in the Bin View > "Purge Unused Clips"
|
||||
# Result: Any Clips not used in a Sequence in the active project will be removed
|
||||
# Requires Hiero 1.5v1 or later.
|
||||
# Version 1.1
|
||||
|
||||
import hiero
|
||||
import hiero.core.find_items
|
||||
try:
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtCore import *
|
||||
except:
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtWidgets import *
|
||||
from PySide2.QtCore import *
|
||||
|
||||
|
||||
class PurgeUnusedAction(QAction):
    """Bin-view context-menu action that removes Clips not used by any
    Sequence in the active project (after user confirmation)."""

    def __init__(self):
        QAction.__init__(self, "Purge Unused Clips", None)
        self.triggered.connect(self.PurgeUnused)
        # Offer this action whenever a Bin-view context menu opens.
        hiero.core.events.registerInterest("kShowContextMenu/kBin",
                                           self.eventHandler)
        self.setIcon(QIcon("icons:TagDelete.png"))

    # Method to return whether a Bin is empty...
    def binIsEmpty(self, b):
        """Return True when bin *b* holds no BinItems or sub-Bins."""
        numBinItems = 0
        bItems = b.items()
        empty = False

        if len(bItems) == 0:
            empty = True
            return empty
        else:
            # Count only real bin contents (BinItems and sub-Bins).
            for b in bItems:
                if isinstance(b, hiero.core.BinItem) or isinstance(
                        b, hiero.core.Bin):
                    numBinItems += 1
            if numBinItems == 0:
                empty = True

        return empty

    def PurgeUnused(self):
        """Remove every Clip not referenced by any Sequence, asking the
        user for confirmation first."""

        #Get selected items
        item = self.selectedItem
        proj = item.project()

        # Find all Sequences in the project.
        SEQS = hiero.core.findItems(proj, "Sequences")

        # Start from all Clips; used ones are removed from this list below.
        CLIPSTOREMOVE = hiero.core.findItems(proj, "Clips")

        # With no Sequences, every Clip is "unused" - offer to remove all.
        if len(SEQS) == 0:
            # Present Dialog Asking if User wants to remove Clips
            msgBox = QMessageBox()
            msgBox.setText("Purge Unused Clips")
            msgBox.setInformativeText(
                "You have no Sequences in this Project. Do you want to remove all Clips (%i) from Project: %s?"
                % (len(CLIPSTOREMOVE), proj.name()))
            msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
            msgBox.setDefaultButton(QMessageBox.Ok)
            ret = msgBox.exec_()
            if ret == QMessageBox.Cancel:
                print("Not purging anything.")
            elif ret == QMessageBox.Ok:
                with proj.beginUndo("Purge Unused Clips"):
                    BINS = []
                    for clip in CLIPSTOREMOVE:
                        BI = clip.binItem()
                        B = BI.parentBin()
                        BINS += [B]
                        print("Removing: {}".format(BI))
                        try:
                            B.removeItem(BI)
                        except:
                            print("Unable to remove: {}".format(BI))
            return

        # For each sequence, iterate through each track Item, see if the Clip is in the CLIPS list.
        # Remaining items in CLIPS will be removed

        for seq in SEQS:

            #Loop through every track item of every track
            for track in seq:
                for trackitem in track:

                    if trackitem.source() in CLIPSTOREMOVE:
                        CLIPSTOREMOVE.remove(trackitem.source())

        # Present Dialog Asking if User wants to remove Clips
        msgBox = QMessageBox()
        msgBox.setText("Purge Unused Clips")
        msgBox.setInformativeText("Remove %i unused Clips from Project %s?" %
                                  (len(CLIPSTOREMOVE), proj.name()))
        msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
        msgBox.setDefaultButton(QMessageBox.Ok)
        ret = msgBox.exec_()

        if ret == QMessageBox.Cancel:
            print("Cancel")
            return
        elif ret == QMessageBox.Ok:
            BINS = []
            with proj.beginUndo("Purge Unused Clips"):
                # Delete the rest of the Clips
                for clip in CLIPSTOREMOVE:
                    BI = clip.binItem()
                    B = BI.parentBin()
                    BINS += [B]
                    print("Removing: {}".format(BI))
                    try:
                        B.removeItem(BI)
                    except:
                        print("Unable to remove: {}".format(BI))

    def eventHandler(self, event):
        """Attach this action to Bin-view context menus that carry a selection."""
        if not hasattr(event.sender, "selection"):
            # Something has gone wrong, we should only be here if raised
            # by the Bin view which will give a selection.
            return

        self.selectedItem = None
        s = event.sender.selection()

        if len(s) >= 1:
            self.selectedItem = s[0]
            title = "Purge Unused Clips"
            self.setText(title)
            event.menu.addAction(self)

        return
|
||||
|
||||
|
||||
# Instantiate the action to get it to register itself.
# NOTE(review): this rebinds the name from the class to the instance,
# so no further PurgeUnusedAction objects can be created afterwards.
PurgeUnusedAction = PurgeUnusedAction()
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
# nukeStyleKeyboardShortcuts, v1, 30/07/2012, Ant Nasce.
|
||||
# A few Nuke-Style File menu shortcuts for those whose muscle memory has set in...
|
||||
# Usage: Copy this file to ~/.hiero/Python/StartupUI/
|
||||
|
||||
import hiero.ui
|
||||
try:
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtCore import *
|
||||
except:
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtWidgets import *
|
||||
from PySide2.QtCore import *
|
||||
|
||||
#----------------------------------------------
def _setMenuActionShortcut(title, shortcut):
    """Assign *shortcut* to the menu action named *title*.

    findMenuAction returns None for actions absent from the current
    workspace (e.g. "Monitor Output"), so missing actions are skipped
    instead of raising AttributeError.
    """
    action = hiero.ui.findMenuAction(title)
    if action:
        action.setShortcut(QKeySequence(shortcut))


# Note: You probably best to make this 'Ctrl+R' - currently conflicts with "Red" in the Viewer!
_setMenuActionShortcut('Import File(s)...', 'R')
_setMenuActionShortcut('Import Folder(s)...', 'Shift+R')
_setMenuActionShortcut("Import EDL/XML/AAF...", 'Ctrl+Shift+O')
_setMenuActionShortcut("Metadata View", 'I')
_setMenuActionShortcut("Edit Settings", 'S')
_setMenuActionShortcut("Monitor Output", 'Ctrl+U')
#----------------------------------------------
|
||||
|
|
@ -0,0 +1,424 @@
|
|||
# MIT License
|
||||
#
|
||||
# Copyright (c) 2018 Daniel Flehner Heen (Storm Studios)
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be included in
|
||||
# all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
import os
|
||||
import sys
|
||||
import hiero.core
|
||||
import hiero.ui
|
||||
|
||||
try:
|
||||
from urllib import unquote
|
||||
|
||||
except ImportError:
|
||||
from urllib.parse import unquote # lint:ok
|
||||
|
||||
import opentimelineio as otio
|
||||
|
||||
|
||||
def get_transition_type(otio_item, otio_track):
    """Classify *otio_item* as "dissolve", "fade_in", "fade_out" or
    "unknown" based on its non-Gap neighbours on *otio_track*."""
    before, after = otio_track.neighbors_of(otio_item)

    has_in = bool(before) and not isinstance(before, otio.schema.Gap)
    has_out = bool(after) and not isinstance(after, otio.schema.Gap)

    if has_in and has_out:
        return "dissolve"
    if has_in:
        return "fade_out"
    if has_out:
        return "fade_in"
    return "unknown"
|
||||
|
||||
|
||||
def find_trackitem(name, hiero_track):
    """Return the first item on *hiero_track* named *name*, or None."""
    matches = (item for item in hiero_track.items() if item.name() == name)
    return next(matches, None)
|
||||
|
||||
|
||||
def get_neighboring_trackitems(otio_item, otio_track, hiero_track):
    """Return the (previous, next) Hiero track items matching the names of
    *otio_item*'s neighbours on *otio_track*; either slot may be None."""
    before, after = otio_track.neighbors_of(otio_item)

    trackitem_in = find_trackitem(before.name, hiero_track) if before else None
    trackitem_out = find_trackitem(after.name, hiero_track) if after else None

    return trackitem_in, trackitem_out
|
||||
|
||||
|
||||
def apply_transition(otio_track, otio_item, track):
    """Recreate the OTIO transition *otio_item* on the Hiero *track*.

    Resolves the transition kind (dissolve / fade in / fade out) from the
    item's neighbours, builds the matching hiero.core.Transition and adds
    it to the track. Failures are reported to stderr rather than raised.
    """
    # Figure out type of transition
    transition_type = get_transition_type(otio_item, otio_track)

    # Figure out track kind for getattr below
    # (video factory methods have no prefix, audio ones are "Audio...").
    if isinstance(track, hiero.core.VideoTrack):
        kind = ""

    else:
        kind = "Audio"

    try:
        # Gather TrackItems involved in transition
        item_in, item_out = get_neighboring_trackitems(
            otio_item,
            otio_track,
            track
        )

        # Create transition object
        if transition_type == "dissolve":
            transition_func = getattr(
                hiero.core.Transition,
                "create{kind}DissolveTransition".format(kind=kind)
            )

            transition = transition_func(
                item_in,
                item_out,
                otio_item.in_offset.value,
                otio_item.out_offset.value,
            )

        elif transition_type == "fade_in":
            transition_func = getattr(
                hiero.core.Transition,
                'create{kind}FadeInTransition'.format(kind=kind)
            )
            transition = transition_func(item_out, otio_item.out_offset.value)

        elif transition_type == "fade_out":
            transition_func = getattr(
                hiero.core.Transition,
                "create{kind}FadeOutTransition".format(kind=kind)
            )
            transition = transition_func(item_in, otio_item.in_offset.value)

        else:
            # Unknown transition
            return

        # Apply transition to track
        track.addTransition(transition)

    except Exception as e:
        sys.stderr.write(
            'Unable to apply transition "{t}": "{e}"\n'.format(
                t=otio_item, e=e
            )
        )
|
||||
|
||||
|
||||
def prep_url(url_in):
    """Normalize a (possibly percent-encoded) URL into a path Hiero accepts.

    ``file://localhost/`` URLs are reduced to their path portion, and a
    single leading path separator is stripped; everything else is returned
    percent-decoded but otherwise untouched.
    """
    url = unquote(url_in)

    prefix = "file://localhost/"
    if url.startswith(prefix):
        # NOTE: replace() removes every occurrence, mirroring the
        # long-standing behavior of this helper.
        return url.replace(prefix, "")

    # Drop one leading separator so the path joins cleanly downstream.
    return url[1:] if url.startswith(os.sep) else url
|
||||
|
||||
|
||||
def create_offline_mediasource(otio_clip, path=None):
    """Build an offline placeholder ``MediaSource`` for *otio_clip*.

    Used when the clip's media cannot be found on disk, so the timeline
    still shows an item with the correct duration and rate.

    Args:
        otio_clip: OTIO clip describing the media.
        path (str, optional): Display path for the placeholder; falls back
            to the clip name when omitted.
    """
    rate = hiero.core.TimeBase(otio_clip.source_range.start_time.rate)

    # External references know their full available range; anything else
    # only has the trimmed source range to go by.
    has_external_ref = isinstance(
        otio_clip.media_reference, otio.schema.ExternalReference
    )
    source_range = (
        otio_clip.available_range()
        if has_external_ref
        else otio_clip.source_range
    )

    display_path = otio_clip.name if path is None else path

    return hiero.core.MediaSource.createOfflineVideoMediaSource(
        prep_url(display_path),
        source_range.start_time.value,
        source_range.duration.value,
        rate,
        source_range.start_time.value,
    )
|
||||
|
||||
|
||||
def load_otio(otio_file, project=None, sequence=None):
    """Read an OTIO file and build it as a Hiero sequence.

    Args:
        otio_file (str): Path to the ``.otio`` file to import.
        project: Optional ``hiero.core.Project`` to build into; resolved by
            ``build_sequence`` when ``None``.
        sequence: Optional existing ``hiero.core.Sequence`` to add the
            imported tracks to instead of creating a new one.

    Fix: callers (the OTIO import menu action) invoke
    ``load_otio(otio_file, project, sequence)``, but the old one-argument
    signature raised ``TypeError``. The extra parameters default to
    ``None`` so single-argument callers keep working.
    """
    otio_timeline = otio.adapters.read_from_file(otio_file)
    build_sequence(otio_timeline, project=project, sequence=sequence)
|
||||
|
||||
|
||||
# Map OTIO marker colors to the nearest tag name available in Hiero.
# Colors without a direct Hiero counterpart (PINK, PURPLE, BLACK, WHITE,
# MINT) are approximated with a visually similar tag.
marker_color_map = {
    "PINK": "Magenta",
    "RED": "Red",
    "ORANGE": "Yellow",
    "YELLOW": "Yellow",
    "GREEN": "Green",
    "CYAN": "Cyan",
    "BLUE": "Blue",
    "PURPLE": "Magenta",
    "MAGENTA": "Magenta",
    "BLACK": "Blue",
    "WHITE": "Green",
    "MINT": "Cyan",
}
|
||||
|
||||
|
||||
def get_tag(tagname, tagsbin):
    """Depth-first search *tagsbin* for a tag named *tagname*.

    Nested ``hiero.core.Bin`` entries are searched recursively.

    Returns:
        The matching tag object, or ``None`` when no tag of that name
        exists anywhere in the bin hierarchy.
    """
    for entry in tagsbin.items():
        if entry.name() == tagname:
            return entry

        # Recurse into sub-bins; a hit anywhere wins.
        if isinstance(entry, hiero.core.Bin):
            found = get_tag(tagname, entry)
            if found is not None:
                return found

    return None
|
||||
|
||||
|
||||
def add_metadata(metadata, hiero_item):
    """Copy *metadata* onto *hiero_item* as ``tag.``-prefixed entries.

    Nested dicts are flattened recursively onto the same item; ``None``
    values are skipped. All values are stored as strings, which is what
    Hiero's metadata store expects.
    """
    for key, value in metadata.items():
        if isinstance(value, dict):
            # Flatten nested metadata onto the same hiero item.
            add_metadata(value, hiero_item)
        elif value is not None:
            prefixed = key if key.startswith("tag.") else "tag." + key
            hiero_item.metadata().setValue(prefixed, str(value))
|
||||
|
||||
|
||||
def add_markers(otio_item, hiero_item, tagsbin):
    """Mirror the OTIO markers on *otio_item* as tags on *hiero_item*.

    Markers are read from clips/stacks directly and from a timeline's
    track stack; other item types carry no markers. Marker metadata is
    copied onto the applied tag.
    """
    if isinstance(otio_item, (otio.schema.Stack, otio.schema.Clip)):
        markers = otio_item.markers
    elif isinstance(otio_item, otio.schema.Timeline):
        markers = otio_item.tracks.markers
    else:
        markers = []

    for marker in markers:
        # Hiero tag name corresponding to the marker's color.
        color_tagname = marker_color_map[marker.color]

        # Prefer a preset tag named after the marker, then one named after
        # the mapped color, and only then create a brand-new tag.
        preset = get_tag(marker.name, tagsbin)
        if preset is None:
            preset = get_tag(color_tagname, tagsbin)
        if preset is None:
            preset = hiero.core.Tag(color_tagname)

        applied = hiero_item.addTag(preset)
        applied.setName(marker.name or color_tagname)

        # Carry the marker's metadata over to the applied tag.
        add_metadata(marker.metadata, applied)
|
||||
|
||||
|
||||
def create_track(otio_track, tracknum, track_kind):
    """Create an empty Hiero track matching *otio_track*'s kind.

    Args:
        otio_track: OTIO track (or nested ``Stack``) being translated.
        tracknum (int): Index used to synthesize a name when the OTIO
            track is unnamed.
        track_kind: Kind to assign to nested stacks, which carry no kind
            of their own.
    """
    # Nested stacks inherit the parent track's kind.
    if isinstance(otio_track, otio.schema.Stack):
        otio_track.kind = track_kind

    if otio_track.kind == otio.schema.TrackKind.Video:
        factory, default_name = hiero.core.VideoTrack, "Video{n}"
    else:
        factory, default_name = hiero.core.AudioTrack, "Audio{n}"

    return factory(otio_track.name or default_name.format(n=tracknum))
|
||||
|
||||
|
||||
def create_clip(otio_clip):
    """Create a ``hiero.core.Clip`` for *otio_clip*.

    External references are resolved to a real ``MediaSource`` when the
    media exists on disk; otherwise (and for non-external references) an
    offline placeholder source is used instead.
    """
    reference = otio_clip.media_reference

    if isinstance(reference, otio.schema.ExternalReference):
        url = prep_url(reference.target_url)
        media = hiero.core.MediaSource(url)
        if media.isOffline():
            # Referenced media is missing on disk; fall back to a
            # placeholder that keeps the clip's timing intact.
            media = create_offline_mediasource(otio_clip, url)
    else:
        media = create_offline_mediasource(otio_clip)

    return hiero.core.Clip(media)
|
||||
|
||||
|
||||
def create_trackitem(playhead, track, otio_clip, clip, tagsbin):
    """Create a Hiero track item for *otio_clip* positioned at *playhead*.

    Applies any OTIO linear time-warp effects to the playback speed,
    computes source/timeline in-out points (swapping source in/out for
    reverse playback), and mirrors the clip's markers as tags.

    Args:
        playhead: Timeline frame where the item should start.
        track: Hiero track that creates the item.
        otio_clip: OTIO clip providing source range, effects and markers.
        clip: Hiero clip to use as the item's source.
        tagsbin: Bin of preset tags used for marker translation.

    Returns:
        The configured (but not yet added) Hiero track item.
    """
    source_range = otio_clip.source_range

    trackitem = track.createTrackItem(otio_clip.name)
    trackitem.setPlaybackSpeed(source_range.start_time.rate)
    trackitem.setSource(clip)

    # Check for speed effects and adjust playback speed accordingly
    # (multiple LinearTimeWarp effects multiply together).
    for effect in otio_clip.effects:
        if isinstance(effect, otio.schema.LinearTimeWarp):
            trackitem.setPlaybackSpeed(
                trackitem.playbackSpeed() * effect.time_scalar
            )

    # If reverse playback speed swap source in and out
    if trackitem.playbackSpeed() < 0:
        source_out = source_range.start_time.value
        source_in = (
            source_range.start_time.value + source_range.duration.value
        ) - 1
        # NOTE(review): timeline_in is offset by the source out frame here,
        # unlike the forward branch — TODO confirm this is intended.
        timeline_in = playhead + source_out
        timeline_out = (timeline_in + source_range.duration.value) - 1
    else:
        # Normal playback speed
        source_in = source_range.start_time.value
        source_out = (
            source_range.start_time.value + source_range.duration.value
        ) - 1
        timeline_in = playhead
        timeline_out = (timeline_in + source_range.duration.value) - 1

    # Set source and timeline in/out points
    trackitem.setSourceIn(source_in)
    trackitem.setSourceOut(source_out)
    trackitem.setTimelineIn(timeline_in)
    trackitem.setTimelineOut(timeline_out)

    # Add markers
    add_markers(otio_clip, trackitem, tagsbin)

    return trackitem
|
||||
|
||||
|
||||
def build_sequence(
    otio_timeline, project=None, sequence=None, track_kind=None
):
    """Build a Hiero sequence from an OTIO timeline (or a single track).

    Args:
        otio_timeline: OTIO ``Timeline`` (or a single track/stack) to build.
        project: Target ``hiero.core.Project``; resolved from *sequence*
            or the most recent user project when ``None``.
        sequence: Existing sequence to add tracks to; a new one is created
            (and binned) when ``None``.
        track_kind: Kind to assign to nested stacks that carry none.

    Fix: the recursive call for nested stacks previously passed
    ``otio_track.kind`` as the positional *sequence* argument; it is now
    passed as ``track_kind``.
    """
    if project is None:
        if sequence:
            project = sequence.project()
        else:
            # Per version 12.1v2 there is no way of getting active project
            project = hiero.core.projects(hiero.core.Project.kUserProjects)[-1]

    projectbin = project.clipsBin()

    if not sequence:
        # Create a Sequence
        sequence = hiero.core.Sequence(otio_timeline.name or "OTIOSequence")

        # Set sequence settings from otio timeline if available
        if hasattr(otio_timeline, "global_start_time"):
            if otio_timeline.global_start_time:
                start_time = otio_timeline.global_start_time
                sequence.setFramerate(start_time.rate)
                sequence.setTimecodeStart(start_time.value)

        # Create a Bin to hold clips
        projectbin.addItem(hiero.core.BinItem(sequence))

        sequencebin = hiero.core.Bin(sequence.name())
        projectbin.addItem(sequencebin)

    else:
        sequencebin = projectbin

    # Get tagsBin
    tagsbin = hiero.core.project("Tag Presets").tagsBin()

    # Add timeline markers
    add_markers(otio_timeline, sequence, tagsbin)

    if isinstance(otio_timeline, otio.schema.Timeline):
        tracks = otio_timeline.tracks
    else:
        tracks = [otio_timeline]

    for tracknum, otio_track in enumerate(tracks):
        playhead = 0
        _transitions = []

        # Add track to sequence
        track = create_track(otio_track, tracknum, track_kind)
        sequence.addTrack(track)

        # Iterate over items in track
        for otio_clip in otio_track:
            if isinstance(otio_clip, otio.schema.Stack):
                bar = hiero.ui.mainWindow().statusBar()
                bar.showMessage(
                    "Nested sequences are created separately.", timeout=3000
                )
                # BUGFIX: otio_track.kind was passed positionally into the
                # `sequence` parameter; it belongs in `track_kind`.
                build_sequence(otio_clip, project, track_kind=otio_track.kind)

            elif isinstance(otio_clip, otio.schema.Clip):
                # Create a Clip
                clip = create_clip(otio_clip)

                # Add Clip to a Bin
                sequencebin.addItem(hiero.core.BinItem(clip))

                # Create TrackItem
                trackitem = create_trackitem(
                    playhead, track, otio_clip, clip, tagsbin
                )

                # Add trackitem to track
                track.addTrackItem(trackitem)

                # Update playhead
                playhead = trackitem.timelineOut() + 1

            elif isinstance(otio_clip, otio.schema.Transition):
                # Store transitions for when all clips in the track are created
                _transitions.append((otio_track, otio_clip))

            elif isinstance(otio_clip, otio.schema.Gap):
                # Hiero has no fillers, slugs or blanks at the moment
                playhead += otio_clip.source_range.duration.value

        # Apply transitions we stored earlier now that all clips are present
        for otio_track, otio_item in _transitions:
            apply_transition(otio_track, otio_item, track)
|
||||
|
|
@ -0,0 +1,141 @@
|
|||
#!/usr/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
__author__ = "Daniel Flehner Heen"
|
||||
__credits__ = ["Jakub Jezek", "Daniel Flehner Heen"]
|
||||
|
||||
import hiero.ui
|
||||
import hiero.core
|
||||
|
||||
import PySide2.QtWidgets as qw
|
||||
|
||||
from ayon_core.hosts.hiero.api.otio.hiero_import import load_otio
|
||||
|
||||
|
||||
class OTIOProjectSelect(qw.QDialog):
    """Modal dialog asking the user which open project to import into.

    Shown when the target project cannot be inferred from the current
    view/selection. The chosen project is read back via
    ``dialog.projects.currentIndex()`` after ``exec_()`` returns accepted.
    """

    def __init__(self, projects, *args, **kwargs):
        # `projects` is an iterable of objects exposing .name()
        # (hiero.core.Project instances at the call sites in this file).
        super(OTIOProjectSelect, self).__init__(*args, **kwargs)
        self.setWindowTitle("Please select active project")
        # NOTE(review): `self.layout` shadows QWidget.layout(); harmless
        # here but worth renaming if this dialog grows.
        self.layout = qw.QVBoxLayout()

        self.label = qw.QLabel(
            "Unable to determine which project to import sequence to.\n"
            "Please select one."
        )
        self.layout.addWidget(self.label)

        # Combo box ordered the same as `projects`, so currentIndex()
        # maps directly back to the source list.
        self.projects = qw.QComboBox()
        self.projects.addItems(map(lambda p: p.name(), projects))
        self.layout.addWidget(self.projects)

        QBtn = qw.QDialogButtonBox.Ok | qw.QDialogButtonBox.Cancel
        self.buttonBox = qw.QDialogButtonBox(QBtn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)

        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)
|
||||
|
||||
|
||||
def get_sequence(view):
    """Return the sequence associated with *view*, or ``None``.

    Timeline editors expose their sequence directly; for bin views the
    current selection is scanned for a bin item holding a sequence (the
    last matching item wins).
    """
    sequence = None
    if isinstance(view, hiero.ui.TimelineEditor):
        sequence = view.sequence()

    elif isinstance(view, hiero.ui.BinView):
        for item in view.selection():
            # BUGFIX: was hasattr(item, "acitveItem") — the typo made the
            # guard always fail, so no bin selection ever resolved.
            if not hasattr(item, "activeItem"):
                continue

            if isinstance(item.activeItem(), hiero.core.Sequence):
                sequence = item.activeItem()

    return sequence
|
||||
|
||||
|
||||
def OTIO_menu_action(event):
    """Inject the OTIO import actions into the context menu for *event*.

    Adds "Import OTIO..." under the Import submenu and "New Track(s) from
    OTIO..." under both Import and New Track; the latter is only enabled
    when the current view resolves to a sequence.
    """
    otio_import_action = hiero.ui.createMenuAction(
        "Import OTIO...",
        open_otio_file,
        icon=None
    )

    otio_add_track_action = hiero.ui.createMenuAction(
        "New Track(s) from OTIO...",
        open_otio_file,
        icon=None
    )
    # Disabled until we know there is a sequence to add tracks to.
    otio_add_track_action.setEnabled(False)

    hiero.ui.registerAction(otio_import_action)
    hiero.ui.registerAction(otio_add_track_action)

    view = hiero.ui.currentContextMenuView()
    if view and get_sequence(view):
        otio_add_track_action.setEnabled(True)

    for action in event.menu.actions():
        title = action.text()
        if title == "Import":
            action.menu().addAction(otio_import_action)
            action.menu().addAction(otio_add_track_action)
        elif title == "New Track":
            action.menu().addAction(otio_add_track_action)
|
||||
|
||||
|
||||
def open_otio_file():
    """Prompt for ``.otio`` files and import each into a resolved project.

    Project resolution order: the current view's sequence, the first
    selected item's project, a user choice via dialog (when several
    projects are open), otherwise the most recent project.

    Fix: returns early when the file browser is cancelled, instead of
    still running project resolution (and possibly popping the project
    selection dialog) with nothing to import.
    """
    files = hiero.ui.openFileBrowser(
        caption="Please select an OTIO file of choice",
        pattern="*.otio",
        requiredExtension=".otio"
    )
    # Browser cancelled / nothing chosen: nothing to do.
    if not files:
        return

    selection = None
    sequence = None

    view = hiero.ui.currentContextMenuView()
    if view:
        sequence = get_sequence(view)
        selection = view.selection()

    if sequence:
        project = sequence.project()

    elif selection:
        project = selection[0].project()

    elif len(hiero.core.projects()) > 1:
        dialog = OTIOProjectSelect(hiero.core.projects())
        if dialog.exec_():
            project = hiero.core.projects()[dialog.projects.currentIndex()]

        else:
            bar = hiero.ui.mainWindow().statusBar()
            bar.showMessage(
                "OTIO Import aborted by user",
                timeout=3000
            )
            return

    else:
        project = hiero.core.projects()[-1]

    for otio_file in files:
        load_otio(otio_file, project, sequence)
|
||||
|
||||
|
||||
# HieroPlayer is quite limited and can't create transitions etc.
if not hiero.core.isHieroPlayer():
    # Show the OTIO import actions in both the bin and the timeline
    # right-click context menus.
    hiero.core.events.registerInterest(
        "kShowContextMenu/kBin",
        OTIO_menu_action
    )
    hiero.core.events.registerInterest(
        "kShowContextMenu/kTimeline",
        OTIO_menu_action
    )
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
import hiero.core
|
||||
import hiero.ui
|
||||
try:
|
||||
from PySide.QtGui import *
|
||||
from PySide.QtCore import *
|
||||
except:
|
||||
from PySide2.QtGui import *
|
||||
from PySide2.QtWidgets import *
|
||||
from PySide2.QtCore import *
|
||||
|
||||
|
||||
def setPosterFrame(posterFrame=.5):
    """Set the poster frame of the clips selected in the active view.

    Args:
        posterFrame (float): Relative poster position; 0 uses the first
            frame, .5 the centre frame, 1 the last frame.
    """
    view = hiero.ui.activeView()

    for selected in view.selection():
        # Bin items wrap the clip in activeItem(); plain clips are used
        # directly.
        clip = (
            selected.activeItem()
            if hasattr(selected, "activeItem")
            else selected
        )
        clip.setPosterFrame(int(clip.duration() * posterFrame))
|
||||
|
||||
|
||||
class SetPosterFrameAction(QAction):
    """Bin context-menu action that centres the poster frame of the
    selected clips.

    Instantiating this class registers it with the bin right-click menu;
    triggering it calls ``setPosterFrame(.5)``.
    """

    def __init__(self):
        QAction.__init__(self, "Set Poster Frame (centre)", None)
        # Currently unused; kept as a placeholder for selection state.
        self._selection = None

        # Always use the centre frame (.5) when triggered.
        self.triggered.connect(lambda: setPosterFrame(.5))
        hiero.core.events.registerInterest("kShowContextMenu/kBin",
                                           self.eventHandler)

    def eventHandler(self, event):
        # NOTE(review): `view` is assigned but never used — presumably
        # left over from earlier selection handling.
        view = event.sender
        # Add the Menu to the right-click menu
        event.menu.addAction(self)
|
||||
|
||||
|
||||
# The act of initialising the action adds it to the right-click menu...
# (registration happens in SetPosterFrameAction.__init__, so the instance
# itself does not need to be kept in a variable)
SetPosterFrameAction()
|
||||
|
|
@ -0,0 +1,198 @@
|
|||
<root presetname="pipeline" tasktype="hiero.exporters.FnShotProcessor.ShotProcessor">
|
||||
<startFrameIndex valuetype="int">991</startFrameIndex>
|
||||
<exportRoot valuetype="str">//10.11.0.184/171001_ftrack/tgbvfx/editorial/hiero/workspace/</exportRoot>
|
||||
<versionIndex valuetype="int">1</versionIndex>
|
||||
<cutUseHandles valuetype="bool">True</cutUseHandles>
|
||||
<versionPadding valuetype="int">3</versionPadding>
|
||||
<exportTemplate valuetype="list">
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial_raw.%04d.{fileext}</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnSymLinkExporter.SymLinkPreset">
|
||||
<root presetname="hiero.exporters.FnSymLinkExporter.SymLinkExporter" tasktype="hiero.exporters.FnSymLinkExporter.SymLinkExporter">
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">exr</file_type>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<channels valuetype="str">all</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<includeEffects valuetype="bool">False</includeEffects>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<exr valuetype="dict">
|
||||
<compression valuetype="str">Zip (16 scanline)</compression>
|
||||
<datatype valuetype="str">32 bit float</datatype>
|
||||
<noprefix valuetype="bool">False</noprefix>
|
||||
<write_full_layer_names valuetype="bool">False</write_full_layer_names>
|
||||
<standard_layer_name_format valuetype="bool">False</standard_layer_name_format>
|
||||
<interleave valuetype="str">channels, layers and views</interleave>
|
||||
<dw_compression_level valuetype="float">45.0</dw_compression_level>
|
||||
<truncateChannelNames valuetype="bool">False</truncateChannelNames>
|
||||
<metadata valuetype="str">all metadata</metadata>
|
||||
</exr>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">None</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial.%04d.{ext}</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnTranscodeExporter.TranscodePreset">
|
||||
<root presetname="hiero.exporters.FnTranscodeExporter.TranscodeExporter" tasktype="hiero.exporters.FnTranscodeExporter.TranscodeExporter">
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">exr</file_type>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<channels valuetype="str">all</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<includeEffects valuetype="bool">True</includeEffects>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<exr valuetype="dict">
|
||||
<compression valuetype="str">Zip (16 scanline)</compression>
|
||||
<datatype valuetype="str">16 bit half</datatype>
|
||||
<noprefix valuetype="bool">False</noprefix>
|
||||
<write_full_layer_names valuetype="bool">False</write_full_layer_names>
|
||||
<standard_layer_name_format valuetype="bool">False</standard_layer_name_format>
|
||||
<interleave valuetype="str">channels, layers and views</interleave>
|
||||
<dw_compression_level valuetype="float">45.0</dw_compression_level>
|
||||
<truncateChannelNames valuetype="bool">False</truncateChannelNames>
|
||||
<metadata valuetype="str">all metadata</metadata>
|
||||
</exr>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">To Sequence Resolution</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial.nk</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnNukeShotExporter.NukeShotPreset">
|
||||
<root presetname="hiero.exporters.FnNukeShotExporter.NukeShotExporter" tasktype="hiero.exporters.FnNukeShotExporter.NukeShotExporter">
|
||||
<postProcessScript valuetype="bool">True</postProcessScript>
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">mov</file_type>
|
||||
<annotationsPreCompPaths valuetype="list" />
|
||||
<channels valuetype="str">rgb</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<readPaths valuetype="list" />
|
||||
<connectTracks valuetype="bool">False</connectTracks>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<collateSequence valuetype="bool">False</collateSequence>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<collateShotNames valuetype="bool">True</collateShotNames>
|
||||
<includeEffects valuetype="bool">True</includeEffects>
|
||||
<writePaths valuetype="list">
|
||||
<SequenceItem valuetype="str">{shot}/editorial_raw.%04d.{fileext}</SequenceItem>
|
||||
</writePaths>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">None</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
<includeAnnotations valuetype="bool">False</includeAnnotations>
|
||||
<enable valuetype="bool">True</enable>
|
||||
<showAnnotations valuetype="bool">True</showAnnotations>
|
||||
<mov valuetype="dict">
|
||||
<b_frames valuetype="int">0</b_frames>
|
||||
<bitrate_tolerance valuetype="int">40000000</bitrate_tolerance>
|
||||
<gop_size valuetype="int">12</gop_size>
|
||||
<quality_max valuetype="int">31</quality_max>
|
||||
<quality_min valuetype="int">2</quality_min>
|
||||
<codec valuetype="str">avc1	H.264</codec>
|
||||
<ycbcr_matrix_type valuetype="str">Auto</ycbcr_matrix_type>
|
||||
<encoder valuetype="str">mov32</encoder>
|
||||
<bitrate valuetype="int">20000</bitrate>
|
||||
</mov>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<collateCustomStart valuetype="bool">True</collateCustomStart>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<timelineWriteNode valuetype="str">{shot}/editorial_raw.%04d.{fileext}</timelineWriteNode>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<collateTracks valuetype="bool">False</collateTracks>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
</exportTemplate>
|
||||
<excludeTags valuetype="list" />
|
||||
<includeTags valuetype="list" />
|
||||
<includeRetimes valuetype="bool">False</includeRetimes>
|
||||
<startFrameSource valuetype="str">Custom</startFrameSource>
|
||||
<cutLength valuetype="bool">True</cutLength>
|
||||
<cutHandles valuetype="int">10</cutHandles>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,198 @@
|
|||
<root presetname="pipeline" tasktype="hiero.exporters.FnShotProcessor.ShotProcessor">
|
||||
<startFrameIndex valuetype="int">991</startFrameIndex>
|
||||
<exportRoot valuetype="str">//10.11.0.184/171001_ftrack/tgbvfx/editorial/hiero/workspace/</exportRoot>
|
||||
<versionIndex valuetype="int">1</versionIndex>
|
||||
<cutUseHandles valuetype="bool">True</cutUseHandles>
|
||||
<versionPadding valuetype="int">3</versionPadding>
|
||||
<exportTemplate valuetype="list">
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial_raw.%04d.{fileext}</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnSymLinkExporter.SymLinkPreset">
|
||||
<root presetname="hiero.exporters.FnSymLinkExporter.SymLinkExporter" tasktype="hiero.exporters.FnSymLinkExporter.SymLinkExporter">
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">exr</file_type>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<channels valuetype="str">all</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<includeEffects valuetype="bool">False</includeEffects>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<exr valuetype="dict">
|
||||
<compression valuetype="str">Zip (16 scanline)</compression>
|
||||
<datatype valuetype="str">32 bit float</datatype>
|
||||
<noprefix valuetype="bool">False</noprefix>
|
||||
<write_full_layer_names valuetype="bool">False</write_full_layer_names>
|
||||
<standard_layer_name_format valuetype="bool">False</standard_layer_name_format>
|
||||
<interleave valuetype="str">channels, layers and views</interleave>
|
||||
<dw_compression_level valuetype="float">45.0</dw_compression_level>
|
||||
<truncateChannelNames valuetype="bool">False</truncateChannelNames>
|
||||
<metadata valuetype="str">all metadata</metadata>
|
||||
</exr>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">None</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial.%04d.{ext}</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnTranscodeExporter.TranscodePreset">
|
||||
<root presetname="hiero.exporters.FnTranscodeExporter.TranscodeExporter" tasktype="hiero.exporters.FnTranscodeExporter.TranscodeExporter">
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">exr</file_type>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<channels valuetype="str">all</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<includeEffects valuetype="bool">True</includeEffects>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<exr valuetype="dict">
|
||||
<compression valuetype="str">Zip (16 scanline)</compression>
|
||||
<datatype valuetype="str">16 bit half</datatype>
|
||||
<noprefix valuetype="bool">False</noprefix>
|
||||
<write_full_layer_names valuetype="bool">False</write_full_layer_names>
|
||||
<standard_layer_name_format valuetype="bool">False</standard_layer_name_format>
|
||||
<interleave valuetype="str">channels, layers and views</interleave>
|
||||
<dw_compression_level valuetype="float">45.0</dw_compression_level>
|
||||
<truncateChannelNames valuetype="bool">False</truncateChannelNames>
|
||||
<metadata valuetype="str">all metadata</metadata>
|
||||
</exr>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">To Sequence Resolution</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial.nk</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnNukeShotExporter.NukeShotPreset">
|
||||
<root presetname="hiero.exporters.FnNukeShotExporter.NukeShotExporter" tasktype="hiero.exporters.FnNukeShotExporter.NukeShotExporter">
|
||||
<postProcessScript valuetype="bool">True</postProcessScript>
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">mov</file_type>
|
||||
<annotationsPreCompPaths valuetype="list" />
|
||||
<channels valuetype="str">rgb</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<readPaths valuetype="list" />
|
||||
<connectTracks valuetype="bool">False</connectTracks>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<collateSequence valuetype="bool">False</collateSequence>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<collateShotNames valuetype="bool">True</collateShotNames>
|
||||
<includeEffects valuetype="bool">True</includeEffects>
|
||||
<writePaths valuetype="list">
|
||||
<SequenceItem valuetype="str">{shot}/editorial_raw.%04d.{fileext}</SequenceItem>
|
||||
</writePaths>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">None</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
<includeAnnotations valuetype="bool">False</includeAnnotations>
|
||||
<enable valuetype="bool">True</enable>
|
||||
<showAnnotations valuetype="bool">True</showAnnotations>
|
||||
<mov valuetype="dict">
|
||||
<b_frames valuetype="int">0</b_frames>
|
||||
<bitrate_tolerance valuetype="int">40000000</bitrate_tolerance>
|
||||
<gop_size valuetype="int">12</gop_size>
|
||||
<quality_max valuetype="int">31</quality_max>
|
||||
<quality_min valuetype="int">2</quality_min>
|
||||
<codec valuetype="str">avc1	H.264</codec>
|
||||
<ycbcr_matrix_type valuetype="str">Auto</ycbcr_matrix_type>
|
||||
<encoder valuetype="str">mov32</encoder>
|
||||
<bitrate valuetype="int">20000</bitrate>
|
||||
</mov>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<collateCustomStart valuetype="bool">True</collateCustomStart>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<timelineWriteNode valuetype="str">{shot}/editorial_raw.%04d.{fileext}</timelineWriteNode>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<collateTracks valuetype="bool">False</collateTracks>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
</exportTemplate>
|
||||
<excludeTags valuetype="list" />
|
||||
<includeTags valuetype="list" />
|
||||
<includeRetimes valuetype="bool">False</includeRetimes>
|
||||
<startFrameSource valuetype="str">Custom</startFrameSource>
|
||||
<cutLength valuetype="bool">True</cutLength>
|
||||
<cutHandles valuetype="int">10</cutHandles>
|
||||
</root>
|
||||
|
|
@ -0,0 +1,198 @@
|
|||
<root presetname="pipeline" tasktype="hiero.exporters.FnShotProcessor.ShotProcessor">
|
||||
<startFrameIndex valuetype="int">991</startFrameIndex>
|
||||
<exportRoot valuetype="str">//10.11.0.184/171001_ftrack/tgbvfx/editorial/hiero/workspace/</exportRoot>
|
||||
<versionIndex valuetype="int">1</versionIndex>
|
||||
<cutUseHandles valuetype="bool">True</cutUseHandles>
|
||||
<versionPadding valuetype="int">3</versionPadding>
|
||||
<exportTemplate valuetype="list">
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial_raw.%04d.{fileext}</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnSymLinkExporter.SymLinkPreset">
|
||||
<root presetname="hiero.exporters.FnSymLinkExporter.SymLinkExporter" tasktype="hiero.exporters.FnSymLinkExporter.SymLinkExporter">
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">exr</file_type>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<channels valuetype="str">all</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<includeEffects valuetype="bool">False</includeEffects>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<exr valuetype="dict">
|
||||
<compression valuetype="str">Zip (16 scanline)</compression>
|
||||
<datatype valuetype="str">32 bit float</datatype>
|
||||
<noprefix valuetype="bool">False</noprefix>
|
||||
<write_full_layer_names valuetype="bool">False</write_full_layer_names>
|
||||
<standard_layer_name_format valuetype="bool">False</standard_layer_name_format>
|
||||
<interleave valuetype="str">channels, layers and views</interleave>
|
||||
<dw_compression_level valuetype="float">45.0</dw_compression_level>
|
||||
<truncateChannelNames valuetype="bool">False</truncateChannelNames>
|
||||
<metadata valuetype="str">all metadata</metadata>
|
||||
</exr>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">None</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial.%04d.{ext}</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnTranscodeExporter.TranscodePreset">
|
||||
<root presetname="hiero.exporters.FnTranscodeExporter.TranscodeExporter" tasktype="hiero.exporters.FnTranscodeExporter.TranscodeExporter">
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">exr</file_type>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<channels valuetype="str">all</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<includeEffects valuetype="bool">True</includeEffects>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<exr valuetype="dict">
|
||||
<compression valuetype="str">Zip (16 scanline)</compression>
|
||||
<datatype valuetype="str">16 bit half</datatype>
|
||||
<noprefix valuetype="bool">False</noprefix>
|
||||
<write_full_layer_names valuetype="bool">False</write_full_layer_names>
|
||||
<standard_layer_name_format valuetype="bool">False</standard_layer_name_format>
|
||||
<interleave valuetype="str">channels, layers and views</interleave>
|
||||
<dw_compression_level valuetype="float">45.0</dw_compression_level>
|
||||
<truncateChannelNames valuetype="bool">False</truncateChannelNames>
|
||||
<metadata valuetype="str">all metadata</metadata>
|
||||
</exr>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">To Sequence Resolution</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
<SequenceItem valuetype="tuple">
|
||||
<SequenceItem valuetype="str">{shot}/editorial.nk</SequenceItem>
|
||||
<SequenceItem valuetype="hiero.exporters.FnNukeShotExporter.NukeShotPreset">
|
||||
<root presetname="hiero.exporters.FnNukeShotExporter.NukeShotExporter" tasktype="hiero.exporters.FnNukeShotExporter.NukeShotExporter">
|
||||
<postProcessScript valuetype="bool">True</postProcessScript>
|
||||
<colourspace valuetype="str">default</colourspace>
|
||||
<file_type valuetype="unicode">mov</file_type>
|
||||
<annotationsPreCompPaths valuetype="list" />
|
||||
<channels valuetype="str">rgb</channels>
|
||||
<includeAudio valuetype="bool">False</includeAudio>
|
||||
<readPaths valuetype="list" />
|
||||
<connectTracks valuetype="bool">False</connectTracks>
|
||||
<useSingleSocket valuetype="bool">False</useSingleSocket>
|
||||
<collateSequence valuetype="bool">False</collateSequence>
|
||||
<additionalNodesData valuetype="list" />
|
||||
<collateShotNames valuetype="bool">True</collateShotNames>
|
||||
<includeEffects valuetype="bool">True</includeEffects>
|
||||
<writePaths valuetype="list">
|
||||
<SequenceItem valuetype="str">{shot}/editorial_raw.%04d.{fileext}</SequenceItem>
|
||||
</writePaths>
|
||||
<reformat valuetype="dict">
|
||||
<filter valuetype="str">Cubic</filter>
|
||||
<to_type valuetype="str">None</to_type>
|
||||
<scale valuetype="float">1.0</scale>
|
||||
<center valuetype="bool">True</center>
|
||||
<resize valuetype="str">width</resize>
|
||||
</reformat>
|
||||
<keepNukeScript valuetype="bool">False</keepNukeScript>
|
||||
<method valuetype="str">Blend</method>
|
||||
<includeAnnotations valuetype="bool">False</includeAnnotations>
|
||||
<enable valuetype="bool">True</enable>
|
||||
<showAnnotations valuetype="bool">True</showAnnotations>
|
||||
<mov valuetype="dict">
|
||||
<b_frames valuetype="int">0</b_frames>
|
||||
<bitrate_tolerance valuetype="int">40000000</bitrate_tolerance>
|
||||
<gop_size valuetype="int">12</gop_size>
|
||||
<quality_max valuetype="int">31</quality_max>
|
||||
<quality_min valuetype="int">2</quality_min>
|
||||
<codec valuetype="str">avc1	H.264</codec>
|
||||
<ycbcr_matrix_type valuetype="str">Auto</ycbcr_matrix_type>
|
||||
<encoder valuetype="str">mov32</encoder>
|
||||
<bitrate valuetype="int">20000</bitrate>
|
||||
</mov>
|
||||
<readAllLinesForExport valuetype="bool">False</readAllLinesForExport>
|
||||
<deleteAudio valuetype="bool">True</deleteAudio>
|
||||
<collateCustomStart valuetype="bool">True</collateCustomStart>
|
||||
<burninDataEnabled valuetype="bool">False</burninDataEnabled>
|
||||
<additionalNodesEnabled valuetype="bool">False</additionalNodesEnabled>
|
||||
<timelineWriteNode valuetype="str">{shot}/editorial_raw.%04d.{fileext}</timelineWriteNode>
|
||||
<burninData valuetype="dict">
|
||||
<burnIn_bottomRight valuetype="NoneType">None</burnIn_bottomRight>
|
||||
<burnIn_topLeft valuetype="NoneType">None</burnIn_topLeft>
|
||||
<burnIn_topMiddle valuetype="NoneType">None</burnIn_topMiddle>
|
||||
<burnIn_padding valuetype="NoneType">None</burnIn_padding>
|
||||
<burnIn_topRight valuetype="NoneType">None</burnIn_topRight>
|
||||
<burnIn_bottomMiddle valuetype="NoneType">None</burnIn_bottomMiddle>
|
||||
<burnIn_bottomLeft valuetype="NoneType">None</burnIn_bottomLeft>
|
||||
<burnIn_textSize valuetype="NoneType">None</burnIn_textSize>
|
||||
<burnIn_font valuetype="NoneType">None</burnIn_font>
|
||||
</burninData>
|
||||
<dpx valuetype="dict">
|
||||
<datatype valuetype="str">8 bit</datatype>
|
||||
<transfer valuetype="str">(auto detect)</transfer>
|
||||
<bigEndian valuetype="bool">True</bigEndian>
|
||||
<fill valuetype="bool">False</fill>
|
||||
</dpx>
|
||||
<writeNodeName valuetype="str">Write_{ext}</writeNodeName>
|
||||
<collateTracks valuetype="bool">False</collateTracks>
|
||||
</root>
|
||||
</SequenceItem>
|
||||
</SequenceItem>
|
||||
</exportTemplate>
|
||||
<excludeTags valuetype="list" />
|
||||
<includeTags valuetype="list" />
|
||||
<includeRetimes valuetype="bool">False</includeRetimes>
|
||||
<startFrameSource valuetype="str">Custom</startFrameSource>
|
||||
<cutLength valuetype="bool">True</cutLength>
|
||||
<cutHandles valuetype="int">10</cutHandles>
|
||||
</root>
|
||||
26
server_addon/hiero/client/ayon_hiero/api/style.css
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
QWidget {
|
||||
font-size: 13px;
|
||||
}
|
||||
|
||||
QSpinBox {
|
||||
padding: 2;
|
||||
max-width: 8em;
|
||||
}
|
||||
|
||||
QLineEdit {
|
||||
padding: 2;
|
||||
min-width: 15em;
|
||||
}
|
||||
|
||||
QVBoxLayout {
|
||||
min-width: 15em;
|
||||
background-color: #201f1f;
|
||||
}
|
||||
|
||||
QComboBox {
|
||||
min-width: 8em;
|
||||
}
|
||||
|
||||
#sectionContent {
|
||||
background-color: #2E2D2D;
|
||||
}
|
||||
197
server_addon/hiero/client/ayon_hiero/api/tags.py
Normal file
|
|
@ -0,0 +1,197 @@
|
|||
import json
|
||||
import re
|
||||
import hiero
|
||||
|
||||
import ayon_api
|
||||
|
||||
from ayon_core.lib import Logger
|
||||
from ayon_core.pipeline import get_current_project_name
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
def tag_data():
|
||||
return {
|
||||
"[Lenses]": {
|
||||
"Set lense here": {
|
||||
"editable": "1",
|
||||
"note": "Adjust parameters of your lense and then drop to clip. Remember! You can always overwrite on clip", # noqa
|
||||
"icon": "lense.png",
|
||||
"metadata": {
|
||||
"focalLengthMm": 57
|
||||
|
||||
}
|
||||
}
|
||||
},
|
||||
# "NukeScript": {
|
||||
# "editable": "1",
|
||||
# "note": "Collecting track items to Nuke scripts.",
|
||||
# "icon": "icons:TagNuke.png",
|
||||
# "metadata": {
|
||||
# "productType": "nukescript",
|
||||
# "productName": "main"
|
||||
# }
|
||||
# },
|
||||
"Comment": {
|
||||
"editable": "1",
|
||||
"note": "Comment on a shot.",
|
||||
"icon": "icons:TagComment.png",
|
||||
"metadata": {
|
||||
"productType": "comment",
|
||||
"productName": "main"
|
||||
}
|
||||
},
|
||||
"FrameMain": {
|
||||
"editable": "1",
|
||||
"note": "Publishing a frame product.",
|
||||
"icon": "z_layer_main.png",
|
||||
"metadata": {
|
||||
"productType": "frame",
|
||||
"productName": "main",
|
||||
"format": "png"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def create_tag(key, data):
|
||||
"""
|
||||
Creating Tag object.
|
||||
|
||||
Args:
|
||||
key (str): name of tag
|
||||
data (dict): parameters of tag
|
||||
|
||||
Returns:
|
||||
object: Tag object
|
||||
"""
|
||||
tag = hiero.core.Tag(str(key))
|
||||
return update_tag(tag, data)
|
||||
|
||||
|
||||
def update_tag(tag, data):
|
||||
"""
|
||||
Fixing Tag object.
|
||||
|
||||
Args:
|
||||
tag (obj): Tag object
|
||||
data (dict): parameters of tag
|
||||
"""
|
||||
# set icon if any available in input data
|
||||
if data.get("icon"):
|
||||
tag.setIcon(str(data["icon"]))
|
||||
|
||||
# get metadata of tag
|
||||
mtd = tag.metadata()
|
||||
# get metadata key from data
|
||||
data_mtd = data.get("metadata", {})
|
||||
|
||||
# set all data metadata to tag metadata
|
||||
for _k, _v in data_mtd.items():
|
||||
value = str(_v)
|
||||
if isinstance(_v, dict):
|
||||
value = json.dumps(_v)
|
||||
|
||||
# set the value
|
||||
mtd.setValue(
|
||||
"tag.{}".format(str(_k)),
|
||||
value
|
||||
)
|
||||
|
||||
# set note description of tag
|
||||
tag.setNote(str(data["note"]))
|
||||
return tag
|
||||
|
||||
|
||||
def add_tags_to_workfile():
|
||||
"""
|
||||
Will create default tags from presets.
|
||||
"""
|
||||
from .lib import get_current_project
|
||||
|
||||
def add_tag_to_bin(root_bin, name, data):
|
||||
# for Tags to be created in root level Bin
|
||||
# at first check if any of input data tag is not already created
|
||||
done_tag = next((t for t in root_bin.items()
|
||||
if str(name) in t.name()), None)
|
||||
|
||||
if not done_tag:
|
||||
# create Tag
|
||||
tag = create_tag(name, data)
|
||||
tag.setName(str(name))
|
||||
|
||||
log.debug("__ creating tag: {}".format(tag))
|
||||
# adding Tag to Root Bin
|
||||
root_bin.addItem(tag)
|
||||
else:
|
||||
# update only non hierarchy tags
|
||||
update_tag(done_tag, data)
|
||||
done_tag.setName(str(name))
|
||||
log.debug("__ updating tag: {}".format(done_tag))
|
||||
|
||||
# get project and root bin object
|
||||
project = get_current_project()
|
||||
root_bin = project.tagsBin()
|
||||
|
||||
if "Tag Presets" in project.name():
|
||||
return
|
||||
|
||||
log.debug("Setting default tags on project: {}".format(project.name()))
|
||||
|
||||
# get hiero tags.json
|
||||
nks_pres_tags = tag_data()
|
||||
|
||||
# Get project task types.
|
||||
project_name = get_current_project_name()
|
||||
project_entity = ayon_api.get_project(project_name)
|
||||
task_types = project_entity["taskTypes"]
|
||||
nks_pres_tags["[Tasks]"] = {}
|
||||
log.debug("__ tasks: {}".format(task_types))
|
||||
for task_type in task_types:
|
||||
task_type_name = task_type["name"]
|
||||
nks_pres_tags["[Tasks]"][task_type_name.lower()] = {
|
||||
"editable": "1",
|
||||
"note": task_type_name,
|
||||
"icon": "icons:TagGood.png",
|
||||
"metadata": {
|
||||
"productType": "task",
|
||||
"type": task_type_name
|
||||
}
|
||||
}
|
||||
|
||||
# loop through tag data dict and create deep tag structure
|
||||
for _k, _val in nks_pres_tags.items():
|
||||
# check if key is not decorated with [] so it is defined as bin
|
||||
bin_find = None
|
||||
pattern = re.compile(r"\[(.*)\]")
|
||||
_bin_finds = pattern.findall(_k)
|
||||
# if there is available any then pop it to string
|
||||
if _bin_finds:
|
||||
bin_find = _bin_finds.pop()
|
||||
|
||||
# if bin was found then create or update
|
||||
if bin_find:
|
||||
root_add = False
|
||||
# first check if in root lever is not already created bins
|
||||
bins = [b for b in root_bin.items()
|
||||
if b.name() in str(bin_find)]
|
||||
|
||||
if bins:
|
||||
bin = bins.pop()
|
||||
else:
|
||||
root_add = True
|
||||
# create Bin object for processing
|
||||
bin = hiero.core.Bin(str(bin_find))
|
||||
|
||||
# update or create tags in the bin
|
||||
for __k, __v in _val.items():
|
||||
add_tag_to_bin(bin, __k, __v)
|
||||
|
||||
# finally add the Bin object to the root level Bin
|
||||
if root_add:
|
||||
# adding Tag to Root Bin
|
||||
root_bin.addItem(bin)
|
||||
else:
|
||||
add_tag_to_bin(root_bin, _k, _val)
|
||||
|
||||
log.info("Default Tags were set...")
|
||||
72
server_addon/hiero/client/ayon_hiero/api/workio.py
Normal file
|
|
@ -0,0 +1,72 @@
|
|||
import os
|
||||
import hiero
|
||||
|
||||
from ayon_core.lib import Logger
|
||||
|
||||
log = Logger.get_logger(__name__)
|
||||
|
||||
|
||||
def file_extensions():
|
||||
return [".hrox"]
|
||||
|
||||
|
||||
def has_unsaved_changes():
|
||||
# There are no methods for querying unsaved changes to a project, so
|
||||
# enforcing to always save.
|
||||
# but we could at least check if a current open script has a path
|
||||
project = hiero.core.projects()[-1]
|
||||
if project.path():
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
def save_file(filepath):
|
||||
file = os.path.basename(filepath)
|
||||
project = hiero.core.projects()[-1]
|
||||
|
||||
if project:
|
||||
log.info("Saving project: `{}` as '{}'".format(project.name(), file))
|
||||
project.saveAs(filepath)
|
||||
else:
|
||||
log.info("Creating new project...")
|
||||
project = hiero.core.newProject()
|
||||
project.saveAs(filepath)
|
||||
|
||||
|
||||
def open_file(filepath):
|
||||
"""Manually fire the kBeforeProjectLoad event in order to work around a bug in Hiero.
|
||||
The Foundry has logged this bug as:
|
||||
Bug 40413 - Python API - kBeforeProjectLoad event type is not triggered
|
||||
when calling hiero.core.openProject() (only triggered through UI)
|
||||
It exists in all versions of Hiero through (at least) v1.9v1b12.
|
||||
|
||||
Once this bug is fixed, a version check will need to be added here in order to
|
||||
prevent accidentally firing this event twice. The following commented-out code
|
||||
is just an example, and will need to be updated when the bug is fixed to catch the
|
||||
correct versions."""
|
||||
# if (hiero.core.env['VersionMajor'] < 1 or
|
||||
# hiero.core.env['VersionMajor'] == 1 and hiero.core.env['VersionMinor'] < 10:
|
||||
hiero.core.events.sendEvent("kBeforeProjectLoad", None)
|
||||
|
||||
project = hiero.core.projects()[-1]
|
||||
|
||||
# Close previous project if its different to the current project.
|
||||
filepath = filepath.replace(os.path.sep, "/")
|
||||
if project.path().replace(os.path.sep, "/") != filepath:
|
||||
# open project file
|
||||
hiero.core.openProject(filepath)
|
||||
project.close()
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def current_file():
|
||||
current_file = hiero.core.projects()[-1].path()
|
||||
if not current_file:
|
||||
return None
|
||||
return os.path.normpath(current_file)
|
||||
|
||||
|
||||
def work_root(session):
|
||||
return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")
|
||||
|
|
@ -0,0 +1,262 @@
|
|||
from copy import deepcopy
|
||||
import ayon_core.hosts.hiero.api as phiero
|
||||
# from ayon_core.hosts.hiero.api import plugin, lib
|
||||
# reload(lib)
|
||||
# reload(plugin)
|
||||
# reload(phiero)
|
||||
|
||||
|
||||
class CreateShotClip(phiero.Creator):
|
||||
"""Publishable clip"""
|
||||
|
||||
label = "Create Publishable Clip"
|
||||
product_type = "clip"
|
||||
icon = "film"
|
||||
defaults = ["Main"]
|
||||
|
||||
gui_tracks = [track.name()
|
||||
for track in phiero.get_current_sequence().videoTracks()]
|
||||
gui_name = "AYON publish attributes creator"
|
||||
gui_info = "Define sequential rename and fill hierarchy data."
|
||||
gui_inputs = {
|
||||
"renameHierarchy": {
|
||||
"type": "section",
|
||||
"label": "Shot Hierarchy And Rename Settings",
|
||||
"target": "ui",
|
||||
"order": 0,
|
||||
"value": {
|
||||
"hierarchy": {
|
||||
"value": "{folder}/{sequence}",
|
||||
"type": "QLineEdit",
|
||||
"label": "Shot Parent Hierarchy",
|
||||
"target": "tag",
|
||||
"toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa
|
||||
"order": 0},
|
||||
"clipRename": {
|
||||
"value": False,
|
||||
"type": "QCheckBox",
|
||||
"label": "Rename clips",
|
||||
"target": "ui",
|
||||
"toolTip": "Renaming selected clips on fly", # noqa
|
||||
"order": 1},
|
||||
"clipName": {
|
||||
"value": "{sequence}{shot}",
|
||||
"type": "QLineEdit",
|
||||
"label": "Clip Name Template",
|
||||
"target": "ui",
|
||||
"toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa
|
||||
"order": 2},
|
||||
"countFrom": {
|
||||
"value": 10,
|
||||
"type": "QSpinBox",
|
||||
"label": "Count sequence from",
|
||||
"target": "ui",
|
||||
"toolTip": "Set when the sequence number stafrom", # noqa
|
||||
"order": 3},
|
||||
"countSteps": {
|
||||
"value": 10,
|
||||
"type": "QSpinBox",
|
||||
"label": "Stepping number",
|
||||
"target": "ui",
|
||||
"toolTip": "What number is adding every new step", # noqa
|
||||
"order": 4},
|
||||
}
|
||||
},
|
||||
"hierarchyData": {
|
||||
"type": "dict",
|
||||
"label": "Shot Template Keywords",
|
||||
"target": "tag",
|
||||
"order": 1,
|
||||
"value": {
|
||||
"folder": {
|
||||
"value": "shots",
|
||||
"type": "QLineEdit",
|
||||
"label": "{folder}",
|
||||
"target": "tag",
|
||||
"toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
|
||||
"order": 0},
|
||||
"episode": {
|
||||
"value": "ep01",
|
||||
"type": "QLineEdit",
|
||||
"label": "{episode}",
|
||||
"target": "tag",
|
||||
"toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
|
||||
"order": 1},
|
||||
"sequence": {
|
||||
"value": "sq01",
|
||||
"type": "QLineEdit",
|
||||
"label": "{sequence}",
|
||||
"target": "tag",
|
||||
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
|
||||
"order": 2},
|
||||
"track": {
|
||||
"value": "{_track_}",
|
||||
"type": "QLineEdit",
|
||||
"label": "{track}",
|
||||
"target": "tag",
|
||||
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
|
||||
"order": 3},
|
||||
"shot": {
|
||||
"value": "sh###",
|
||||
"type": "QLineEdit",
|
||||
"label": "{shot}",
|
||||
"target": "tag",
|
||||
"toolTip": "Name of shot. `#` is converted to paded number. \nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
|
||||
"order": 4}
|
||||
}
|
||||
},
|
||||
"verticalSync": {
|
||||
"type": "section",
|
||||
"label": "Vertical Synchronization Of Attributes",
|
||||
"target": "ui",
|
||||
"order": 2,
|
||||
"value": {
|
||||
"vSyncOn": {
|
||||
"value": True,
|
||||
"type": "QCheckBox",
|
||||
"label": "Enable Vertical Sync",
|
||||
"target": "ui",
|
||||
"toolTip": "Switch on if you want clips above each other to share its attributes", # noqa
|
||||
"order": 0},
|
||||
"vSyncTrack": {
|
||||
"value": gui_tracks, # noqa
|
||||
"type": "QComboBox",
|
||||
"label": "Hero track",
|
||||
"target": "ui",
|
||||
"toolTip": "Select driving track name which should be hero for all others", # noqa
|
||||
"order": 1}
|
||||
}
|
||||
},
|
||||
"publishSettings": {
|
||||
"type": "section",
|
||||
"label": "Publish Settings",
|
||||
"target": "ui",
|
||||
"order": 3,
|
||||
"value": {
|
||||
"productName": {
|
||||
"value": ["<track_name>", "main", "bg", "fg", "bg",
|
||||
"animatic"],
|
||||
"type": "QComboBox",
|
||||
"label": "Product Name",
|
||||
"target": "ui",
|
||||
"toolTip": "chose product name pattern, if <track_name> is selected, name of track layer will be used", # noqa
|
||||
"order": 0},
|
||||
"productType": {
|
||||
"value": ["plate", "take"],
|
||||
"type": "QComboBox",
|
||||
"label": "Product Type",
|
||||
"target": "ui", "toolTip": "What use of this product is for", # noqa
|
||||
"order": 1},
|
||||
"reviewTrack": {
|
||||
"value": ["< none >"] + gui_tracks,
|
||||
"type": "QComboBox",
|
||||
"label": "Use Review Track",
|
||||
"target": "ui",
|
||||
"toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa
|
||||
"order": 2},
|
||||
"audio": {
|
||||
"value": False,
|
||||
"type": "QCheckBox",
|
||||
"label": "Include audio",
|
||||
"target": "tag",
|
||||
"toolTip": "Process products with corresponding audio", # noqa
|
||||
"order": 3},
|
||||
"sourceResolution": {
|
||||
"value": False,
|
||||
"type": "QCheckBox",
|
||||
"label": "Source resolution",
|
||||
"target": "tag",
|
||||
"toolTip": "Is resolution taken from timeline or source?", # noqa
|
||||
"order": 4},
|
||||
}
|
||||
},
|
||||
"frameRangeAttr": {
|
||||
"type": "section",
|
||||
"label": "Shot Attributes",
|
||||
"target": "ui",
|
||||
"order": 4,
|
||||
"value": {
|
||||
"workfileFrameStart": {
|
||||
"value": 1001,
|
||||
"type": "QSpinBox",
|
||||
"label": "Workfiles Start Frame",
|
||||
"target": "tag",
|
||||
"toolTip": "Set workfile starting frame number", # noqa
|
||||
"order": 0
|
||||
},
|
||||
"handleStart": {
|
||||
"value": 0,
|
||||
"type": "QSpinBox",
|
||||
"label": "Handle Start",
|
||||
"target": "tag",
|
||||
"toolTip": "Handle at start of clip", # noqa
|
||||
"order": 1
|
||||
},
|
||||
"handleEnd": {
|
||||
"value": 0,
|
||||
"type": "QSpinBox",
|
||||
"label": "Handle End",
|
||||
"target": "tag",
|
||||
"toolTip": "Handle at end of clip", # noqa
|
||||
"order": 2
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
presets = None
|
||||
|
||||
def process(self):
|
||||
# Creator copy of object attributes that are modified during `process`
|
||||
presets = deepcopy(self.presets)
|
||||
gui_inputs = deepcopy(self.gui_inputs)
|
||||
|
||||
# get key pairs from presets and match it on ui inputs
|
||||
for k, v in gui_inputs.items():
|
||||
if v["type"] in ("dict", "section"):
|
||||
# nested dictionary (only one level allowed
|
||||
# for sections and dict)
|
||||
for _k, _v in v["value"].items():
|
||||
if presets.get(_k):
|
||||
gui_inputs[k][
|
||||
"value"][_k]["value"] = presets[_k]
|
||||
if presets.get(k):
|
||||
gui_inputs[k]["value"] = presets[k]
|
||||
|
||||
# open widget for plugins inputs
|
||||
widget = self.widget(self.gui_name, self.gui_info, gui_inputs)
|
||||
widget.exec_()
|
||||
|
||||
if len(self.selected) < 1:
|
||||
return
|
||||
|
||||
if not widget.result:
|
||||
print("Operation aborted")
|
||||
return
|
||||
|
||||
self.rename_add = 0
|
||||
|
||||
# get ui output for track name for vertical sync
|
||||
v_sync_track = widget.result["vSyncTrack"]["value"]
|
||||
|
||||
# sort selected trackItems by
|
||||
sorted_selected_track_items = list()
|
||||
unsorted_selected_track_items = list()
|
||||
for _ti in self.selected:
|
||||
if _ti.parent().name() in v_sync_track:
|
||||
sorted_selected_track_items.append(_ti)
|
||||
else:
|
||||
unsorted_selected_track_items.append(_ti)
|
||||
|
||||
sorted_selected_track_items.extend(unsorted_selected_track_items)
|
||||
|
||||
kwargs = {
|
||||
"ui_inputs": widget.result,
|
||||
"avalon": self.data
|
||||
}
|
||||
|
||||
for i, track_item in enumerate(sorted_selected_track_items):
|
||||
self.rename_index = i
|
||||
|
||||
# convert track item to timeline media pool item
|
||||
phiero.PublishClip(self, track_item, **kwargs).convert()
|
||||
230
server_addon/hiero/client/ayon_hiero/plugins/load/load_clip.py
Normal file
|
|
@ -0,0 +1,230 @@
|
|||
import ayon_api
|
||||
|
||||
from ayon_core.pipeline import get_representation_path
|
||||
from ayon_core.lib.transcoding import (
|
||||
VIDEO_EXTENSIONS,
|
||||
IMAGE_EXTENSIONS
|
||||
)
|
||||
import ayon_core.hosts.hiero.api as phiero
|
||||
|
||||
|
||||
class LoadClip(phiero.SequenceLoader):
    """Load a product to timeline as clip

    Place clip to timeline on its asset origin timings collected
    during conforming to project
    """

    product_types = {"render2d", "source", "plate", "render", "review"}
    representations = {"*"}
    # accept every known image/video extension (without the leading dot)
    extensions = set(
        ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
    )

    label = "Load as clip"
    order = -10
    icon = "code-fork"
    color = "orange"

    # for loader multiselection (shared across instances via class attrs)
    sequence = None
    track = None

    # presets: bin-item colors marking latest vs. outdated versions
    clip_color_last = "green"
    clip_color = "red"

    clip_name_template = "{asset}_{subset}_{representation}"

    @classmethod
    def apply_settings(cls, project_settings):
        """Override class attributes from project settings.

        Reads ``hiero/load/<PluginName>`` from *project_settings* and
        applies every option as a class attribute, except
        ``representations`` (kept as defined) and ``enabled: False``
        (only reported, not applied).
        """
        plugin_type_settings = (
            project_settings
            .get("hiero", {})
            .get("load", {})
        )

        if not plugin_type_settings:
            return

        plugin_name = cls.__name__

        # Look for plugin settings in host specific settings
        plugin_settings = plugin_type_settings.get(plugin_name)
        if not plugin_settings:
            return

        print(">>> We have preset for {}".format(plugin_name))
        for option, value in plugin_settings.items():
            if option == "representations":
                continue

            if option == "clip_name_template":
                # TODO remove the formatting replacement
                # settings use new-style keys; template below still
                # expects the legacy {asset}/{subset} placeholders
                value = (
                    value
                    .replace("{folder[name]}", "{asset}")
                    .replace("{product[name]}", "{subset}")
                )

            if option == "enabled" and value is False:
                print(" - is disabled by preset")
            else:
                print(" - setting `{}`: `{}`".format(option, value))
                setattr(cls, option, value)

    def load(self, context, name, namespace, options):
        """Load the representation as a clip on the timeline.

        Args:
            context (dict): full loader context (project/folder/version/
                representation entities).
            name (str): product name.
            namespace (str): namespace; falls back to the created
                track item's name when empty.
            options (dict): loader options, extended in place.

        Returns:
            containerised track item (result of ``phiero.containerise``).
        """
        # add clip name template to options
        options.update({
            "clipNameTemplate": self.clip_name_template
        })
        # in case loader uses multiselection
        if self.track and self.sequence:
            options.update({
                "sequence": self.sequence,
                "track": self.track,
                "clipNameTemplate": self.clip_name_template
            })

        # load clip to timeline and get main variables
        path = self.filepath_from_context(context)
        track_item = phiero.ClipLoader(self, context, path, **options).load()
        namespace = namespace or track_item.name()
        version_entity = context["version"]
        version_attributes = version_entity["attrib"]
        version_name = version_entity["version"]
        colorspace = version_attributes.get("colorSpace")
        object_name = self.clip_name_template.format(
            **context["representation"]["context"])

        # set colorspace
        if colorspace:
            track_item.source().setSourceMediaColourTransform(colorspace)

        # add additional metadata from the version to imprint Avalon knob
        add_keys = [
            "frameStart", "frameEnd", "source", "author",
            "fps", "handleStart", "handleEnd"
        ]

        # move all version data keys to tag data
        # (missing attributes are stored as the string "None")
        data_imprint = {
            key: version_attributes.get(key, str(None))
            for key in add_keys
        }

        # add variables related to version context
        data_imprint.update({
            "version": version_name,
            "colorspace": colorspace,
            "objectName": object_name
        })

        # update color of clip regarding the version order
        self.set_item_color(
            context["project"]["name"], track_item, version_entity
        )

        # deal with multiselection
        self.multiselection(track_item)

        self.log.info("Loader done: `{}`".format(name))

        return phiero.containerise(
            track_item,
            name, namespace, context,
            self.__class__.__name__,
            data_imprint)

    def switch(self, container, context):
        # switching a representation is the same as updating it
        self.update(container, context)

    def update(self, container, context):
        """ Updating previously loaded clips
        """
        version_entity = context["version"]
        repre_entity = context["representation"]

        # load clip to timeline and get main variables
        name = container["name"]
        namespace = container["namespace"]
        # NOTE(review): assumes exactly one track item matches the
        # namespace; `.pop()` raises on empty result — TODO confirm
        track_item = phiero.get_track_items(
            track_item_name=namespace).pop()

        version_attributes = version_entity["attrib"]
        version_name = version_entity["version"]
        colorspace = version_attributes.get("colorSpace")
        object_name = "{}_{}".format(name, namespace)

        file = get_representation_path(repre_entity).replace("\\", "/")
        clip = track_item.source()

        # reconnect media to new path
        clip.reconnectMedia(file)

        # set colorspace
        if colorspace:
            clip.setSourceMediaColourTransform(colorspace)

        # add additional metadata from the version to imprint metadata knob

        # move all version data keys to tag data
        data_imprint = {}
        for key in [
            "frameStart",
            "frameEnd",
            "source",
            "author",
            "fps",
            "handleStart",
            "handleEnd",
        ]:
            data_imprint.update({
                key: version_attributes.get(key, str(None))
            })

        # add variables related to version context
        data_imprint.update({
            "representation": repre_entity["id"],
            "version": version_name,
            "colorspace": colorspace,
            "objectName": object_name
        })

        # update color of clip regarding the version order
        self.set_item_color(
            context["project"]["name"], track_item, version_entity
        )

        return phiero.update_container(track_item, data_imprint)

    def remove(self, container):
        """ Removing previously loaded clips
        """
        # load clip to timeline and get main variables
        namespace = container['namespace']
        track_item = phiero.get_track_items(
            track_item_name=namespace).pop()
        track = track_item.parent()

        # remove track item from track
        track.removeItem(track_item)

    @classmethod
    def multiselection(cls, track_item):
        # remember first loaded track/sequence so subsequent loads in the
        # same multiselection land on the same track
        if not cls.track:
            cls.track = track_item.parent()
            cls.sequence = cls.track.parent()

    @classmethod
    def set_item_color(cls, project_name, track_item, version_entity):
        """Color the clip's bin item by version freshness.

        Latest version gets ``clip_color_last``, anything older gets
        ``clip_color``.
        """
        # NOTE(review): ayon_api may return None when the product has no
        # versions; the subscript below would then raise — TODO confirm
        last_version_entity = ayon_api.get_last_version_by_product_id(
            project_name, version_entity["productId"], fields={"id"}
        )
        clip = track_item.source()
        # set clip colour
        if version_entity["id"] == last_version_entity["id"]:
            clip.binItem().setColor(cls.clip_color_last)
        else:
            clip.binItem().setColor(cls.clip_color)
|
||||
|
|
@ -0,0 +1,305 @@
|
|||
import json
|
||||
from collections import OrderedDict
|
||||
import six
|
||||
|
||||
from ayon_core.pipeline import (
|
||||
AVALON_CONTAINER_ID,
|
||||
load,
|
||||
get_representation_path,
|
||||
)
|
||||
from ayon_core.hosts.hiero import api as phiero
|
||||
from ayon_core.lib import Logger
|
||||
|
||||
|
||||
class LoadEffects(load.LoaderPlugin):
    """Load colorspace soft effects exported from NukeStudio.

    Reads a json file describing Nuke soft-effect nodes and recreates
    them as effect sub-track items on a "Loaded_<name>" video track of
    the active Hiero sequence, then containerises the track.
    """

    product_types = {"effect"}
    representations = {"*"}
    # NOTE(review): renamed from the singular ``extension`` — sibling
    # loaders (see LoadClip) use the plural ``extensions`` attribute,
    # so the singular spelling was never read and the json filter did
    # not apply.
    extensions = {"json"}

    label = "Load Effects"
    order = 0
    icon = "cc"
    color = "white"

    log = Logger.get_logger(__name__)

    def load(self, context, name, namespace, data):
        """
        Loading function to get the soft effects to particular read node

        Arguments:
            context (dict): context of version
            name (str): name of the version
            namespace (str): Folder name.
            data (dict): compulsory attribute > not used

        Returns:
            None: effects are containerised on the track in place.
        """
        active_sequence = phiero.get_current_sequence()
        active_track = phiero.get_current_track(
            active_sequence, "Loaded_{}".format(name))

        # get main variables
        namespace = namespace or context["folder"]["name"]
        object_name = "{}_{}".format(name, namespace)
        clip_in = context["folder"]["attrib"]["clipIn"]
        clip_out = context["folder"]["attrib"]["clipOut"]

        data_imprint = {
            "objectName": object_name,
            "children_names": []
        }

        # getting file path (normalized to forward slashes)
        file = self.filepath_from_context(context)
        file = file.replace("\\", "/")

        if self._shared_loading(
            file,
            active_track,
            clip_in,
            clip_out,
            data_imprint
        ):
            self.containerise(
                active_track,
                name=name,
                namespace=namespace,
                object_name=object_name,
                context=context,
                loader=self.__class__.__name__,
                data=data_imprint)

    def _shared_loading(
        self,
        file,
        active_track,
        clip_in,
        clip_out,
        data_imprint,
        update=False
    ):
        """Create or refresh effect sub-track items from the json file.

        Arguments:
            file (str): path to the exported effects json.
            active_track (hiero.core.VideoTrack): target track.
            clip_in (int): timeline in-point for the created effects.
            clip_out (int): timeline out-point for the created effects.
            data_imprint (dict): mutated in place — created/updated
                sub-track item names are appended to "children_names".
            update (bool): unused here; kept for caller symmetry.

        Returns:
            bool: True when at least one effect was processed.
        """
        # getting data from json file with unicode conversion
        with open(file, "r") as f:
            json_f = {self.byteify(key): self.byteify(value)
                      for key, value in json.load(f).items()}

        # get correct order of nodes by positions on track and subtrack
        nodes_order = self.reorder_nodes(json_f)

        # existing "<effect>_loaded" items are reused instead of recreated
        used_subtracks = {
            stitem.name(): stitem
            for stitem in phiero.flatten(active_track.subTrackItems())
        }

        loaded = False
        for index_order, (ef_name, ef_val) in enumerate(nodes_order.items()):
            new_name = "{}_loaded".format(ef_name)
            if new_name not in used_subtracks:
                effect_track_item = active_track.createEffect(
                    effectType=ef_val["class"],
                    timelineIn=clip_in,
                    timelineOut=clip_out,
                    subTrackIndex=index_order
                )
                effect_track_item.setName(new_name)
            else:
                effect_track_item = used_subtracks[new_name]

            node = effect_track_item.node()
            for knob_name, knob_value in ef_val["node"].items():
                # skip empty values and the node's own name knob
                if (
                    not knob_value
                    or knob_name == "name"
                ):
                    continue

                try:
                    # assume list means animation
                    # except 4 values could be RGBA or vector
                    if isinstance(knob_value, list) and len(knob_value) > 4:
                        node[knob_name].setAnimated()
                        for i, value in enumerate(knob_value):
                            if isinstance(value, list):
                                # list can have vector animation
                                for ci, cv in enumerate(value):
                                    node[knob_name].setValueAt(
                                        cv,
                                        (clip_in + i),
                                        ci
                                    )
                            else:
                                # list is single values
                                node[knob_name].setValueAt(
                                    value,
                                    (clip_in + i)
                                )
                    else:
                        node[knob_name].setValue(knob_value)
                except NameError:
                    self.log.warning("Knob: {} cannot be set".format(
                        knob_name))

            # register all loaded children
            data_imprint["children_names"].append(new_name)

            # make sure containerisation will happen
            loaded = True

        return loaded

    def update(self, container, context):
        """ Updating previously loaded effects
        """
        version_entity = context["version"]
        repre_entity = context["representation"]
        active_track = container["_item"]
        file = get_representation_path(repre_entity).replace("\\", "/")

        # get main variables
        name = container['name']
        namespace = container['namespace']

        # get timeline in out data
        version_attributes = version_entity["attrib"]
        clip_in = version_attributes["clipIn"]
        clip_out = version_attributes["clipOut"]

        object_name = "{}_{}".format(name, namespace)

        # Disable previously created nodes
        used_subtracks = {
            stitem.name(): stitem
            for stitem in phiero.flatten(active_track.subTrackItems())
        }
        container = phiero.get_track_openpype_data(
            active_track, object_name
        )

        loaded_subtrack_items = container["children_names"]
        for loaded_stitem in loaded_subtrack_items:
            if loaded_stitem not in used_subtracks:
                continue
            item_to_remove = used_subtracks.pop(loaded_stitem)
            # TODO: find a way to erase nodes
            self.log.debug(
                "This node needs to be removed: {}".format(item_to_remove))

        data_imprint = {
            "objectName": object_name,
            "name": name,
            "representation": repre_entity["id"],
            "children_names": []
        }

        if self._shared_loading(
            file,
            active_track,
            clip_in,
            clip_out,
            data_imprint,
            update=True
        ):
            return phiero.update_container(active_track, data_imprint)

    def reorder_nodes(self, data):
        """Order effect definitions by (trackIndex, subTrackIndex).

        Arguments:
            data (dict): effect name -> effect definition dicts (other
                non-dict entries are ignored).

        Returns:
            OrderedDict: definitions sorted bottom-up by track, then
            sub-track index.
        """
        new_order = OrderedDict()
        track_nums = [v["trackIndex"] for v in data.values()
                      if isinstance(v, dict)]
        sub_track_nums = [v["subTrackIndex"] for v in data.values()
                          if isinstance(v, dict)]

        # robustness: no node dictionaries at all -> nothing to order
        # (min()/max() would raise on empty sequences)
        if not track_nums or not sub_track_nums:
            return new_order

        for track_index in range(
                min(track_nums), max(track_nums) + 1):
            for sub_track_index in range(
                    min(sub_track_nums), max(sub_track_nums) + 1):
                item = self.get_item(data, track_index, sub_track_index)
                # bugfix: original tested `item is not {}`, an identity
                # comparison with a fresh dict which is always True;
                # use truthiness to skip empty results
                if item:
                    new_order.update(item)
        return new_order

    def get_item(self, data, trackIndex, subTrackIndex):
        """Return definitions matching the given track/sub-track index."""
        return {key: val for key, val in data.items()
                if isinstance(val, dict)
                if subTrackIndex == val["subTrackIndex"]
                if trackIndex == val["trackIndex"]}

    def byteify(self, input):
        """
        Converts unicode strings to strings
        It goes through all dictionary

        Kept for backward compatibility with data exported from
        Python 2; on Python 3 this is effectively an identity pass.

        Arguments:
            input (dict/str): input

        Returns:
            dict: with fixed values and keys

        """
        if isinstance(input, dict):
            return {self.byteify(key): self.byteify(value)
                    for key, value in input.items()}
        elif isinstance(input, list):
            return [self.byteify(element) for element in input]
        elif isinstance(input, str):
            # modernized: `six.text_type` is `str` on Python 3
            return str(input)
        else:
            return input

    def switch(self, container, context):
        # switching a representation is the same as updating it
        self.update(container, context)

    def remove(self, container):
        # removal of loaded effect sub-track items is not implemented
        pass

    def containerise(
        self,
        track,
        name,
        namespace,
        object_name,
        context,
        loader=None,
        data=None
    ):
        """Bundle Hiero's object into an assembly and imprint it with metadata

        Containerisation enables a tracking of version, author and origin
        for loaded assets.

        Arguments:
            track (hiero.core.VideoTrack): object to imprint as container
            name (str): Name of resulting assembly
            namespace (str): Namespace under which to host container
            object_name (str): name of container
            context (dict): Asset information
            loader (str, optional): Name of node used to produce this
                container.
            data (dict, optional): extra keys merged into the container.

        Returns:
            None: the container tag is written onto the track in place.
        """
        data_imprint = {
            object_name: {
                "schema": "openpype:container-2.0",
                "id": AVALON_CONTAINER_ID,
                "name": str(name),
                "namespace": str(namespace),
                "loader": str(loader),
                "representation": context["representation"]["id"],
            }
        }

        if data:
            for k, v in data.items():
                data_imprint[object_name].update({k: v})

        self.log.debug("_ data_imprint: {}".format(data_imprint))
        phiero.set_track_openpype_tag(track, data_imprint)
|
||||
|
|
@ -0,0 +1,212 @@
|
|||
import re
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectClipEffects(pyblish.api.InstancePlugin):
    """Collect soft effects instances.

    For each "clip" instance, gathers overlapping effect sub-track
    items, serializes their nodes, groups them by configured effect
    categories and creates one new "effect" instance per category.
    """

    order = pyblish.api.CollectorOrder - 0.078
    label = "Collect Clip Effects Instances"
    families = ["clip"]

    # settings-driven: list of {"name": ..., "effect_classes": [...]}
    effect_categories = []

    def process(self, instance):
        product_type = "effect"
        effects = {}
        review = instance.data.get("review")
        review_track_index = instance.context.data.get("reviewTrackIndex")
        item = instance.data["item"]

        # audio products carry no visual effects
        if "audio" in instance.data["productType"]:
            return

        # frame range — stored on self because test_overlap() and
        # node_serialization() read these attributes later
        self.handle_start = instance.data["handleStart"]
        self.handle_end = instance.data["handleEnd"]
        self.clip_in = int(item.timelineIn())
        self.clip_out = int(item.timelineOut())
        self.clip_in_h = self.clip_in - self.handle_start
        self.clip_out_h = self.clip_out + self.handle_end

        track_item = instance.data["item"]
        track = track_item.parent()
        track_index = track.trackIndex()
        tracks_effect_items = instance.context.data.get("tracksEffectItems")
        clip_effect_items = instance.data.get("clipEffectItems")

        # add clips effects to track's:
        if clip_effect_items:
            tracks_effect_items[track_index] = clip_effect_items

        # process all effects and divide them to instance
        for _track_index, sub_track_items in tracks_effect_items.items():
            # skip if track index is the same as review track index
            if review and review_track_index == _track_index:
                continue
            for sitem in sub_track_items:
                # make sure this subtrack item is relative of track item
                if ((track_item not in sitem.linkedItems())
                        and (len(sitem.linkedItems()) > 0)):
                    continue

                # only effects on this track or above apply to the clip
                if not (track_index <= _track_index):
                    continue

                effect = self.add_effect(_track_index, sitem)
                if effect:
                    effects.update(effect)

        # skip any without effects
        if not effects:
            return

        product_name = instance.data.get("productName")
        effects.update({"assignTo": product_name})

        # split camelCase product name into capitalized parts, e.g.
        # "plateMain" -> ["effect", "Plate", "Main"]
        product_name_split = re.findall(r'[A-Z][^A-Z]*', product_name)

        if len(product_name_split) > 0:
            root_name = product_name.replace(product_name_split[0], "")
            product_name_split.insert(0, root_name.capitalize())

        product_name_split.insert(0, "effect")

        # map category name -> effect class names (from settings)
        effect_categories = {
            x["name"]: x["effect_classes"] for x in self.effect_categories
        }

        # reverse map: effect class -> category ("" = uncategorized)
        category_by_effect = {"": ""}
        for key, values in effect_categories.items():
            for cls in values:
                category_by_effect[cls] = key

        effects_categorized = {k: {} for k in effect_categories.keys()}
        effects_categorized[""] = {}
        for key, value in effects.items():
            if key == "assignTo":
                continue

            # Some classes can have a number in them. Like Text2.
            # (substring match; the last matching class wins)
            found_cls = ""
            for cls in category_by_effect.keys():
                if cls in value["class"]:
                    found_cls = cls

            effects_categorized[category_by_effect[found_cls]][key] = value

        # drop empty categories; propagate "assignTo" to kept ones
        categories = list(effects_categorized.keys())
        for category in categories:
            if not effects_categorized[category]:
                effects_categorized.pop(category)
                continue

            effects_categorized[category]["assignTo"] = effects["assignTo"]

        for category, effects in effects_categorized.items():
            product_name = "".join(product_name_split)
            product_name += category.capitalize()

            # create new instance and inherit data
            # (clipEffectItems is excluded to avoid re-collection)
            data = {}
            for key, value in instance.data.items():
                if "clipEffectItems" in key:
                    continue
                data[key] = value

            data.update({
                "productName": product_name,
                "productType": product_type,
                "family": product_type,
                "families": [product_type],
                "name": product_name + "_" + data["folderPath"],
                "label": "{} - {}".format(
                    data["folderPath"], product_name
                ),
                "effects": effects,
            })

            # create new instance
            _instance = instance.context.create_instance(**data)
            self.log.info("Created instance `{}`".format(_instance))
            self.log.debug("instance.data `{}`".format(_instance.data))

    def test_overlap(self, effect_t_in, effect_t_out):
        """Return True when the effect's timeline range touches the clip.

        Covers three cases: effect spans the whole clip, overlaps the
        clip's right edge, or overlaps its left edge. Uses
        ``self.clip_in``/``self.clip_out`` set by :meth:`process`.
        """
        covering_exp = bool(
            (effect_t_in <= self.clip_in)
            and (effect_t_out >= self.clip_out)
        )
        overlaying_right_exp = bool(
            (effect_t_in < self.clip_out)
            and (effect_t_out >= self.clip_out)
        )
        overlaying_left_exp = bool(
            (effect_t_out > self.clip_in)
            and (effect_t_in <= self.clip_in)
        )

        return any((
            covering_exp,
            overlaying_right_exp,
            overlaying_left_exp
        ))

    def add_effect(self, track_index, sitem):
        """Serialize one effect sub-track item.

        Returns:
            dict or None: ``{node_name: {...}}`` describing the effect,
            or None when the effect does not overlap the clip range.
        """
        track = sitem.parentTrack().name()
        # node serialization
        node = sitem.node()
        node_serialized = self.node_serialization(node)
        node_name = sitem.name()
        node_class = node.Class()

        # collect timelineIn/Out
        effect_t_in = int(sitem.timelineIn())
        effect_t_out = int(sitem.timelineOut())

        if not self.test_overlap(effect_t_in, effect_t_out):
            return

        self.log.debug("node_name: `{}`".format(node_name))
        self.log.debug("node_class: `{}`".format(node_class))

        return {node_name: {
            "class": node_class,
            "timelineIn": effect_t_in,
            "timelineOut": effect_t_out,
            "subTrackIndex": sitem.subTrackIndex(),
            "trackIndex": track_index,
            "track": track,
            "node": node_serialized
        }}

    def node_serialization(self, node):
        """Serialize a Nuke node's knobs to a plain dict.

        Animated knobs are sampled per-frame across the clip range
        including handles (``clip_in_h``..``clip_out_h``); static knobs
        store their plain value. Cosmetic/UI knobs are skipped.
        """
        node_serialized = {}

        # adding ignoring knob keys
        _ignoring_keys = ['invert_mask', 'help', 'mask',
                          'xpos', 'ypos', 'layer', 'process_mask', 'channel',
                          'channels', 'maskChannelMask', 'maskChannelInput',
                          'note_font', 'note_font_size', 'unpremult',
                          'postage_stamp_frame', 'maskChannel', 'export_cc',
                          'select_cccid', 'mix', 'version', 'matrix']

        # loop through all knobs and collect not ignored
        # and any with any value
        for knob in node.knobs().keys():
            # skip nodes in ignore keys
            if knob in _ignoring_keys:
                continue

            # get animation if node is animated
            if node[knob].isAnimated():
                # grab animation including handles
                knob_anim = [node[knob].getValueAt(i)
                             for i in range(
                                 self.clip_in_h, self.clip_out_h + 1)]

                node_serialized[knob] = knob_anim
            else:
                node_serialized[knob] = node[knob].value()

        return node_serialized
|
||||
|
|
@ -0,0 +1,151 @@
|
|||
from pprint import pformat
|
||||
import re
|
||||
import ast
|
||||
import json
|
||||
|
||||
import pyblish.api
|
||||
|
||||
|
||||
class CollectFrameTagInstances(pyblish.api.ContextPlugin):
    """Collect frames from tags.

    Tag is expected to have metadata:
    {
        "productType": "frame"
        "productName": "main"
    }
    """

    order = pyblish.api.CollectorOrder
    label = "Collect Frames"
    hosts = ["hiero"]

    def process(self, context):
        # kept on self so _create_instances() can create instances later
        self._context = context

        # collect all sequence tags
        product_data = self._create_frame_product_data_sequence(context)

        self.log.debug("__ product_data: {}".format(
            pformat(product_data)
        ))

        # create instances
        self._create_instances(product_data)

    def _get_tag_data(self, tag):
        """Convert a Hiero tag's metadata to a plain dict.

        Strips the "tag." prefix from keys and coerces string values to
        int/bool/None/literal where the pattern allows; anything that
        fails coercion stays a string.
        """
        data = {}

        # get tag metadata attribute
        tag_data = tag.metadata()

        # convert tag metadata to normal keys names and values to correct types
        for k, v in dict(tag_data).items():
            key = k.replace("tag.", "")

            try:
                # capture exceptions which are related to strings only
                if re.match(r"^[\d]+$", v):
                    value = int(v)
                elif re.match(r"^True$", v):
                    value = True
                elif re.match(r"^False$", v):
                    value = False
                elif re.match(r"^None$", v):
                    value = None
                elif re.match(r"^[\w\d_]+$", v):
                    value = v
                else:
                    value = ast.literal_eval(v)
            except (ValueError, SyntaxError):
                value = v

            data[key] = value

        return data

    def _create_frame_product_data_sequence(self, context):
        """Build per-product frame data from the timeline's tags.

        Returns:
            dict: product name -> {"frames": [...], "format": ...,
            "folderPath": ...} for every "frame" tag whose start frame
            lies within the sequence duration.
        """
        sequence_tags = []
        sequence = context.data["activeTimeline"]

        # get all publishable sequence frames
        publish_frames = range(int(sequence.duration() + 1))

        self.log.debug("__ publish_frames: {}".format(
            pformat(publish_frames)
        ))

        # get all sequence tags
        for tag in sequence.tags():
            tag_data = self._get_tag_data(tag)
            self.log.debug("__ tag_data: {}".format(
                pformat(tag_data)
            ))
            if not tag_data:
                continue

            # "family" is the legacy key for "productType"
            product_type = tag_data.get("productType")
            if product_type is None:
                product_type = tag_data.get("family")
            if not product_type:
                continue

            if product_type != "frame":
                continue

            sequence_tags.append(tag_data)

        self.log.debug("__ sequence_tags: {}".format(
            pformat(sequence_tags)
        ))

        # first collect all available product tag frames
        product_data = {}
        context_folder_path = context.data["folderEntity"]["path"]

        for tag_data in sequence_tags:
            frame = int(tag_data["start"])

            if frame not in publish_frames:
                continue

            # "subset" is the legacy key for "productName"
            product_name = tag_data.get("productName")
            if product_name is None:
                product_name = tag_data["subset"]

            if product_name in product_data:
                # update existing product key
                product_data[product_name]["frames"].append(frame)
            else:
                # create new product key
                product_data[product_name] = {
                    "frames": [frame],
                    "format": tag_data["format"],
                    "folderPath": context_folder_path
                }
        return product_data

    def _create_instances(self, product_data):
        # create instance per product
        product_type = "image"
        # NOTE(review): the loop variable shadows the `product_data`
        # dict being iterated; works, but rename on next refactor
        for product_name, product_data in product_data.items():
            name = "frame" + product_name.title()
            data = {
                "name": name,
                "label": "{} {}".format(name, product_data["frames"]),
                "productType": product_type,
                "family": product_type,
                "families": [product_type, "frame"],
                "folderPath": product_data["folderPath"],
                "productName": name,
                "format": product_data["format"],
                "frames": product_data["frames"]
            }
            self._context.create_instance(**data)

            self.log.info(
                "Created instance: {}".format(
                    json.dumps(data, sort_keys=True, indent=4)
                )
            )
|
||||
|
|
@ -0,0 +1,33 @@
|
|||
from pyblish import api
|
||||
|
||||
|
||||
class CollectClipTagTasks(api.InstancePlugin):
    """Collect Tags from selected track items."""

    order = api.CollectorOrder - 0.077
    label = "Collect Tag Tasks"
    hosts = ["hiero"]
    families = ["shot"]

    def process(self, instance):
        """Gather task definitions from the instance's tags.

        Every tag whose product type contains "task" contributes one
        entry mapping the tag label to ``{"type": <tag type>}``; the
        result is stored under ``instance.data["tasks"]``.
        """
        collected_tasks = {}
        for tag in instance.data["tags"]:
            metadata = dict(tag.metadata())

            # "tag.family" is the legacy key for "tag.productType"
            product_type = metadata.get("tag.productType")
            if product_type is None:
                product_type = metadata.get("tag.family", "")

            # only task-flavoured tags are of interest here
            if "task" not in product_type:
                continue

            collected_tasks[metadata.get("tag.label", "")] = {
                "type": metadata.get("tag.type", "")
            }

        instance.data["tasks"] = collected_tasks

        self.log.info("Collected Tasks from Tags: `{}`".format(
            instance.data["tasks"]))
        return
|
||||
|
|
@ -0,0 +1,104 @@
|
|||
# from ayon_core import plugins
|
||||
import os
|
||||
import json
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline import publish
|
||||
|
||||
|
||||
class ExtractClipEffects(publish.Extractor):
    """Extract clip effects instances.

    Writes the collected effect definitions to a json representation,
    relinks any file-based knobs into the instance resources directory
    and registers the resulting transfers and representation.
    """

    order = pyblish.api.ExtractorOrder
    label = "Export Clip Effects"
    families = ["effect"]

    def process(self, instance):
        item = instance.data["item"]
        effects = instance.data.get("effects")

        # skip any without effects
        if not effects:
            return

        product_name = instance.data.get("productName")
        product_type = instance.data["productType"]

        self.log.debug("creating staging dir")
        staging_dir = self.staging_dir(instance)

        transfers = list()
        if "transfers" not in instance.data:
            instance.data["transfers"] = list()

        ext = "json"
        file = product_name + "." + ext

        # when instance is created during collection part
        resources_dir = instance.data["resourcesDir"]

        # change paths in effects to files
        for k, effect in effects.items():
            # bugfix: original used the substring test `"assignTo" in k`;
            # compare the marker key exactly (consistent with the
            # collector, which uses `key == "assignTo"`)
            if k == "assignTo":
                continue
            trn = self.copy_linked_files(effect, resources_dir)
            if trn:
                transfers.append((trn[0], trn[1]))

        instance.data["transfers"].extend(transfers)
        self.log.debug("_ transfers: `{}`".format(
            instance.data["transfers"]))

        # create representations
        instance.data["representations"] = list()

        # keys copied verbatim from instance data onto the version
        transfer_data = [
            "handleStart", "handleEnd",
            "sourceStart", "sourceStartH", "sourceEnd", "sourceEndH",
            "frameStart", "frameEnd",
            "clipIn", "clipOut", "clipInH", "clipOutH",
            "folderPath", "version"
        ]

        # pass data to version
        version_data = dict()
        version_data.update({k: instance.data[k] for k in transfer_data})

        # add to data of representation
        version_data.update({
            "colorspace": item.sourceMediaColourTransform(),
            "colorspaceScript": instance.context.data["colorspace"],
            "families": [product_type, "plate"],
            # TODO find out if 'subset' is needed (and 'productName')
            "subset": product_name,
            "productName": product_name,
            "fps": instance.context.data["fps"]
        })
        instance.data["versionData"] = version_data

        representation = {
            'files': file,
            'stagingDir': staging_dir,
            'name': product_type + ext.title(),
            'ext': ext
        }
        instance.data["representations"].append(representation)

        self.log.debug("_ representations: `{}`".format(
            instance.data["representations"]))

        self.log.debug("_ version_data: `{}`".format(
            instance.data["versionData"]))

        with open(os.path.join(staging_dir, file), "w") as outfile:
            outfile.write(json.dumps(effects, indent=4, sort_keys=True))

    def copy_linked_files(self, effect, dst_dir):
        """Relink the effect's file knob into *dst_dir*.

        Rewrites the "file" knob path inside *effect* in place and
        returns the (source, destination) pair so the caller can queue
        the actual file transfer.

        Returns:
            tuple or None: (original path, destination path), or None
            when the effect has no non-empty file knob.
        """
        for k, v in effect["node"].items():
            # bugfix: original tested `k in "file"`, which asks whether
            # the knob name is a substring of "file" (matching e.g.
            # "f" or "il"); the intent is the file knob itself
            if k == "file" and v != '':
                base_name = os.path.basename(v)
                dst = os.path.join(dst_dir, base_name).replace("\\", "/")

                # add it to the json
                effect["node"][k] = dst
                return (v, dst)
|
||||
|
|
@ -0,0 +1,87 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.lib import (
|
||||
get_oiio_tool_args,
|
||||
run_subprocess,
|
||||
)
|
||||
from ayon_core.pipeline import publish
|
||||
|
||||
|
||||
class ExtractFrames(publish.Extractor):
    """Extract single frames from the active timeline with oiiotool.

    For every frame in ``instance.data["frames"]`` the source media of the
    track item at that timeline position is rendered to an image in the
    staging directory and all outputs are collected into one representation.
    """

    order = pyblish.api.ExtractorOrder
    label = "Extract Frames"
    hosts = ["hiero"]
    families = ["frame"]
    # container formats where a frame is addressed as a subimage index
    movie_extensions = ["mov", "mp4"]

    def process(self, instance):
        oiio_tool_args = get_oiio_tool_args("oiiotool")
        staging_dir = self.staging_dir(instance)
        output_template = os.path.join(staging_dir, instance.data["name"])
        sequence = instance.context.data["activeTimeline"]
        frames = instance.data["frames"]
        # hoisted out of the loop: the extension is instance-wide and the
        # original left it undefined (NameError) for an empty frame list
        output_ext = instance.data["format"]

        files = []
        for index, frame in enumerate(frames, start=1):
            track_item = sequence.trackItemAt(frame)
            media_source = track_item.source().mediaSource()
            input_path = media_source.fileinfos()[0].filename()
            # map the timeline frame into the source media's frame range
            input_frame = (
                track_item.mapTimelineToSource(frame) +
                track_item.source().mediaSource().startTime()
            )
            output_path = "{}.{:04d}.{}".format(
                output_template, int(frame), output_ext)

            args = list(oiio_tool_args)

            ext = os.path.splitext(input_path)[1][1:]
            if ext in self.movie_extensions:
                args.extend(["--subimage", str(int(input_frame))])
            else:
                args.extend(["--frames", str(int(input_frame))])

            if ext == "exr":
                # approximate sRGB gamma for linear exr sources
                args.extend(["--powc", "0.45,0.45,0.45,1.0"])

            args.extend([input_path, "-o", output_path])
            output = run_subprocess(args)

            failed_output = "oiiotool produced no output."
            if failed_output in output:
                raise ValueError(
                    "oiiotool processing failed. Args: {}".format(args)
                )

            files.append(output_path)

            # Feedback to user because "oiiotool" can make the publishing
            # appear unresponsive. 'enumerate' replaces the original
            # 'frames.index(frame)' which was O(n) per iteration and
            # reported the wrong number for duplicate frame values.
            self.log.info(
                "Processed {} of {} frames".format(index, len(frames))
            )

        # single output is stored as a plain string, multiple as a list,
        # matching the representation convention expected downstream
        file_names = [os.path.basename(path) for path in files]
        instance.data["representations"] = [
            {
                "name": output_ext,
                "ext": output_ext,
                "files": file_names[0] if len(file_names) == 1 else file_names,
                "stagingDir": staging_dir
            }
        ]
|
||||
|
|
@ -0,0 +1,60 @@
|
|||
import os
|
||||
import pyblish.api
|
||||
|
||||
from ayon_core.pipeline import publish
|
||||
|
||||
|
||||
class ExtractThumbnail(publish.Extractor):
    """Extract a single PNG thumbnail from a track item.

    The thumbnail is grabbed from the middle frame of the item's source
    range and appended to the instance representations.
    """

    label = "Extract Thumbnail"
    order = pyblish.api.ExtractorOrder
    families = ["plate", "take"]
    hosts = ["hiero"]

    def process(self, instance):
        # make sure the representation list exists before appending to it
        instance.data.setdefault("representations", [])

        self.create_thumbnail(self.staging_dir(instance), instance)

    def create_thumbnail(self, staging_dir, instance):
        """Save a mid-clip thumbnail and register its representation."""
        item = instance.data["item"]
        item_name = item.name()

        # source-range frames of the track item
        clip_duration = item.sourceDuration()
        first_frame = item.sourceIn()
        self.log.debug(
            "__ frame_start: `{}`, duration: `{}`".format(
                first_frame, clip_duration))

        # pick the frame in the middle of the clip for the thumbnail
        mid_frame = int(first_frame + (clip_duration / 2))

        thumb_file = "{}thumbnail{}{}".format(item_name, mid_frame, ".png")
        thumb_path = os.path.join(staging_dir, thumb_file)

        saved_thumb = item.thumbnail(mid_frame, "colour").save(
            thumb_path,
            format='png'
        )
        self.log.debug(
            "__ thumb_path: `{}`, frame: `{}`".format(saved_thumb, mid_frame))

        self.log.info("Thumbnail was generated to: {}".format(thumb_path))
        instance.data["representations"].append({
            'files': thumb_file,
            'stagingDir': staging_dir,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        })
|
||||
|
|
@ -0,0 +1,24 @@
|
|||
from pyblish import api
|
||||
|
||||
from ayon_core.lib import version_up
|
||||
|
||||
|
||||
class IntegrateVersionUpWorkfile(api.ContextPlugin):
    """Save as new workfile version.

    After integration, saves the active Hiero project under the next
    version of the current workfile path.
    """

    order = api.IntegratorOrder + 10.1
    label = "Version-up Workfile"
    hosts = ["hiero"]

    optional = True
    active = True

    def process(self, context):
        project = context.data["activeProject"]
        path = context.data.get("currentFile")

        # Bail out early: the original called 'version_up' before checking
        # anything (crashing on a missing path) and logged success even
        # when no project was available to save.
        if not project or not path:
            self.log.warning(
                "Missing active project or current workfile path, "
                "skipping workfile version-up."
            )
            return

        new_path = version_up(path)
        project.saveAs(new_path)

        self.log.info("Project workfile was versioned up")
|
||||
|
|
@ -0,0 +1,448 @@
|
|||
import pyblish
|
||||
|
||||
from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID
|
||||
from ayon_core.pipeline.editorial import is_overlapping_otio_ranges
|
||||
|
||||
from ayon_core.hosts.hiero import api as phiero
|
||||
from ayon_core.hosts.hiero.api.otio import hiero_export
|
||||
|
||||
import hiero
|
||||
# # developer reload modules
|
||||
from pprint import pformat
|
||||
|
||||
|
||||
class PrecollectInstances(pyblish.api.ContextPlugin):
    """Collect all Track items selection."""

    order = pyblish.api.CollectorOrder - 0.49
    label = "Precollect Instances"
    hosts = ["hiero"]

    # NOTE(review): class-level mutable list shared by all plugin instances;
    # 'test_any_audio' fills it lazily and it persists across publishes -
    # confirm this caching is intended.
    audio_track_items = []

    def process(self, context):
        """Create publish instances from tagged, enabled track items.

        Falls back to all tagged+enabled track items when nothing is
        selected, stores track effect items on the context and creates a
        main instance (plus shot/audio instances) per track item.
        """
        self.otio_timeline = context.data["otioTimeline"]
        timeline_selection = phiero.get_timeline_selection()
        selected_timeline_items = phiero.get_track_items(
            selection=timeline_selection,
            check_tagged=True,
            check_enabled=True
        )

        # only return enabled track items
        if not selected_timeline_items:
            selected_timeline_items = phiero.get_track_items(
                check_enabled=True, check_tagged=True)

        self.log.info(
            "Processing enabled track items: {}".format(
                selected_timeline_items))

        # add all tracks subtreck effect items to context
        all_tracks = hiero.ui.activeSequence().videoTracks()
        tracks_effect_items = self.collect_sub_track_items(all_tracks)
        context.data["tracksEffectItems"] = tracks_effect_items

        # process all selected timeline track items
        for track_item in selected_timeline_items:
            data = {}
            clip_name = track_item.name()
            source_clip = track_item.source()
            self.log.debug("clip_name: {}".format(clip_name))

            # get openpype tag data
            tag_data = phiero.get_trackitem_openpype_data(track_item)
            self.log.debug("__ tag_data: {}".format(pformat(tag_data)))

            if not tag_data:
                continue

            # only AYON/Avalon instance tags are collected
            if tag_data.get("id") not in {
                AYON_INSTANCE_ID, AVALON_INSTANCE_ID
            }:
                continue

            # get clips subtracks and annotations
            annotations = self.clip_annotations(source_clip)
            subtracks = self.clip_subtrack(track_item)
            self.log.debug("Annotations: {}".format(annotations))
            self.log.debug(">> Subtracks: {}".format(subtracks))

            # solve handles length - clamp tag handles to what the source
            # media actually provides around the cut
            tag_data["handleStart"] = min(
                tag_data["handleStart"], int(track_item.handleInLength()))
            tag_data["handleEnd"] = min(
                tag_data["handleEnd"], int(track_item.handleOutLength()))

            # add audio to families
            with_audio = False
            if tag_data.pop("audio"):
                with_audio = True

            # add tag data to instance data
            data.update({
                k: v for k, v in tag_data.items()
                if k not in ("id", "applieswhole", "label")
            })
            # Backward compatibility fix of 'entity_type' > 'folder_type'
            if "parents" in data:
                for parent in data["parents"]:
                    if "entity_type" in parent:
                        parent["folder_type"] = parent.pop("entity_type")

            folder_path, folder_name = self._get_folder_data(tag_data)

            families = [str(f) for f in tag_data["families"]]

            # TODO: remove backward compatibility
            product_name = tag_data.get("productName")
            if product_name is None:
                # backward compatibility: subset -> productName
                product_name = tag_data.get("subset")

            # backward compatibility: product_name should not be missing
            if not product_name:
                self.log.error(
                    "Product name is not defined for: {}".format(folder_path))

            # TODO: remove backward compatibility
            product_type = tag_data.get("productType")
            if product_type is None:
                # backward compatibility: family -> productType
                product_type = tag_data.get("family")

            # backward compatibility: product_type should not be missing
            if not product_type:
                self.log.error(
                    "Product type is not defined for: {}".format(folder_path))

            # form label
            label = "{} -".format(folder_path)
            if folder_name != clip_name:
                label += " ({})".format(clip_name)
            label += " {}".format(product_name)

            data.update({
                "name": "{}_{}".format(folder_path, product_name),
                "label": label,
                "productName": product_name,
                "productType": product_type,
                "folderPath": folder_path,
                "asset_name": folder_name,
                "item": track_item,
                "families": families,
                "publish": tag_data["publish"],
                "fps": context.data["fps"],

                # clip's effect
                "clipEffectItems": subtracks,
                "clipAnnotations": annotations,

                # add all additional tags
                "tags": phiero.get_track_item_tags(track_item),
                "newAssetPublishing": True
            })

            # otio clip data
            otio_data = self.get_otio_clip_instance_data(track_item) or {}
            self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
            data.update(otio_data)
            self.log.debug("__ data: {}".format(pformat(data)))

            # add resolution
            self.get_resolution_to_data(data, context)

            # create instance
            instance = context.create_instance(**data)

            # add colorspace data
            instance.data.update({
                "versionData": {
                    "colorspace": track_item.sourceMediaColourTransform(),
                }
            })

            # create shot instance for shot attributes create/update
            self.create_shot_instance(context, **data)

            self.log.info("Creating instance: {}".format(instance))
            self.log.info(
                "_ instance.data: {}".format(pformat(instance.data)))

            if not with_audio:
                continue

            # create audio product instance
            self.create_audio_instance(context, **data)

            # add audioReview attribute to plate instance data
            # if reviewTrack is on
            if tag_data.get("reviewTrack") is not None:
                instance.data["reviewAudio"] = True

    def get_resolution_to_data(self, data, context):
        """Add resolution/pixel-aspect keys to ``data`` in place.

        Uses the clip's own source metadata when 'sourceResolution' is
        enabled on the instance, otherwise the timeline's metadata.
        """
        assert data.get("otioClip"), "Missing `otioClip` data"

        # solve source resolution option
        if data.get("sourceResolution", None):
            otio_clip_metadata = data[
                "otioClip"].media_reference.metadata
            data.update({
                "resolutionWidth": otio_clip_metadata[
                    "openpype.source.width"],
                "resolutionHeight": otio_clip_metadata[
                    "openpype.source.height"],
                "pixelAspect": otio_clip_metadata[
                    "openpype.source.pixelAspect"]
            })
        else:
            otio_tl_metadata = context.data["otioTimeline"].metadata
            data.update({
                "resolutionWidth": otio_tl_metadata["openpype.timeline.width"],
                "resolutionHeight": otio_tl_metadata[
                    "openpype.timeline.height"],
                "pixelAspect": otio_tl_metadata[
                    "openpype.timeline.pixelAspect"]
            })

    def create_shot_instance(self, context, **data):
        """Create the companion 'shotMain' instance for hero-track clips.

        No-op unless the clip is on the hero track and carries hierarchy
        data.
        """
        product_name = "shotMain"
        master_layer = data.get("heroTrack")
        hierarchy_data = data.get("hierarchyData")
        item = data.get("item")
        clip_name = item.name()

        if not master_layer:
            return

        if not hierarchy_data:
            return

        folder_path = data["folderPath"]
        folder_name = data["asset_name"]

        product_type = "shot"

        # form label
        label = "{} -".format(folder_path)
        if folder_name != clip_name:
            label += " ({}) ".format(clip_name)
        label += " {}".format(product_name)

        data.update({
            "name": "{}_{}".format(folder_path, product_name),
            "label": label,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type]
        })

        instance = context.create_instance(**data)
        self.log.info("Creating instance: {}".format(instance))
        self.log.debug(
            "_ instance.data: {}".format(pformat(instance.data)))

    def _get_folder_data(self, data):
        """Return ``(folder_path, folder_name)`` from tag data.

        Pops 'folderPath' from ``data``; reconstructs the path from
        'hierarchy' + name for legacy tags that lack it.
        """
        folder_path = data.pop("folderPath", None)

        if data.get("asset_name"):
            folder_name = data["asset_name"]
        else:
            folder_name = data["asset"]

        # backward compatibility for clip tags
        # which are missing folderPath key
        # TODO remove this in future versions
        if not folder_path:
            hierarchy_path = data["hierarchy"]
            folder_path = "/{}/{}".format(
                hierarchy_path,
                folder_name
            )

        return folder_path, folder_name

    def create_audio_instance(self, context, **data):
        """Create the companion 'audioMain' instance for hero-track clips.

        No-op unless the clip is on the hero track and overlaps at least
        one enabled audio clip in the timeline.
        """
        product_name = "audioMain"
        master_layer = data.get("heroTrack")

        if not master_layer:
            return

        item = data.get("item")
        clip_name = item.name()

        # test if any audio clips
        if not self.test_any_audio(item):
            return

        folder_path = data["folderPath"]
        asset_name = data["asset_name"]

        product_type = "audio"

        # form label
        label = "{} -".format(folder_path)
        if asset_name != clip_name:
            label += " ({}) ".format(clip_name)
        label += " {}".format(product_name)

        data.update({
            "name": "{}_{}".format(folder_path, product_name),
            "label": label,
            "productName": product_name,
            "productType": product_type,
            "family": product_type,
            "families": [product_type, "clip"]
        })
        # remove review track attr if any
        data.pop("reviewTrack")

        # create instance
        instance = context.create_instance(**data)
        self.log.info("Creating instance: {}".format(instance))
        self.log.debug(
            "_ instance.data: {}".format(pformat(instance.data)))

    def test_any_audio(self, track_item):
        """Return True when any enabled audio clip overlaps ``track_item``.

        Returns None (falsy) when no overlap is found.
        """
        # collect all audio tracks to class variable
        if not self.audio_track_items:
            for otio_clip in self.otio_timeline.each_clip():
                if otio_clip.parent().kind != "Audio":
                    continue
                self.audio_track_items.append(otio_clip)

        # get track item timeline range
        timeline_range = self.create_otio_time_range_from_timeline_item_data(
            track_item)

        # loop through audio track items and search for overlapping clip
        for otio_audio in self.audio_track_items:
            parent_range = otio_audio.range_in_parent()

            # if any overaling clip found then return True
            if is_overlapping_otio_ranges(
                    parent_range, timeline_range, strict=False):
                return True

    def get_otio_clip_instance_data(self, track_item):
        """
        Return otio objects for timeline, track and clip

        Args:
            timeline_item_data (dict): timeline_item_data from list returned by
                                    resolve.get_current_timeline_items()
            otio_timeline (otio.schema.Timeline): otio object

        Returns:
            dict: otio clip object

        """
        ti_track_name = track_item.parent().name()
        timeline_range = self.create_otio_time_range_from_timeline_item_data(
            track_item)
        for otio_clip in self.otio_timeline.each_clip():
            track_name = otio_clip.parent().name
            parent_range = otio_clip.range_in_parent()
            # match by track name and clip name before comparing ranges
            if ti_track_name != track_name:
                continue
            if otio_clip.name != track_item.name():
                continue
            self.log.debug("__ parent_range: {}".format(parent_range))
            self.log.debug("__ timeline_range: {}".format(timeline_range))
            if is_overlapping_otio_ranges(
                    parent_range, timeline_range, strict=True):

                # add pypedata marker to otio_clip metadata
                for marker in otio_clip.markers:
                    if phiero.OPENPYPE_TAG_NAME in marker.name:
                        otio_clip.metadata.update(marker.metadata)
                return {"otioClip": otio_clip}

        return None

    @staticmethod
    def create_otio_time_range_from_timeline_item_data(track_item):
        """Build an otio TimeRange from the item's timeline in/duration."""
        timeline = phiero.get_current_sequence()
        frame_start = int(track_item.timelineIn())
        frame_duration = int(track_item.duration())
        fps = timeline.framerate().toFloat()

        return hiero_export.create_otio_time_range(
            frame_start, frame_duration, fps)

    def collect_sub_track_items(self, tracks):
        """
        Returns dictionary with track index as key and list of subtracks
        """
        # collect all subtrack items
        sub_track_items = {}
        for track in tracks:
            effect_items = track.subTrackItems()

            # skip if no clips on track > need track with effect only
            if not effect_items:
                continue

            # skip all disabled tracks
            if not track.isEnabled():
                continue

            track_index = track.trackIndex()
            _sub_track_items = phiero.flatten(effect_items)

            _sub_track_items = list(_sub_track_items)
            # continue only if any subtrack items are collected
            if not _sub_track_items:
                continue

            enabled_sti = []
            # loop all found subtrack items and check if they are enabled
            for _sti in _sub_track_items:
                # checking if not enabled
                if not _sti.isEnabled():
                    continue
                if isinstance(_sti, hiero.core.Annotation):
                    continue
                # collect the subtrack item
                enabled_sti.append(_sti)

            # continue only if any subtrack items are collected
            if not enabled_sti:
                continue

            # add collection of subtrackitems to dict
            sub_track_items[track_index] = enabled_sti

        return sub_track_items

    @staticmethod
    def clip_annotations(clip):
        """
        Returns list of Clip's hiero.core.Annotation
        """
        annotations = []
        subTrackItems = phiero.flatten(clip.subTrackItems())
        annotations += [item for item in subTrackItems if isinstance(
            item, hiero.core.Annotation)]
        return annotations

    @staticmethod
    def clip_subtrack(clip):
        """
        Returns list of Clip's hiero.core.SubTrackItem
        """
        subtracks = []
        subTrackItems = phiero.flatten(clip.parent().subTrackItems())
        for item in subTrackItems:
            # TimeWarps are handled by the retime collector, not here
            if "TimeWarp" in item.name():
                continue
            # avoid all annotation
            if isinstance(item, hiero.core.Annotation):
                continue
            # avoid all disabled
            if not item.isEnabled():
                continue
            subtracks.append(item)
        return subtracks
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
import os
|
||||
import tempfile
|
||||
from pprint import pformat
|
||||
|
||||
import pyblish.api
|
||||
from qtpy.QtGui import QPixmap
|
||||
|
||||
import hiero.ui
|
||||
|
||||
from ayon_core.hosts.hiero.api.otio import hiero_export
|
||||
|
||||
|
||||
class PrecollectWorkfile(pyblish.api.ContextPlugin):
    """Inject the current working file into context"""

    label = "Precollect Workfile"
    order = pyblish.api.CollectorOrder - 0.491

    def process(self, context):
        """Create the workfile instance and seed shared context data.

        Side effects: creates a temp staging dir with a timeline screenshot,
        adds a 'workfile' instance (hrox + thumbnail representations) and
        stores project/timeline/otio/fps/colorspace data on the context.
        """
        folder_path = context.data["folderPath"]
        folder_name = folder_path.split("/")[-1]

        active_timeline = hiero.ui.activeSequence()
        project = active_timeline.project()
        fps = active_timeline.framerate().toFloat()

        # adding otio timeline to context
        otio_timeline = hiero_export.create_otio_timeline()

        # get workfile thumbnail paths
        tmp_staging = tempfile.mkdtemp(prefix="pyblish_tmp_")
        thumbnail_name = "workfile_thumbnail.png"
        thumbnail_path = os.path.join(tmp_staging, thumbnail_name)

        # search for all windows with name of actual sequence
        _windows = [w for w in hiero.ui.windowManager().windows()
                    if active_timeline.name() in w.windowTitle()]

        # export window to thumb path
        # NOTE(review): 'QPixmap.grabWidget' is a Qt4-era API (removed in
        # Qt5 in favour of QWidget.grab) and '_windows[-1]' raises
        # IndexError when no matching window exists - confirm against the
        # Qt binding shipped with Hiero.
        QPixmap.grabWidget(_windows[-1]).save(thumbnail_path, 'png')

        # thumbnail
        thumb_representation = {
            'files': thumbnail_name,
            'stagingDir': tmp_staging,
            'name': "thumbnail",
            'thumbnail': True,
            'ext': "png"
        }

        # get workfile paths
        current_file = project.path()
        staging_dir, base_name = os.path.split(current_file)

        # creating workfile representation
        workfile_representation = {
            'name': 'hrox',
            'ext': 'hrox',
            'files': base_name,
            "stagingDir": staging_dir,
        }
        product_type = "workfile"
        instance_data = {
            "label": "{} - {}Main".format(
                folder_path, product_type),
            "name": "{}_{}".format(folder_name, product_type),
            "folderPath": folder_path,
            # TODO use 'get_product_name'
            "productName": "{}{}Main".format(
                folder_name, product_type.capitalize()
            ),
            "item": project,
            "productType": product_type,
            "family": product_type,
            "families": [product_type],
            "representations": [workfile_representation, thumb_representation]
        }

        # create instance with workfile
        instance = context.create_instance(**instance_data)

        # update context with main project attributes
        context_data = {
            "activeProject": project,
            "activeTimeline": active_timeline,
            "otioTimeline": otio_timeline,
            "currentFile": current_file,
            "colorspace": self.get_colorspace(project),
            "fps": fps
        }
        self.log.debug("__ context_data: {}".format(pformat(context_data)))
        context.data.update(context_data)

        self.log.info("Creating instance: {}".format(instance))
        self.log.debug("__ instance.data: {}".format(pformat(instance.data)))
        self.log.debug("__ context_data: {}".format(pformat(context_data)))

    def get_colorspace(self, project):
        """Return a dict of the project's OCIO/LUT colorspace settings."""
        # get workfile's colorspace properties
        return {
            "useOCIOEnvironmentOverride": project.useOCIOEnvironmentOverride(),
            "lutSetting16Bit": project.lutSetting16Bit(),
            "lutSetting8Bit": project.lutSetting8Bit(),
            "lutSettingFloat": project.lutSettingFloat(),
            "lutSettingLog": project.lutSettingLog(),
            "lutSettingViewer": project.lutSettingViewer(),
            "lutSettingWorkingSpace": project.lutSettingWorkingSpace(),
            "lutUseOCIOForExport": project.lutUseOCIOForExport(),
            "ocioConfigName": project.ocioConfigName(),
            "ocioConfigPath": project.ocioConfigPath()
        }
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
from pyblish import api
|
||||
|
||||
|
||||
class CollectClipTagComments(api.InstancePlugin):
    """Collect comments from tags on selected track items and their sources."""

    order = api.CollectorOrder + 0.013
    label = "Collect Comments"
    hosts = ["hiero"]
    families = ["clip"]

    def process(self, instance):
        """Gather 'comment' tag notes and reflect their count in the label."""
        # Notes from the instance's own (serialized) tags first.
        comments = [
            tag["metadata"]["tag.note"]
            for tag in instance.data["tags"]
            if tag["name"].lower() == "comment"
        ]

        # Then notes from tags on the source clip itself.
        source_tags = instance.data["item"].source().tags()
        comments.extend(
            tag.metadata().dict()["tag.note"]
            for tag in source_tags
            if tag.name().lower() == "comment"
        )

        instance.data["comments"] = comments

        # Update label with comments counter.
        instance.data["label"] = "{} - comments:{}".format(
            instance.data["label"],
            len(comments)
        )
|
||||
|
|
@ -0,0 +1,167 @@
|
|||
from pyblish import api
|
||||
import hiero
|
||||
import math
|
||||
from ayon_core.hosts.hiero.api.otio.hiero_export import create_otio_time_range
|
||||
|
||||
class PrecollectRetime(api.InstancePlugin):
    """Calculate Retiming of selected track items."""

    order = api.CollectorOrder - 0.578
    label = "Precollect Retime"
    hosts = ["hiero"]
    families = ['retime_']

    def process(self, instance):
        """Recompute source range and handles for retimed clips.

        Combines the track item's playback speed with any enabled TimeWarp
        soft effects linked to it, stores the recalculated source in/out
        (with handles) on the instance and rewrites the otio clip's
        source_range accordingly.
        """
        if not instance.data.get("versionData"):
            instance.data["versionData"] = {}

        # get basic variables
        otio_clip = instance.data["otioClip"]

        source_range = otio_clip.source_range
        oc_source_fps = source_range.start_time.rate
        oc_source_in = source_range.start_time.value

        handle_start = instance.data["handleStart"]
        handle_end = instance.data["handleEnd"]
        frame_start = instance.data["frameStart"]

        track_item = instance.data["item"]

        # define basic clip frame range variables
        timeline_in = int(track_item.timelineIn())
        timeline_out = int(track_item.timelineOut())
        source_in = int(track_item.sourceIn())
        source_out = int(track_item.sourceOut())
        speed = track_item.playbackSpeed()

        self.log.debug((
            "_BEFORE: \n timeline_in: `{0}`,\n timeline_out: `{1}`, \n "
            "source_in: `{2}`,\n source_out: `{3}`,\n speed: `{4}`,\n "
            "handle_start: `{5}`,\n handle_end: `{6}`").format(
                timeline_in,
                timeline_out,
                source_in,
                source_out,
                speed,
                handle_start,
                handle_end
        ))

        # loop within subtrack items
        time_warp_nodes = []
        source_in_change = 0
        source_out_change = 0
        for s_track_item in track_item.linkedItems():
            if isinstance(s_track_item, hiero.core.EffectTrackItem) \
                    and "TimeWarp" in s_track_item.node().Class():

                # adding timewarp attribute to instance
                # NOTE(review): this re-resets the accumulator for every
                # TimeWarp effect, so only the last one's node data
                # survives - confirm multiple TimeWarps are not expected.
                time_warp_nodes = []

                # ignore item if not enabled
                if s_track_item.isEnabled():
                    node = s_track_item.node()
                    name = node["name"].value()
                    look_up = node["lookup"].value()
                    animated = node["lookup"].isAnimated()
                    if animated:
                        # bake the lookup curve as per-frame offsets over
                        # the clip range including handles
                        look_up = [
                            ((node["lookup"].getValueAt(i)) - i)
                            for i in range(
                                (timeline_in - handle_start),
                                (timeline_out + handle_end) + 1)
                        ]
                        # calculate difference
                        diff_in = (node["lookup"].getValueAt(
                            timeline_in)) - timeline_in
                        diff_out = (node["lookup"].getValueAt(
                            timeline_out)) - timeline_out

                        # calculate source
                        source_in_change += diff_in
                        source_out_change += diff_out

                        # calculate speed
                        # NOTE(review): divides by 'timeline_in' /
                        # 'timeline_out'; a clip starting at timeline
                        # frame 0 would raise ZeroDivisionError - verify.
                        speed_in = (node["lookup"].getValueAt(timeline_in) / (
                            float(timeline_in) * .01)) * .01
                        speed_out = (node["lookup"].getValueAt(timeline_out) / (
                            float(timeline_out) * .01)) * .01

                        # calculate handles
                        handle_start = int(
                            math.ceil(
                                (handle_start * speed_in * 1000) / 1000.0)
                        )

                        handle_end = int(
                            math.ceil(
                                (handle_end * speed_out * 1000) / 1000.0)
                        )
                        self.log.debug(
                            ("diff_in, diff_out", diff_in, diff_out))
                        self.log.debug(
                            ("source_in_change, source_out_change",
                             source_in_change, source_out_change))

                    time_warp_nodes.append({
                        "Class": "TimeWarp",
                        "name": name,
                        "lookup": look_up
                    })

        self.log.debug(
            "timewarp source in changes: in {}, out {}".format(
                source_in_change, source_out_change))

        # recalculate handles by the speed
        handle_start *= speed
        handle_end *= speed
        self.log.debug("speed: handle_start: '{0}', handle_end: '{1}'".format(
            handle_start, handle_end))

        # recalculate source with timewarp and by the speed
        source_in += int(source_in_change)
        source_out += int(source_out_change * speed)

        # source range extended by the (speed-scaled) handles
        source_in_h = int(source_in - math.ceil(
            (handle_start * 1000) / 1000.0))
        source_out_h = int(source_out + math.ceil(
            (handle_end * 1000) / 1000.0))

        self.log.debug(
            "retimed: source_in_h: '{0}', source_out_h: '{1}'".format(
                source_in_h, source_out_h))

        # add all data to Instance
        instance.data["handleStart"] = handle_start
        instance.data["handleEnd"] = handle_end
        instance.data["sourceIn"] = source_in
        instance.data["sourceOut"] = source_out
        instance.data["sourceInH"] = source_in_h
        instance.data["sourceOutH"] = source_out_h
        instance.data["speed"] = speed

        source_handle_start = source_in_h - source_in
        # frame_start = instance.data["frameStart"] + source_handle_start
        duration = source_out_h - source_in_h
        frame_end = int(frame_start + duration - (handle_start + handle_end))

        instance.data["versionData"].update({
            "retime": True,
            "speed": speed,
            "timewarps": time_warp_nodes,
            "frameStart": frame_start,
            "frameEnd": frame_end,
            "handleStart": abs(source_handle_start),
            "handleEnd": source_out_h - source_out
        })
        self.log.debug("versionData: {}".format(instance.data["versionData"]))
        self.log.debug("sourceIn: {}".format(instance.data["sourceIn"]))
        self.log.debug("sourceOut: {}".format(instance.data["sourceOut"]))
        self.log.debug("speed: {}".format(instance.data["speed"]))

        # change otio clip data
        instance.data["otioClip"].source_range = create_otio_time_range(
            oc_source_in, (source_out - source_in + 1), oc_source_fps)
        self.log.debug("otioClip: {}".format(instance.data["otioClip"]))
|
||||
33
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/__init__.py
vendored
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
# Copyright 2007 Google Inc. All Rights Reserved.
|
||||
|
||||
# Version of the vendored protobuf Python runtime.
__version__ = '3.20.1'
|
||||
26
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/any_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/any.proto
# NOTE: machine-generated module (protobuf 3.20.x "builder" style); any manual
# change will be lost on regeneration -- edit the .proto file instead.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database into which the generated messages are registered.
_sym_db = _symbol_database.Default()


# Serialized FileDescriptorProto for google/protobuf/any.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"&\n\x03\x41ny\x12\x10\n\x08type_url\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x0c\x42v\n\x13\x63om.google.protobufB\x08\x41nyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.any_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors: attach serialized options and byte offsets of
  # each message within the serialized file above.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010AnyProtoP\001Z,google.golang.org/protobuf/types/known/anypb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'
  _ANY._serialized_start=46
  _ANY._serialized_end=84
# @@protoc_insertion_point(module_scope)
32
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/api_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/api.proto
# NOTE: machine-generated module (protobuf 3.20.x "builder" style); any manual
# change will be lost on regeneration -- edit the .proto file instead.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database into which the generated messages are registered.
_sym_db = _symbol_database.Default()


# Dependencies of api.proto: their modules must be imported so the pool
# already contains source_context.proto and type.proto.
from google.protobuf import source_context_pb2 as google_dot_protobuf_dot_source__context__pb2
from google.protobuf import type_pb2 as google_dot_protobuf_dot_type__pb2


# Serialized FileDescriptorProto for google/protobuf/api.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19google/protobuf/api.proto\x12\x0fgoogle.protobuf\x1a$google/protobuf/source_context.proto\x1a\x1agoogle/protobuf/type.proto\"\x81\x02\n\x03\x41pi\x12\x0c\n\x04name\x18\x01 \x01(\t\x12(\n\x07methods\x18\x02 \x03(\x0b\x32\x17.google.protobuf.Method\x12(\n\x07options\x18\x03 \x03(\x0b\x32\x17.google.protobuf.Option\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x36\n\x0esource_context\x18\x05 \x01(\x0b\x32\x1e.google.protobuf.SourceContext\x12&\n\x06mixins\x18\x06 \x03(\x0b\x32\x16.google.protobuf.Mixin\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"\xd5\x01\n\x06Method\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x18\n\x10request_type_url\x18\x02 \x01(\t\x12\x19\n\x11request_streaming\x18\x03 \x01(\x08\x12\x19\n\x11response_type_url\x18\x04 \x01(\t\x12\x1a\n\x12response_streaming\x18\x05 \x01(\x08\x12(\n\x07options\x18\x06 \x03(\x0b\x32\x17.google.protobuf.Option\x12\'\n\x06syntax\x18\x07 \x01(\x0e\x32\x17.google.protobuf.Syntax\"#\n\x05Mixin\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04root\x18\x02 \x01(\tBv\n\x13\x63om.google.protobufB\x08\x41piProtoP\x01Z,google.golang.org/protobuf/types/known/apipb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.api_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors: attach serialized options and byte offsets of
  # each message within the serialized file above.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\010ApiProtoP\001Z,google.golang.org/protobuf/types/known/apipb\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'
  _API._serialized_start=113
  _API._serialized_end=370
  _METHOD._serialized_start=373
  _METHOD._serialized_end=586
  _MIXIN._serialized_start=588
  _MIXIN._serialized_end=623
# @@protoc_insertion_point(module_scope)
0
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/compiler/__init__.py
vendored
Normal file
35
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/compiler/plugin_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,35 @@
|
|||
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/compiler/plugin.proto
# NOTE: machine-generated module (protobuf 3.20.x "builder" style); any manual
# change will be lost on regeneration -- edit the .proto file instead.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database into which the generated messages are registered.
_sym_db = _symbol_database.Default()


# Dependency of plugin.proto: descriptor.proto must already be in the pool.
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2


# Serialized FileDescriptorProto for google/protobuf/compiler/plugin.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n%google/protobuf/compiler/plugin.proto\x12\x18google.protobuf.compiler\x1a google/protobuf/descriptor.proto\"F\n\x07Version\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x0e\n\x06suffix\x18\x04 \x01(\t\"\xba\x01\n\x14\x43odeGeneratorRequest\x12\x18\n\x10\x66ile_to_generate\x18\x01 \x03(\t\x12\x11\n\tparameter\x18\x02 \x01(\t\x12\x38\n\nproto_file\x18\x0f \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\x12;\n\x10\x63ompiler_version\x18\x03 \x01(\x0b\x32!.google.protobuf.compiler.Version\"\xc1\x02\n\x15\x43odeGeneratorResponse\x12\r\n\x05\x65rror\x18\x01 \x01(\t\x12\x1a\n\x12supported_features\x18\x02 \x01(\x04\x12\x42\n\x04\x66ile\x18\x0f \x03(\x0b\x32\x34.google.protobuf.compiler.CodeGeneratorResponse.File\x1a\x7f\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x17\n\x0finsertion_point\x18\x02 \x01(\t\x12\x0f\n\x07\x63ontent\x18\x0f \x01(\t\x12?\n\x13generated_code_info\x18\x10 \x01(\x0b\x32\".google.protobuf.GeneratedCodeInfo\"8\n\x07\x46\x65\x61ture\x12\x10\n\x0c\x46\x45\x41TURE_NONE\x10\x00\x12\x1b\n\x17\x46\x45\x41TURE_PROTO3_OPTIONAL\x10\x01\x42W\n\x1c\x63om.google.protobuf.compilerB\x0cPluginProtosZ)google.golang.org/protobuf/types/pluginpb')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.compiler.plugin_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors: attach serialized options and byte offsets of
  # each message/enum within the serialized file above.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n\034com.google.protobuf.compilerB\014PluginProtosZ)google.golang.org/protobuf/types/pluginpb'
  _VERSION._serialized_start=101
  _VERSION._serialized_end=171
  _CODEGENERATORREQUEST._serialized_start=174
  _CODEGENERATORREQUEST._serialized_end=360
  _CODEGENERATORRESPONSE._serialized_start=363
  _CODEGENERATORRESPONSE._serialized_end=684
  _CODEGENERATORRESPONSE_FILE._serialized_start=499
  _CODEGENERATORRESPONSE_FILE._serialized_end=626
  _CODEGENERATORRESPONSE_FEATURE._serialized_start=628
  _CODEGENERATORRESPONSE_FEATURE._serialized_end=684
# @@protoc_insertion_point(module_scope)
1224
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/descriptor.py
vendored
Normal file
177
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/descriptor_database.py
vendored
Normal file
|
|
@ -0,0 +1,177 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Provides a container for DescriptorProtos."""
|
||||
|
||||
__author__ = 'matthewtoia@google.com (Matt Toia)'
|
||||
|
||||
import warnings
|
||||
|
||||
|
||||
class Error(Exception):
  """Base exception for all errors raised by this module."""
|
||||
|
||||
class DescriptorDatabaseConflictingDefinitionError(Error):
  """Raised when a proto is added with the same name & different descriptor."""
|
||||
|
||||
class DescriptorDatabase(object):
  """A container accepting FileDescriptorProtos and maps DescriptorProtos."""

  def __init__(self):
    # file name -> FileDescriptorProto, and fully qualified symbol name ->
    # the FileDescriptorProto that defines it.
    self._file_desc_protos_by_file = {}
    self._file_desc_protos_by_symbol = {}

  def Add(self, file_desc_proto):
    """Adds the FileDescriptorProto and its types to this database.

    Args:
      file_desc_proto: The FileDescriptorProto to add.

    Raises:
      DescriptorDatabaseConflictingDefinitionError: if an attempt is made to
        add a proto with the same name but different definition than an
        existing proto in the database.
    """
    proto_name = file_desc_proto.name
    if proto_name in self._file_desc_protos_by_file:
      if self._file_desc_protos_by_file[proto_name] != file_desc_proto:
        raise DescriptorDatabaseConflictingDefinitionError(
            '%s already added, but with different descriptor.' % proto_name)
      # Identical proto already registered; the symbol index is current.
      return
    self._file_desc_protos_by_file[proto_name] = file_desc_proto

    # Index every top-level descriptor under its fully qualified name.
    package = file_desc_proto.package
    for message_type in file_desc_proto.message_type:
      for symbol in _ExtractSymbols(message_type, package):
        self._AddSymbol(symbol, file_desc_proto)
    for enum_type in file_desc_proto.enum_type:
      self._AddSymbol('.'.join((package, enum_type.name)), file_desc_proto)
      for enum_value in enum_type.value:
        # Enum values live in the enclosing scope, so they are indexed under
        # the package directly (no conflict warning, matching upstream).
        self._file_desc_protos_by_symbol[
            '.'.join((package, enum_value.name))] = file_desc_proto
    for extension in file_desc_proto.extension:
      self._AddSymbol('.'.join((package, extension.name)), file_desc_proto)
    for service in file_desc_proto.service:
      self._AddSymbol('.'.join((package, service.name)), file_desc_proto)

  def FindFileByName(self, name):
    """Finds the file descriptor proto by file name.

    Typically the file name is a relative path ending to a .proto file. The
    proto with the given name will have to have been added to this database
    using the Add method or else an error will be raised.

    Args:
      name: The file name to find.

    Returns:
      The file descriptor proto matching the name.

    Raises:
      KeyError if no file by the given name was added.
    """
    return self._file_desc_protos_by_file[name]

  def FindFileContainingSymbol(self, symbol):
    """Finds the file descriptor proto containing the specified symbol.

    The symbol should be a fully qualified name including the file
    descriptor's package and any containing messages, e.g.
    'some.package.name.Message.NestedEnum'. The file descriptor proto
    containing the specified symbol must be added to this database using the
    Add method or else an error will be raised.

    Args:
      symbol: The fully qualified symbol name.

    Returns:
      The file descriptor proto containing the symbol.

    Raises:
      KeyError if no file contains the specified symbol.
    """
    try:
      return self._file_desc_protos_by_symbol[symbol]
    except KeyError:
      # Fields, enum values, and nested extensions are not indexed
      # individually; retry with the containing (top level) name. A
      # non-existent nested symbol under a valid top level descriptor is
      # therefore also found -- same behavior as protobuf C++.
      parent, _, _ = symbol.rpartition('.')
      try:
        return self._file_desc_protos_by_symbol[parent]
      except KeyError:
        # Report the symbol that was actually asked for.
        raise KeyError(symbol)

  def FindFileContainingExtension(self, extendee_name, extension_number):
    # TODO(jieluo): implement this API.
    return None

  def FindAllExtensionNumbers(self, extendee_name):
    # TODO(jieluo): implement this API.
    return []

  def _AddSymbol(self, name, file_desc_proto):
    # Last registration wins, but a conflicting re-registration is reported.
    if name in self._file_desc_protos_by_symbol:
      warnings.warn(
          'Conflict register for file "%s": %s is already defined in file "%s"'
          % (file_desc_proto.name, name,
             self._file_desc_protos_by_symbol[name].name),
          RuntimeWarning)
    self._file_desc_protos_by_symbol[name] = file_desc_proto
|
||||
|
||||
def _ExtractSymbols(desc_proto, package):
|
||||
"""Pulls out all the symbols from a descriptor proto.
|
||||
|
||||
Args:
|
||||
desc_proto: The proto to extract symbols from.
|
||||
package: The package containing the descriptor type.
|
||||
|
||||
Yields:
|
||||
The fully qualified name found in the descriptor.
|
||||
"""
|
||||
message_name = package + '.' + desc_proto.name if package else desc_proto.name
|
||||
yield message_name
|
||||
for nested_type in desc_proto.nested_type:
|
||||
for symbol in _ExtractSymbols(nested_type, message_name):
|
||||
yield symbol
|
||||
for enum_type in desc_proto.enum_type:
|
||||
yield '.'.join((message_name, enum_type.name))
|
||||
1925
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/descriptor_pb2.py
vendored
Normal file
1295
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/descriptor_pool.py
vendored
Normal file
26
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/duration_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/duration.proto
# NOTE: machine-generated module (protobuf 3.20.x "builder" style); any manual
# change will be lost on regeneration -- edit the .proto file instead.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database into which the generated messages are registered.
_sym_db = _symbol_database.Default()


# Serialized FileDescriptorProto for google/protobuf/duration.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\"*\n\x08\x44uration\x12\x0f\n\x07seconds\x18\x01 \x01(\x03\x12\r\n\x05nanos\x18\x02 \x01(\x05\x42\x83\x01\n\x13\x63om.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.duration_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors: attach serialized options and byte offsets of
  # each message within the serialized file above.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\rDurationProtoP\001Z1google.golang.org/protobuf/types/known/durationpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'
  _DURATION._serialized_start=51
  _DURATION._serialized_end=93
# @@protoc_insertion_point(module_scope)
26
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/empty_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/empty.proto
# NOTE: machine-generated module (protobuf 3.20.x "builder" style); any manual
# change will be lost on regeneration -- edit the .proto file instead.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database into which the generated messages are registered.
_sym_db = _symbol_database.Default()


# Serialized FileDescriptorProto for google/protobuf/empty.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\x07\n\x05\x45mptyB}\n\x13\x63om.google.protobufB\nEmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.empty_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors: attach serialized options and byte offsets of
  # each message within the serialized file above.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\nEmptyProtoP\001Z.google.golang.org/protobuf/types/known/emptypb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'
  _EMPTY._serialized_start=48
  _EMPTY._serialized_end=55
# @@protoc_insertion_point(module_scope)
26
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/field_mask_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/field_mask.proto
# NOTE: machine-generated module (protobuf 3.20.x "builder" style); any manual
# change will be lost on regeneration -- edit the .proto file instead.
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

# Default symbol database into which the generated messages are registered.
_sym_db = _symbol_database.Default()


# Serialized FileDescriptorProto for google/protobuf/field_mask.proto.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"\x1a\n\tFieldMask\x12\r\n\x05paths\x18\x01 \x03(\tB\x85\x01\n\x13\x63om.google.protobufB\x0e\x46ieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3')

_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.field_mask_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  # Pure-Python descriptors: attach serialized options and byte offsets of
  # each message within the serialized file above.
  DESCRIPTOR._options = None
  DESCRIPTOR._serialized_options = b'\n\023com.google.protobufB\016FieldMaskProtoP\001Z2google.golang.org/protobuf/types/known/fieldmaskpb\370\001\001\242\002\003GPB\252\002\036Google.Protobuf.WellKnownTypes'
  _FIELDMASK._serialized_start=53
  _FIELDMASK._serialized_end=79
# @@protoc_insertion_point(module_scope)
0
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/__init__.py
vendored
Normal file
443
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/_parameterized.py
vendored
Normal file
|
|
@ -0,0 +1,443 @@
|
|||
#! /usr/bin/env python
|
||||
#
|
||||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Adds support for parameterized tests to Python's unittest TestCase class.
|
||||
|
||||
A parameterized test is a method in a test case that is invoked with different
|
||||
argument tuples.
|
||||
|
||||
A simple example:
|
||||
|
||||
class AdditionExample(parameterized.TestCase):
|
||||
@parameterized.parameters(
|
||||
(1, 2, 3),
|
||||
(4, 5, 9),
|
||||
(1, 1, 3))
|
||||
def testAddition(self, op1, op2, result):
|
||||
self.assertEqual(result, op1 + op2)
|
||||
|
||||
|
||||
Each invocation is a separate test case and properly isolated just
|
||||
like a normal test method, with its own setUp/tearDown cycle. In the
|
||||
example above, there are three separate testcases, one of which will
|
||||
fail due to an assertion error (1 + 1 != 3).
|
||||
|
||||
Parameters for individual test cases can be tuples (with positional parameters)
|
||||
or dictionaries (with named parameters):
|
||||
|
||||
class AdditionExample(parameterized.TestCase):
|
||||
@parameterized.parameters(
|
||||
{'op1': 1, 'op2': 2, 'result': 3},
|
||||
{'op1': 4, 'op2': 5, 'result': 9},
|
||||
)
|
||||
def testAddition(self, op1, op2, result):
|
||||
self.assertEqual(result, op1 + op2)
|
||||
|
||||
If a parameterized test fails, the error message will show the
|
||||
original test name (which is modified internally) and the arguments
|
||||
for the specific invocation, which are part of the string returned by
|
||||
the shortDescription() method on test cases.
|
||||
|
||||
The id method of the test, used internally by the unittest framework,
|
||||
is also modified to show the arguments. To make sure that test names
|
||||
stay the same across several invocations, object representations like
|
||||
|
||||
>>> class Foo(object):
|
||||
... pass
|
||||
>>> repr(Foo())
|
||||
'<__main__.Foo object at 0x23d8610>'
|
||||
|
||||
are turned into '<__main__.Foo>'. For even more descriptive names,
|
||||
especially in test logs, you can use the named_parameters decorator. In
|
||||
this case, only tuples are supported, and the first parameters has to
|
||||
be a string (or an object that returns an apt name when converted via
|
||||
str()):
|
||||
|
||||
class NamedExample(parameterized.TestCase):
|
||||
@parameterized.named_parameters(
|
||||
('Normal', 'aa', 'aaa', True),
|
||||
('EmptyPrefix', '', 'abc', True),
|
||||
('BothEmpty', '', '', True))
|
||||
def testStartsWith(self, prefix, string, result):
|
||||
self.assertEqual(result, strings.startswith(prefix))
|
||||
|
||||
Named tests also have the benefit that they can be run individually
|
||||
from the command line:
|
||||
|
||||
$ testmodule.py NamedExample.testStartsWithNormal
|
||||
.
|
||||
--------------------------------------------------------------------
|
||||
Ran 1 test in 0.000s
|
||||
|
||||
OK
|
||||
|
||||
Parameterized Classes
|
||||
=====================
|
||||
If invocation arguments are shared across test methods in a single
|
||||
TestCase class, instead of decorating all test methods
|
||||
individually, the class itself can be decorated:
|
||||
|
||||
@parameterized.parameters(
|
||||
(1, 2, 3)
|
||||
(4, 5, 9))
|
||||
class ArithmeticTest(parameterized.TestCase):
|
||||
def testAdd(self, arg1, arg2, result):
|
||||
self.assertEqual(arg1 + arg2, result)
|
||||
|
||||
def testSubtract(self, arg2, arg2, result):
|
||||
self.assertEqual(result - arg1, arg2)
|
||||
|
||||
Inputs from Iterables
|
||||
=====================
|
||||
If parameters should be shared across several test cases, or are dynamically
|
||||
created from other sources, a single non-tuple iterable can be passed into
|
||||
the decorator. This iterable will be used to obtain the test cases:
|
||||
|
||||
class AdditionExample(parameterized.TestCase):
|
||||
@parameterized.parameters(
|
||||
c.op1, c.op2, c.result for c in testcases
|
||||
)
|
||||
def testAddition(self, op1, op2, result):
|
||||
self.assertEqual(result, op1 + op2)
|
||||
|
||||
|
||||
Single-Argument Test Methods
|
||||
============================
|
||||
If a test method takes only one argument, the single argument does not need to
|
||||
be wrapped into a tuple:
|
||||
|
||||
class NegativeNumberExample(parameterized.TestCase):
|
||||
@parameterized.parameters(
|
||||
-1, -3, -4, -5
|
||||
)
|
||||
def testIsNegative(self, arg):
|
||||
self.assertTrue(IsNegative(arg))
|
||||
"""
|
||||
|
||||
__author__ = 'tmarek@google.com (Torsten Marek)'
|
||||
|
||||
import functools
|
||||
import re
|
||||
import types
|
||||
import unittest
|
||||
import uuid
|
||||
|
||||
try:
|
||||
# Since python 3
|
||||
import collections.abc as collections_abc
|
||||
except ImportError:
|
||||
# Won't work after python 3.8
|
||||
import collections as collections_abc
|
||||
|
||||
# Matches default object reprs like '<pkg.Cls object at 0x1234>' so that
# generated test names stay stable across runs (addresses vary per process).
ADDR_RE = re.compile(r'\<([a-zA-Z0-9_\-\.]+) object at 0x[a-fA-F0-9]+\>')
# Unique per-process separator used when building generated test ids.
_SEPARATOR = uuid.uuid1().hex
# Sentinels selecting the naming scheme: name tests after their first
# argument (_FIRST_ARG) or after the repr of their arguments (_ARGUMENT_REPR).
_FIRST_ARG = object()
_ARGUMENT_REPR = object()
|
||||
|
||||
def _CleanRepr(obj):
  """Returns repr(obj) with volatile memory addresses stripped."""
  raw = repr(obj)
  return ADDR_RE.sub(r'<\1>', raw)
|
||||
|
||||
# Helper function formerly from the unittest module, removed from it in
|
||||
# Python 2.7.
|
||||
def _StrClass(cls):
|
||||
return '%s.%s' % (cls.__module__, cls.__name__)
|
||||
|
||||
|
||||
def _NonStringIterable(obj):
|
||||
return (isinstance(obj, collections_abc.Iterable) and
|
||||
not isinstance(obj, str))
|
||||
|
||||
|
||||
def _FormatParameterList(testcase_params):
  """Renders testcase parameters as 'v1, v2' or 'k1=v1, k2=v2' for test ids."""
  if isinstance(testcase_params, collections_abc.Mapping):
    return ', '.join(
        '%s=%s' % (argname, _CleanRepr(value))
        for argname, value in testcase_params.items())
  if _NonStringIterable(testcase_params):
    return ', '.join(map(_CleanRepr, testcase_params))
  # A single bare argument is treated as a one-element tuple.
  return _FormatParameterList((testcase_params,))
|
||||
|
||||
class _ParameterizedTestIter(object):
  """Callable and iterable class for producing new test cases."""

  def __init__(self, test_method, testcases, naming_type):
    """Returns concrete test functions for a test and a list of parameters.

    The naming_type is used to determine the name of the concrete
    functions as reported by the unittest framework. If naming_type is
    _FIRST_ARG, the testcases must be tuples, and the first element must
    have a string representation that is a valid Python identifier.

    Args:
      test_method: The decorated test method.
      testcases: (list of tuple/dict) A list of parameter
                 tuples/dicts for individual test invocations.
      naming_type: The test naming type, either _NAMED or _ARGUMENT_REPR.
    """
    self._test_method = test_method
    self.testcases = testcases
    self._naming_type = naming_type

  def __call__(self, *args, **kwargs):
    # Reached only if the iterator was never expanded by the metaclass,
    # i.e. the test class does not inherit from parameterized.TestCase.
    raise RuntimeError('You appear to be running a parameterized test case '
                       'without having inherited from parameterized.'
                       'TestCase. This is bad because none of '
                       'your test cases are actually being run.')

  def __iter__(self):
    test_method = self._test_method
    naming_type = self._naming_type

    def MakeBoundParamTest(testcase_params):
      @functools.wraps(test_method)
      def BoundParamTest(self):
        # Dispatch on the parameter shape: mapping -> keyword args,
        # non-string iterable -> positional args, otherwise a single arg.
        if isinstance(testcase_params, collections_abc.Mapping):
          test_method(self, **testcase_params)
        elif _NonStringIterable(testcase_params):
          test_method(self, *testcase_params)
        else:
          test_method(self, testcase_params)

      if naming_type is _FIRST_ARG:
        # Signal the metaclass that the name of the test function is unique
        # and descriptive.
        BoundParamTest.__x_use_name__ = True
        # The first parameter becomes part of the method name and is then
        # dropped from the arguments actually passed to the test.
        BoundParamTest.__name__ += str(testcase_params[0])
        testcase_params = testcase_params[1:]
      elif naming_type is _ARGUMENT_REPR:
        # __x_extra_id__ is used to pass naming information to the __new__
        # method of TestGeneratorMetaclass.
        # The metaclass will make sure to create a unique, but nondescriptive
        # name for this test.
        BoundParamTest.__x_extra_id__ = '(%s)' % (
            _FormatParameterList(testcase_params),)
      else:
        raise RuntimeError('%s is not a valid naming type.' % (naming_type,))

      # The docstring carries the generated name plus the parameters so
      # shortDescription() shows which invocation failed.
      BoundParamTest.__doc__ = '%s(%s)' % (
          BoundParamTest.__name__, _FormatParameterList(testcase_params))
      if test_method.__doc__:
        BoundParamTest.__doc__ += '\n%s' % (test_method.__doc__,)
      return BoundParamTest
    return (MakeBoundParamTest(c) for c in self.testcases)
|
||||
|
||||
def _IsSingletonList(testcases):
|
||||
"""True iff testcases contains only a single non-tuple element."""
|
||||
return len(testcases) == 1 and not isinstance(testcases[0], tuple)
|
||||
|
||||
|
||||
def _ModifyClass(class_object, testcases, naming_type):
  """Expands each test method on class_object into parameterized variants."""
  # Guard against double decoration: a second decorator application would
  # try to re-expand already-generated methods.
  assert not getattr(class_object, '_id_suffix', None), (
      'Cannot add parameters to %s,'
      ' which already has parameterized methods.' % (class_object,))
  class_object._id_suffix = id_suffix = {}
  # We change the size of __dict__ while we iterate over it,
  # which Python 3.x will complain about, so use copy().
  for name, obj in class_object.__dict__.copy().items():
    if (name.startswith(unittest.TestLoader.testMethodPrefix)
        and isinstance(obj, types.FunctionType)):
      # Remove the original method and replace it with one generated method
      # per parameter tuple/dict.
      delattr(class_object, name)
      methods = {}
      _UpdateClassDictForParamTestCase(
          methods, id_suffix, name,
          _ParameterizedTestIter(obj, testcases, naming_type))
      for name, meth in methods.items():
        setattr(class_object, name, meth)
|
||||
|
||||
def _ParameterDecorator(naming_type, testcases):
  """Implementation of the parameterization decorators.

  Args:
    naming_type: The naming type.
    testcases: Testcase parameters.

  Returns:
    A function for modifying the decorated object.
  """
  def _Apply(obj):
    # Decorating a class: rewrite its test methods in place.
    if isinstance(obj, type):
      _ModifyClass(
          obj,
          # A non-sequence iterable could be consumed more than once (once per
          # test method), so materialize it into a list first.
          list(testcases) if not isinstance(testcases, collections_abc.Sequence)
          else testcases,
          naming_type)
      return obj
    else:
      # Decorating a bare function: return a test generator that the
      # TestGeneratorMetaclass will expand into individual methods.
      return _ParameterizedTestIter(obj, testcases, naming_type)

  # A single iterable argument (e.g. @parameters([1, 2, 3])) is unwrapped so
  # its elements become the individual test cases.
  if _IsSingletonList(testcases):
    assert _NonStringIterable(testcases[0]), (
        'Single parameter argument must be a non-string iterable')
    testcases = testcases[0]

  return _Apply
|
||||
|
||||
|
||||
def parameters(*testcases):  # pylint: disable=invalid-name
  """A decorator for creating parameterized tests.

  See the module docstring for a usage example.

  Args:
    *testcases: Parameters for the decorated method, either a single
                iterable, or a list of tuples/dicts/objects (for tests
                with only one argument).

  Returns:
    A test generator to be handled by TestGeneratorMetaclass.
  """
  # Repr-based naming: generated tests get an opaque, unique suffix.
  decorator = _ParameterDecorator(_ARGUMENT_REPR, testcases)
  return decorator
|
||||
|
||||
|
||||
def named_parameters(*testcases):  # pylint: disable=invalid-name
  """A decorator for creating parameterized tests.

  See the module docstring for a usage example. The first element of
  each parameter tuple should be a string and will be appended to the
  name of the test method.

  Args:
    *testcases: Parameters for the decorated method, either a single
                iterable, or a list of tuples.

  Returns:
    A test generator to be handled by TestGeneratorMetaclass.
  """
  # First-argument naming: the leading string of each tuple names the test.
  decorator = _ParameterDecorator(_FIRST_ARG, testcases)
  return decorator
|
||||
|
||||
|
||||
class TestGeneratorMetaclass(type):
  """Metaclass for test cases with test generators.

  A test generator is an iterable in a testcase that produces callables. These
  callables must be single-argument methods. These methods are injected into
  the class namespace and the original iterable is removed. If the name of the
  iterable conforms to the test pattern, the injected methods will be picked
  up as tests by the unittest framework.

  In general, it is supposed to be used in conjunction with the
  parameters decorator.
  """

  def __new__(mcs, class_name, bases, dct):
    # Maps generated method name -> descriptive id suffix; consumed by
    # TestCase.id().
    dct['_id_suffix'] = id_suffix = {}
    # dct is mutated below (pop + new entries), so iterate over a copy.
    for name, obj in dct.copy().items():
      if (name.startswith(unittest.TestLoader.testMethodPrefix) and
          _NonStringIterable(obj)):
        iterator = iter(obj)
        # Remove the generator itself; only the expanded methods remain.
        dct.pop(name)
        _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator)

    return type.__new__(mcs, class_name, bases, dct)
|
||||
|
||||
|
||||
def _UpdateClassDictForParamTestCase(dct, id_suffix, name, iterator):
|
||||
"""Adds individual test cases to a dictionary.
|
||||
|
||||
Args:
|
||||
dct: The target dictionary.
|
||||
id_suffix: The dictionary for mapping names to test IDs.
|
||||
name: The original name of the test case.
|
||||
iterator: The iterator generating the individual test cases.
|
||||
"""
|
||||
for idx, func in enumerate(iterator):
|
||||
assert callable(func), 'Test generators must yield callables, got %r' % (
|
||||
func,)
|
||||
if getattr(func, '__x_use_name__', False):
|
||||
new_name = func.__name__
|
||||
else:
|
||||
new_name = '%s%s%d' % (name, _SEPARATOR, idx)
|
||||
assert new_name not in dct, (
|
||||
'Name of parameterized test case "%s" not unique' % (new_name,))
|
||||
dct[new_name] = func
|
||||
id_suffix[new_name] = getattr(func, '__x_extra_id__', '')
|
||||
|
||||
|
||||
class TestCase(unittest.TestCase, metaclass=TestGeneratorMetaclass):
  """Base class for test cases using the parameters decorator."""

  def _OriginalName(self):
    # Generated method names are '<original><separator><index>'; strip
    # everything after the separator to recover the author-written name.
    return self._testMethodName.split(_SEPARATOR)[0]

  def __str__(self):
    return '%s (%s)' % (self._OriginalName(), _StrClass(self.__class__))

  def id(self):  # pylint: disable=invalid-name
    """Returns the descriptive ID of the test.

    This is used internally by the unittesting framework to get a name
    for the test to be used in reports.

    Returns:
      The test id.
    """
    # _id_suffix is populated by TestGeneratorMetaclass; missing keys (e.g.
    # non-parameterized methods) contribute an empty suffix.
    return '%s.%s%s' % (_StrClass(self.__class__),
                        self._OriginalName(),
                        self._id_suffix.get(self._testMethodName, ''))
|
||||
|
||||
|
||||
def CoopTestCase(other_base_class):
  """Returns a new base class with a cooperative metaclass base.

  This enables the TestCase to be used in combination
  with other base classes that have custom metaclasses, such as
  mox.MoxTestBase.

  Only works with metaclasses that do not override type.__new__.

  Example:

    import google3
    import mox

    from google3.testing.pybase import parameterized

    class ExampleTest(parameterized.CoopTestCase(mox.MoxTestBase)):
      ...

  Args:
    other_base_class: (class) A test case base class.

  Returns:
    A new class object.
  """
  # NOTE(review): reads the Python-2-style ``__metaclass__`` attribute; a
  # class that does not define it will raise AttributeError here — confirm
  # callers only pass classes that set it.
  metaclass = type(
      'CoopMetaclass',
      (other_base_class.__metaclass__,
       TestGeneratorMetaclass), {})
  return metaclass(
      'CoopTestCase',
      (other_base_class, TestCase), {})
|
||||
112
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/api_implementation.py
vendored
Normal file
|
|
@ -0,0 +1,112 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Determine which implementation of the protobuf API is used in this process.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
# Resolve the protobuf implementation type ('python' or 'cpp') for this
# process, from compile-time constants and the environment.
try:
  # pylint: disable=g-import-not-at-top
  from google.protobuf.internal import _api_implementation
  # The compile-time constants in the _api_implementation module can be used to
  # switch to a certain implementation of the Python API at build time.
  _api_version = _api_implementation.api_version
except ImportError:
  _api_version = -1  # Unspecified by compiler flags.

if _api_version == 1:
  raise ValueError('api_version=1 is no longer supported.')


_default_implementation_type = ('cpp' if _api_version > 0 else 'python')


# This environment variable can be used to switch to a certain implementation
# of the Python API, overriding the compile-time constants in the
# _api_implementation module. Right now only 'python' and 'cpp' are valid
# values. Any other value will be ignored.
_implementation_type = os.getenv('PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION',
                                 _default_implementation_type)

# Anything other than an explicit 'python' is normalized to 'cpp'.
if _implementation_type != 'python':
  _implementation_type = 'cpp'

if 'PyPy' in sys.version and _implementation_type == 'cpp':
  warnings.warn('PyPy does not work yet with cpp protocol buffers. '
                'Falling back to the python implementation.')
  _implementation_type = 'python'


# Detect if serialization should be deterministic by default
try:
  # The presence of this module in a build allows the proto implementation to
  # be upgraded merely via build deps.
  #
  # NOTE: Merely importing this automatically enables deterministic proto
  # serialization for C++ code, but we still need to export it as a boolean so
  # that we can do the same for `_implementation_type == 'python'`.
  #
  # NOTE2: It is possible for C++ code to enable deterministic serialization by
  # default _without_ affecting Python code, if the C++ implementation is not in
  # use by this module. That is intended behavior, so we don't actually expose
  # this boolean outside of this module.
  #
  # pylint: disable=g-import-not-at-top,unused-import
  from google.protobuf import enable_deterministic_proto_serialization
  _python_deterministic_proto_serialization = True
except ImportError:
  _python_deterministic_proto_serialization = False
|
||||
|
||||
|
||||
# Usage of this function is discouraged. Clients shouldn't care which
|
||||
# implementation of the API is in use. Note that there is no guarantee
|
||||
# that differences between APIs will be maintained.
|
||||
# Please don't use this function if possible.
|
||||
def Type():
  """Returns the implementation type in use: 'python' or 'cpp'."""
  return _implementation_type
|
||||
|
||||
|
||||
def _SetType(implementation_type):
  """Never use! Only for protobuf benchmark."""
  # Mutates the module-level cached implementation type; no validation is
  # performed on the new value.
  global _implementation_type
  _implementation_type = implementation_type
|
||||
|
||||
|
||||
# See comment on 'Type' above.
|
||||
def Version():
  """Returns the major version of the protobuf Python API (always 2)."""
  return 2
|
||||
|
||||
|
||||
# For internal use only
|
||||
def IsPythonDefaultSerializationDeterministic():
  """Whether pure-Python serialization is deterministic by default.

  True iff google.protobuf.enable_deterministic_proto_serialization was
  importable when this module loaded.
  """
  return _python_deterministic_proto_serialization
|
||||
130
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/builder.py
vendored
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Builds descriptors, message classes and services for generated _pb2.py.
|
||||
|
||||
This file is only called in python generated _pb2.py files. It builds
|
||||
descriptors, message classes and services that users can directly use
|
||||
in generated code.
|
||||
"""
|
||||
|
||||
__author__ = 'jieluo@google.com (Jie Luo)'
|
||||
|
||||
from google.protobuf.internal import enum_type_wrapper
|
||||
from google.protobuf import message as _message
|
||||
from google.protobuf import reflection as _reflection
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
def BuildMessageAndEnumDescriptors(file_des, module):
  """Builds message and enum descriptors.

  Exposes every message descriptor (recursively) and enum descriptor of the
  file in ``module`` under an underscore-prefixed, upper-cased key.

  Args:
    file_des: FileDescriptor of the .proto file
    module: Generated _pb2 module
  """

  def _ExportNested(descriptor, prefix):
    # Nested messages first, then the enums declared on this message.
    for child_name, child_des in descriptor.nested_types_by_name.items():
      key = prefix + child_name.upper()
      module[key] = child_des
      _ExportNested(child_des, key + '_')
    for enum_descriptor in descriptor.enum_types:
      module[prefix + enum_descriptor.name.upper()] = enum_descriptor

  for top_name, top_des in file_des.message_types_by_name.items():
    top_key = '_' + top_name.upper()
    module[top_key] = top_des
    _ExportNested(top_des, top_key + '_')
|
||||
|
||||
|
||||
def BuildTopDescriptorsAndMessages(file_des, module_name, module):
  """Builds top level descriptors and message classes.

  Args:
    file_des: FileDescriptor of the .proto file
    module_name: str, the name of generated _pb2 module
    module: Generated _pb2 module
  """

  def BuildMessage(msg_des):
    # Recursively create nested message classes first so they can be placed
    # in the class dict of their parent.
    create_dict = {}
    for (name, nested_msg) in msg_des.nested_types_by_name.items():
      create_dict[name] = BuildMessage(nested_msg)
    create_dict['DESCRIPTOR'] = msg_des
    create_dict['__module__'] = module_name
    message_class = _reflection.GeneratedProtocolMessageType(
        msg_des.name, (_message.Message,), create_dict)
    # Register with the default symbol database so the class can be looked up
    # by full name later.
    _sym_db.RegisterMessage(message_class)
    return message_class

  # top level enums: expose the descriptor ('_NAME'), a wrapper ('NAME'),
  # and each enum value as a module-level integer constant.
  for (name, enum_des) in file_des.enum_types_by_name.items():
    module['_' + name.upper()] = enum_des
    module[name] = enum_type_wrapper.EnumTypeWrapper(enum_des)
    for enum_value in enum_des.values:
      module[enum_value.name] = enum_value.number

  # top level extensions
  for (name, extension_des) in file_des.extensions_by_name.items():
    module[name.upper() + '_FIELD_NUMBER'] = extension_des.number
    module[name] = extension_des

  # services
  for (name, service) in file_des.services_by_name.items():
    module['_' + name.upper()] = service

  # Build messages.
  for (name, msg_des) in file_des.message_types_by_name.items():
    module[name] = BuildMessage(msg_des)
|
||||
|
||||
|
||||
def BuildServices(file_des, module_name, module):
  """Builds services classes and services stub class.

  Args:
    file_des: FileDescriptor of the .proto file
    module_name: str, the name of generated _pb2 module
    module: Generated _pb2 module
  """
  # Imported locally so the service machinery is only required when a file
  # actually declares services.
  # pylint: disable=g-import-not-at-top
  from google.protobuf import service as _service
  from google.protobuf import service_reflection
  # pylint: enable=g-import-not-at-top
  for (name, service) in file_des.services_by_name.items():
    module[name] = service_reflection.GeneratedServiceType(
        name, (_service.Service,),
        dict(DESCRIPTOR=service, __module__=module_name))
    # The stub class subclasses the service class created just above.
    stub_name = name + '_Stub'
    module[stub_name] = service_reflection.GeneratedServiceStubType(
        stub_name, (module[name],),
        dict(DESCRIPTOR=service, __module__=module_name))
|
||||
710
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/containers.py
vendored
Normal file
|
|
@ -0,0 +1,710 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Contains container classes to represent different protocol buffer types.
|
||||
|
||||
This file defines container classes which represent categories of protocol
|
||||
buffer field types which need extra maintenance. Currently these categories
|
||||
are:
|
||||
|
||||
- Repeated scalar fields - These are all repeated fields which aren't
|
||||
composite (e.g. they are of simple types like int32, string, etc).
|
||||
- Repeated composite fields - Repeated fields which are composite. This
|
||||
includes groups and nested messages.
|
||||
"""
|
||||
|
||||
import collections.abc
|
||||
import copy
|
||||
import pickle
|
||||
from typing import (
|
||||
Any,
|
||||
Iterable,
|
||||
Iterator,
|
||||
List,
|
||||
MutableMapping,
|
||||
MutableSequence,
|
||||
NoReturn,
|
||||
Optional,
|
||||
Sequence,
|
||||
TypeVar,
|
||||
Union,
|
||||
overload,
|
||||
)
|
||||
|
||||
|
||||
_T = TypeVar('_T')
|
||||
_K = TypeVar('_K')
|
||||
_V = TypeVar('_V')
|
||||
|
||||
|
||||
class BaseContainer(Sequence[_T]):
  """Base container class."""

  # Minimizes memory usage and disallows assignment to other attributes.
  __slots__ = ['_message_listener', '_values']

  def __init__(self, message_listener: Any) -> None:
    """
    Args:
      message_listener: A MessageListener implementation.
        The RepeatedScalarFieldContainer will call this object's
        Modified() method when it is modified.
    """
    self._message_listener = message_listener
    self._values = []

  @overload
  def __getitem__(self, key: int) -> _T:
    ...

  @overload
  def __getitem__(self, key: slice) -> List[_T]:
    ...

  def __getitem__(self, key):
    """Retrieves item by the specified key."""
    return self._values[key]

  def __len__(self) -> int:
    """Returns the number of elements in the container."""
    return len(self._values)

  def __ne__(self, other: Any) -> bool:
    """Checks if another instance isn't equal to this one."""
    # The concrete classes should define __eq__.
    return not self == other

  # Explicitly unhashable: subclasses define __eq__ without __hash__.
  __hash__ = None

  def __repr__(self) -> str:
    return repr(self._values)

  def sort(self, *args, **kwargs) -> None:
    """Sorts the underlying values in place. Similar to list.sort()."""
    # Continue to support the old sort_function keyword argument.
    # This is expected to be a rare occurrence, so use LBYL to avoid
    # the overhead of actually catching KeyError.
    # NOTE(review): list.sort() no longer accepts 'cmp' in Python 3, so this
    # legacy path would raise TypeError if actually taken — confirm whether
    # any caller still passes sort_function.
    if 'sort_function' in kwargs:
      kwargs['cmp'] = kwargs.pop('sort_function')
    self._values.sort(*args, **kwargs)

  def reverse(self) -> None:
    """Reverses the underlying values in place. Similar to list.reverse()."""
    self._values.reverse()
|
||||
|
||||
|
||||
# TODO(slebedev): Remove this. BaseContainer does *not* conform to
# MutableSequence, only its subclasses do.
# Kept so isinstance(container, collections.abc.MutableSequence) stays True
# for existing callers.
collections.abc.MutableSequence.register(BaseContainer)
|
||||
|
||||
|
||||
class RepeatedScalarFieldContainer(BaseContainer[_T], MutableSequence[_T]):
  """Simple, type-checked, list-like container for holding repeated scalars."""

  # Disallows assignment to other attributes.
  __slots__ = ['_type_checker']

  def __init__(
      self,
      message_listener: Any,
      type_checker: Any,
  ) -> None:
    """Args:

    message_listener: A MessageListener implementation. The
      RepeatedScalarFieldContainer will call this object's Modified() method
      when it is modified.
    type_checker: A type_checkers.ValueChecker instance to run on elements
      inserted into this container.
    """
    super().__init__(message_listener)
    self._type_checker = type_checker

  def append(self, value: _T) -> None:
    """Appends an item to the list. Similar to list.append()."""
    self._values.append(self._type_checker.CheckValue(value))
    # Only notify if the listener is not already marked dirty.
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def insert(self, key: int, value: _T) -> None:
    """Inserts the item at the specified position. Similar to list.insert()."""
    self._values.insert(key, self._type_checker.CheckValue(value))
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def extend(self, elem_seq: Iterable[_T]) -> None:
    """Extends by appending the given iterable. Similar to list.extend()."""
    if elem_seq is None:
      return
    try:
      elem_seq_iter = iter(elem_seq)
    except TypeError:
      if not elem_seq:
        # silently ignore falsy inputs :-/.
        # TODO(ptucker): Deprecate this behavior. b/18413862
        return
      raise

    # Type-check every element before mutating, so a failure leaves the
    # container unchanged.
    new_values = [self._type_checker.CheckValue(elem) for elem in elem_seq_iter]
    if new_values:
      self._values.extend(new_values)
      self._message_listener.Modified()

  def MergeFrom(
      self,
      other: Union['RepeatedScalarFieldContainer[_T]', Iterable[_T]],
  ) -> None:
    """Appends the contents of another repeated field of the same type to this
    one. We do not check the types of the individual fields.
    """
    self._values.extend(other)
    self._message_listener.Modified()

  def remove(self, elem: _T):
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()

  def pop(self, key: Optional[int] = -1) -> _T:
    """Removes and returns an item at a given index. Similar to list.pop()."""
    value = self._values[key]
    # Delegates to __delitem__ so the listener notification happens there.
    self.__delitem__(key)
    return value

  @overload
  def __setitem__(self, key: int, value: _T) -> None:
    ...

  @overload
  def __setitem__(self, key: slice, value: Iterable[_T]) -> None:
    ...

  def __setitem__(self, key, value) -> None:
    """Sets the item on the specified position."""
    if isinstance(key, slice):
      if key.step is not None:
        raise ValueError('Extended slices not supported')
      self._values[key] = map(self._type_checker.CheckValue, value)
      self._message_listener.Modified()
    else:
      self._values[key] = self._type_checker.CheckValue(value)
      self._message_listener.Modified()

  def __delitem__(self, key: Union[int, slice]) -> None:
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __eq__(self, other: Any) -> bool:
    """Compares the current instance with another one."""
    if self is other:
      return True
    # Special case for the same type which should be common and fast.
    if isinstance(other, self.__class__):
      return other._values == self._values
    # We are presumably comparing against some other sequence type.
    return other == self._values

  def __deepcopy__(
      self,
      unused_memo: Any = None,
  ) -> 'RepeatedScalarFieldContainer[_T]':
    # The listener is deep-copied; the type checker is shared (stateless).
    clone = RepeatedScalarFieldContainer(
        copy.deepcopy(self._message_listener), self._type_checker)
    clone.MergeFrom(self)
    return clone

  def __reduce__(self, **kwargs) -> NoReturn:
    raise pickle.PickleError(
        "Can't pickle repeated scalar fields, convert to list first")
|
||||
|
||||
|
||||
# TODO(slebedev): Constrain T to be a subtype of Message.
|
||||
class RepeatedCompositeFieldContainer(BaseContainer[_T], MutableSequence[_T]):
  """Simple, list-like container for holding repeated composite fields."""

  # Disallows assignment to other attributes.
  __slots__ = ['_message_descriptor']

  def __init__(self, message_listener: Any, message_descriptor: Any) -> None:
    """
    Note that we pass in a descriptor instead of the generated directly,
    since at the time we construct a _RepeatedCompositeFieldContainer we
    haven't yet necessarily initialized the type that will be contained in the
    container.

    Args:
      message_listener: A MessageListener implementation.
        The RepeatedCompositeFieldContainer will call this object's
        Modified() method when it is modified.
      message_descriptor: A Descriptor instance describing the protocol type
        that should be present in this container. We'll use the
        _concrete_class field of this descriptor when the client calls add().
    """
    super().__init__(message_listener)
    self._message_descriptor = message_descriptor

  def add(self, **kwargs: Any) -> _T:
    """Adds a new element at the end of the list and returns it. Keyword
    arguments may be used to initialize the element.
    """
    new_element = self._message_descriptor._concrete_class(**kwargs)
    new_element._SetListener(self._message_listener)
    self._values.append(new_element)
    if not self._message_listener.dirty:
      self._message_listener.Modified()
    return new_element

  def append(self, value: _T) -> None:
    """Appends one element by copying the message."""
    # Messages are always copied in, never aliased.
    new_element = self._message_descriptor._concrete_class()
    new_element._SetListener(self._message_listener)
    new_element.CopyFrom(value)
    self._values.append(new_element)
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def insert(self, key: int, value: _T) -> None:
    """Inserts the item at the specified position by copying."""
    new_element = self._message_descriptor._concrete_class()
    new_element._SetListener(self._message_listener)
    new_element.CopyFrom(value)
    self._values.insert(key, new_element)
    if not self._message_listener.dirty:
      self._message_listener.Modified()

  def extend(self, elem_seq: Iterable[_T]) -> None:
    """Extends by appending the given sequence of elements of the same type

    as this one, copying each individual message.
    """
    # Hoist attribute lookups out of the loop.
    message_class = self._message_descriptor._concrete_class
    listener = self._message_listener
    values = self._values
    for message in elem_seq:
      new_element = message_class()
      new_element._SetListener(listener)
      new_element.MergeFrom(message)
      values.append(new_element)
    listener.Modified()

  def MergeFrom(
      self,
      other: Union['RepeatedCompositeFieldContainer[_T]', Iterable[_T]],
  ) -> None:
    """Appends the contents of another repeated field of the same type to this
    one, copying each individual message.
    """
    self.extend(other)

  def remove(self, elem: _T) -> None:
    """Removes an item from the list. Similar to list.remove()."""
    self._values.remove(elem)
    self._message_listener.Modified()

  def pop(self, key: Optional[int] = -1) -> _T:
    """Removes and returns an item at a given index. Similar to list.pop()."""
    value = self._values[key]
    # Delegates to __delitem__ so the listener notification happens there.
    self.__delitem__(key)
    return value

  @overload
  def __setitem__(self, key: int, value: _T) -> None:
    ...

  @overload
  def __setitem__(self, key: slice, value: Iterable[_T]) -> None:
    ...

  def __setitem__(self, key, value):
    # This method is implemented to make RepeatedCompositeFieldContainer
    # structurally compatible with typing.MutableSequence. It is
    # otherwise unsupported and will always raise an error.
    raise TypeError(
        f'{self.__class__.__name__} object does not support item assignment')

  def __delitem__(self, key: Union[int, slice]) -> None:
    """Deletes the item at the specified position."""
    del self._values[key]
    self._message_listener.Modified()

  def __eq__(self, other: Any) -> bool:
    """Compares the current instance with another one."""
    if self is other:
      return True
    # Unlike the scalar container, comparison against arbitrary sequences is
    # rejected rather than attempted.
    if not isinstance(other, self.__class__):
      raise TypeError('Can only compare repeated composite fields against '
                      'other repeated composite fields.')
    return self._values == other._values
|
||||
|
||||
|
||||
class ScalarMap(MutableMapping[_K, _V]):
  """Simple, type-checked, dict-like container for holding repeated scalars."""

  # Disallows assignment to other attributes.
  __slots__ = ['_key_checker', '_value_checker', '_values', '_message_listener',
               '_entry_descriptor']

  def __init__(
      self,
      message_listener: Any,
      key_checker: Any,
      value_checker: Any,
      entry_descriptor: Any,
  ) -> None:
    """
    Args:
      message_listener: A MessageListener implementation.
        The ScalarMap will call this object's Modified() method when it
        is modified.
      key_checker: A type_checkers.ValueChecker instance to run on keys
        inserted into this container.
      value_checker: A type_checkers.ValueChecker instance to run on values
        inserted into this container.
      entry_descriptor: The MessageDescriptor of a map entry: key and value.
    """
    self._message_listener = message_listener
    self._key_checker = key_checker
    self._value_checker = value_checker
    self._entry_descriptor = entry_descriptor
    self._values = {}

  def __getitem__(self, key: _K) -> _V:
    try:
      # Fast path: key already present (and therefore already validated).
      return self._values[key]
    except KeyError:
      # Defaultdict-like behavior: validate the key and insert the value
      # type's default.  NOTE: no Modified() notification on this insert —
      # reading a missing key mirrors proto map semantics, not a mutation.
      key = self._key_checker.CheckValue(key)
      val = self._value_checker.DefaultValue()
      self._values[key] = val
      return val

  def __contains__(self, item: _K) -> bool:
    # We check the key's type to match the strong-typing flavor of the API.
    # Also this makes it easier to match the behavior of the C++ implementation.
    self._key_checker.CheckValue(item)
    return item in self._values

  @overload
  def get(self, key: _K) -> Optional[_V]:
    ...

  @overload
  def get(self, key: _K, default: _T) -> Union[_V, _T]:
    ...

  # We need to override this explicitly, because our defaultdict-like behavior
  # will make the default implementation (from our base class) always insert
  # the key.
  def get(self, key, default=None):
    if key in self:
      return self[key]
    else:
      return default

  def __setitem__(self, key: _K, value: _V) -> None:
    # NOTE(review): return annotation corrected from "_T" to None — the
    # method returns nothing.
    checked_key = self._key_checker.CheckValue(key)
    checked_value = self._value_checker.CheckValue(value)
    self._values[checked_key] = checked_value
    self._message_listener.Modified()

  def __delitem__(self, key: _K) -> None:
    del self._values[key]
    self._message_listener.Modified()

  def __len__(self) -> int:
    return len(self._values)

  def __iter__(self) -> Iterator[_K]:
    return iter(self._values)

  def __repr__(self) -> str:
    return repr(self._values)

  def MergeFrom(self, other: 'ScalarMap[_K, _V]') -> None:
    """Merges another ScalarMap into this one; duplicate keys are overwritten."""
    self._values.update(other._values)
    self._message_listener.Modified()

  def InvalidateIterators(self) -> None:
    # It appears that the only way to reliably invalidate iterators to
    # self._values is to ensure that its size changes.
    original = self._values
    self._values = original.copy()
    original[None] = None

  # This is defined in the abstract base, but we can do it much more cheaply.
  def clear(self) -> None:
    self._values.clear()
    self._message_listener.Modified()

  def GetEntryClass(self) -> Any:
    """Returns the generated class for a single key/value map-entry message."""
    return self._entry_descriptor._concrete_class
||||
|
||||
class MessageMap(MutableMapping[_K, _V]):
  """Simple, type-checked, dict-like container with submessage values."""

  # Disallows assignment to other attributes.
  __slots__ = ['_key_checker', '_values', '_message_listener',
               '_message_descriptor', '_entry_descriptor']

  def __init__(
      self,
      message_listener: Any,
      message_descriptor: Any,
      key_checker: Any,
      entry_descriptor: Any,
  ) -> None:
    """
    Args:
      message_listener: A MessageListener implementation.
        The MessageMap will call this object's Modified() method when it
        is modified.
      message_descriptor: The MessageDescriptor of the submessage values;
        its _concrete_class is instantiated for missing keys.
      key_checker: A type_checkers.ValueChecker instance to run on keys
        inserted into this container.
      entry_descriptor: The MessageDescriptor of a map entry: key and value.
    """
    self._message_listener = message_listener
    self._message_descriptor = message_descriptor
    self._key_checker = key_checker
    self._entry_descriptor = entry_descriptor
    self._values = {}

  def __getitem__(self, key: _K) -> _V:
    # Unlike ScalarMap, the key is always validated first, and inserting a
    # default submessage DOES notify the listener (the new message is live).
    key = self._key_checker.CheckValue(key)
    try:
      return self._values[key]
    except KeyError:
      new_element = self._message_descriptor._concrete_class()
      new_element._SetListener(self._message_listener)
      self._values[key] = new_element
      self._message_listener.Modified()
      return new_element

  def get_or_create(self, key: _K) -> _V:
    """get_or_create() is an alias for getitem (ie. map[key]).

    Args:
      key: The key to get or create in the map.

    This is useful in cases where you want to be explicit that the call is
    mutating the map.  This can avoid lint errors for statements like this
    that otherwise would appear to be pointless statements:

      msg.my_map[key]
    """
    return self[key]

  @overload
  def get(self, key: _K) -> Optional[_V]:
    ...

  @overload
  def get(self, key: _K, default: _T) -> Union[_V, _T]:
    ...

  # We need to override this explicitly, because our defaultdict-like behavior
  # will make the default implementation (from our base class) always insert
  # the key.
  def get(self, key, default=None):
    if key in self:
      return self[key]
    else:
      return default

  def __contains__(self, item: _K) -> bool:
    item = self._key_checker.CheckValue(item)
    return item in self._values

  def __setitem__(self, key: _K, value: _V) -> NoReturn:
    # Message map values must be mutated in place, never replaced wholesale.
    raise ValueError('May not set values directly, call my_map[key].foo = 5')

  def __delitem__(self, key: _K) -> None:
    key = self._key_checker.CheckValue(key)
    del self._values[key]
    self._message_listener.Modified()

  def __len__(self) -> int:
    return len(self._values)

  def __iter__(self) -> Iterator[_K]:
    return iter(self._values)

  def __repr__(self) -> str:
    return repr(self._values)

  def MergeFrom(self, other: 'MessageMap[_K, _V]') -> None:
    """Deep-merges another MessageMap into this one."""
    # pylint: disable=protected-access
    for key in other._values:
      # According to documentation: "When parsing from the wire or when merging,
      # if there are duplicate map keys the last key seen is used".
      if key in self:
        del self[key]
      self[key].CopyFrom(other[key])
    # self._message_listener.Modified() not required here, because
    # mutations to submessages already propagate.

  def InvalidateIterators(self) -> None:
    # It appears that the only way to reliably invalidate iterators to
    # self._values is to ensure that its size changes.
    original = self._values
    self._values = original.copy()
    original[None] = None

  # This is defined in the abstract base, but we can do it much more cheaply.
  def clear(self) -> None:
    self._values.clear()
    self._message_listener.Modified()

  def GetEntryClass(self) -> Any:
    """Returns the generated class for a single key/value map-entry message."""
    return self._entry_descriptor._concrete_class
||||
|
||||
class _UnknownField:
|
||||
"""A parsed unknown field."""
|
||||
|
||||
# Disallows assignment to other attributes.
|
||||
__slots__ = ['_field_number', '_wire_type', '_data']
|
||||
|
||||
def __init__(self, field_number, wire_type, data):
|
||||
self._field_number = field_number
|
||||
self._wire_type = wire_type
|
||||
self._data = data
|
||||
return
|
||||
|
||||
def __lt__(self, other):
|
||||
# pylint: disable=protected-access
|
||||
return self._field_number < other._field_number
|
||||
|
||||
def __eq__(self, other):
|
||||
if self is other:
|
||||
return True
|
||||
# pylint: disable=protected-access
|
||||
return (self._field_number == other._field_number and
|
||||
self._wire_type == other._wire_type and
|
||||
self._data == other._data)
|
||||
|
||||
|
||||
class UnknownFieldRef:  # pylint: disable=missing-class-docstring

  def __init__(self, parent, index):
    # Lazy reference into the parent's unknown-field list; the target is
    # resolved on every access so a cleared parent is detected in time.
    self._parent = parent
    self._index = index

  def _check_valid(self):
    # A falsy parent or an out-of-range index both mean the referenced
    # field no longer exists (e.g. the parent message was cleared).
    if not self._parent or self._index >= len(self._parent):
      raise ValueError('UnknownField does not exist. '
                       'The parent message might be cleared.')

  def _resolve(self):
    """Validate this reference and return the underlying _UnknownField."""
    self._check_valid()
    # pylint: disable=protected-access
    return self._parent._internal_get(self._index)

  @property
  def field_number(self):
    return self._resolve()._field_number

  @property
  def wire_type(self):
    return self._resolve()._wire_type

  @property
  def data(self):
    return self._resolve()._data
||||
|
||||
class UnknownFieldSet:
  """UnknownField container.

  Holds fields that were present on the wire but not defined in the message
  schema.  After the parent message is cleared, `_values` is set to None and
  all further access raises ValueError.
  """

  # Disallows assignment to other attributes.
  __slots__ = ['_values']

  def __init__(self):
    self._values = []

  def __getitem__(self, index):
    """Returns an UnknownFieldRef for the field at *index*.

    Negative indices count from the end, matching sequence semantics.

    Raises:
      ValueError: if this set was invalidated by clearing the parent message.
      IndexError: if the (normalized) index is out of range.
    """
    if self._values is None:
      raise ValueError('UnknownFields does not exist. '
                       'The parent message might be cleared.')
    size = len(self._values)
    if index < 0:
      index += size
    if index < 0 or index >= size:
      # BUG FIX: was "'index %d out of range'.index" — the '%' formatting
      # operator had been garbled into an attribute access, so the raised
      # IndexError carried a bound method instead of a formatted message.
      raise IndexError('index %d out of range' % index)

    return UnknownFieldRef(self, index)

  def _internal_get(self, index):
    # Raw (unchecked) access used by UnknownFieldRef.
    return self._values[index]

  def __len__(self):
    if self._values is None:
      raise ValueError('UnknownFields does not exist. '
                       'The parent message might be cleared.')
    return len(self._values)

  def _add(self, field_number, wire_type, data):
    """Appends a new _UnknownField and returns it."""
    unknown_field = _UnknownField(field_number, wire_type, data)
    self._values.append(unknown_field)
    return unknown_field

  def __iter__(self):
    for i in range(len(self)):
      yield UnknownFieldRef(self, i)

  def _extend(self, other):
    """Appends all fields of another UnknownFieldSet (None is a no-op)."""
    if other is None:
      return
    # pylint: disable=protected-access
    self._values.extend(other._values)

  def __eq__(self, other):
    if self is other:
      return True
    # Sort unknown fields because their order shouldn't
    # affect equality test.
    values = list(self._values)
    if other is None:
      return not values
    values.sort()
    # pylint: disable=protected-access
    other_values = sorted(other._values)
    return values == other_values

  def _clear(self):
    """Recursively invalidates this set; further access raises ValueError."""
    for value in self._values:
      # pylint: disable=protected-access
      if isinstance(value._data, UnknownFieldSet):
        value._data._clear()  # pylint: disable=protected-access
    self._values = None
||||
1029
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/decoder.py
vendored
Normal file
829
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/encoder.py
vendored
Normal file
|
|
@ -0,0 +1,829 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Code for encoding protocol message primitives.
|
||||
|
||||
Contains the logic for encoding every logical protocol field type
|
||||
into one of the 5 physical wire types.
|
||||
|
||||
This code is designed to push the Python interpreter's performance to the
|
||||
limits.
|
||||
|
||||
The basic idea is that at startup time, for every field (i.e. every
|
||||
FieldDescriptor) we construct two functions: a "sizer" and an "encoder". The
|
||||
sizer takes a value of this field's type and computes its byte size. The
|
||||
encoder takes a writer function and a value. It encodes the value into byte
|
||||
strings and invokes the writer function to write those strings. Typically the
|
||||
writer function is the write() method of a BytesIO.
|
||||
|
||||
We try to do as much work as possible when constructing the writer and the
|
||||
sizer rather than when calling them. In particular:
|
||||
* We copy any needed global functions to local variables, so that we do not need
|
||||
to do costly global table lookups at runtime.
|
||||
* Similarly, we try to do any attribute lookups at startup time if possible.
|
||||
* Every field's tag is encoded to bytes at startup, since it can't change at
|
||||
runtime.
|
||||
* Whatever component of the field size we can compute at startup, we do.
|
||||
* We *avoid* sharing code if doing so would make the code slower and not sharing
|
||||
does not burden us too much. For example, encoders for repeated fields do
|
||||
not just call the encoders for singular fields in a loop because this would
|
||||
add an extra function call overhead for every loop iteration; instead, we
|
||||
manually inline the single-value encoder into the loop.
|
||||
* If a Python function lacks a return statement, Python actually generates
|
||||
instructions to pop the result of the last statement off the stack, push
|
||||
None onto the stack, and then return that. If we really don't care what
|
||||
value is returned, then we can save two instructions by returning the
|
||||
result of the last statement. It looks funny but it helps.
|
||||
* We assume that type and bounds checking has happened at a higher level.
|
||||
"""
|
||||
|
||||
__author__ = 'kenton@google.com (Kenton Varda)'
|
||||
|
||||
import struct
|
||||
|
||||
from google.protobuf.internal import wire_format
|
||||
|
||||
|
||||
# This will overflow and thus become IEEE-754 "infinity". We would use
|
||||
# "float('inf')" but it doesn't work on Windows pre-Python-2.6.
|
||||
_POS_INF = 1e10000
|
||||
_NEG_INF = -_POS_INF
|
||||
|
||||
|
||||
def _VarintSize(value):
|
||||
"""Compute the size of a varint value."""
|
||||
if value <= 0x7f: return 1
|
||||
if value <= 0x3fff: return 2
|
||||
if value <= 0x1fffff: return 3
|
||||
if value <= 0xfffffff: return 4
|
||||
if value <= 0x7ffffffff: return 5
|
||||
if value <= 0x3ffffffffff: return 6
|
||||
if value <= 0x1ffffffffffff: return 7
|
||||
if value <= 0xffffffffffffff: return 8
|
||||
if value <= 0x7fffffffffffffff: return 9
|
||||
return 10
|
||||
|
||||
|
||||
def _SignedVarintSize(value):
|
||||
"""Compute the size of a signed varint value."""
|
||||
if value < 0: return 10
|
||||
if value <= 0x7f: return 1
|
||||
if value <= 0x3fff: return 2
|
||||
if value <= 0x1fffff: return 3
|
||||
if value <= 0xfffffff: return 4
|
||||
if value <= 0x7ffffffff: return 5
|
||||
if value <= 0x3ffffffffff: return 6
|
||||
if value <= 0x1ffffffffffff: return 7
|
||||
if value <= 0xffffffffffffff: return 8
|
||||
if value <= 0x7fffffffffffffff: return 9
|
||||
return 10
|
||||
|
||||
|
||||
def _TagSize(field_number):
  """Returns the number of bytes required to serialize a tag with this field
  number."""
  # Just pass in type 0, since the type won't affect the tag+type size.
  # (The key is field_number << 3 | wire_type; the 3 type bits never push
  # the varint over a 7-bit-group boundary differently than type 0 would.)
  return _VarintSize(wire_format.PackTag(field_number, 0))
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# In this section we define some generic sizers. Each of these functions
|
||||
# takes parameters specific to a particular field type, e.g. int32 or fixed64.
|
||||
# It returns another function which in turn takes parameters specific to a
|
||||
# particular field, e.g. the field number and whether it is repeated or packed.
|
||||
# Look at the next section to see how these are used.
|
||||
|
||||
|
||||
def _SimpleSizer(compute_value_size):
  """A sizer which uses the function compute_value_size to compute the size of
  each value.  Typically compute_value_size is _VarintSize."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    # The tag size is constant per field, so it is computed once here.
    tag_size = _TagSize(field_number)
    if is_packed:
      # Packed: one tag + one length varint + the concatenated payloads.
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        result = 0
        for element in value:
          result += compute_value_size(element)
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Unpacked repeated: every element carries its own tag.
      def RepeatedFieldSize(value):
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(element)
        return result
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        return tag_size + compute_value_size(value)
      return FieldSize

  return SpecificSizer
|
||||
|
||||
def _ModifiedSizer(compute_value_size, modify_value):
  """Like SimpleSizer, but modify_value is invoked on each value before it is
  passed to compute_value_size.  modify_value is typically ZigZagEncode."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      # Packed: one tag + one length varint + the concatenated payloads.
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        result = 0
        for element in value:
          result += compute_value_size(modify_value(element))
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Unpacked repeated: one tag per element.
      def RepeatedFieldSize(value):
        result = tag_size * len(value)
        for element in value:
          result += compute_value_size(modify_value(element))
        return result
      return RepeatedFieldSize
    else:
      def FieldSize(value):
        return tag_size + compute_value_size(modify_value(value))
      return FieldSize

  return SpecificSizer
|
||||
|
||||
def _FixedSizer(value_size):
  """Like _SimpleSizer except for a fixed-size field.  The input is the size
  of one value."""

  def SpecificSizer(field_number, is_repeated, is_packed):
    tag_size = _TagSize(field_number)
    if is_packed:
      # Packed: payload length is simply count * element size.
      local_VarintSize = _VarintSize
      def PackedFieldSize(value):
        result = len(value) * value_size
        return result + local_VarintSize(result) + tag_size
      return PackedFieldSize
    elif is_repeated:
      # Per-element size is constant, so it can be precomputed here.
      element_size = value_size + tag_size
      def RepeatedFieldSize(value):
        return len(value) * element_size
      return RepeatedFieldSize
    else:
      field_size = value_size + tag_size
      def FieldSize(value):
        return field_size
      return FieldSize

  return SpecificSizer
|
||||
|
||||
# ====================================================================
|
||||
# Here we declare a sizer constructor for each field type. Each "sizer
|
||||
# constructor" is a function that takes (field_number, is_repeated, is_packed)
|
||||
# as parameters and returns a sizer, which in turn takes a field value as
|
||||
# a parameter and returns its encoded size.
|
||||
|
||||
|
||||
# int32/int64/enum use the sign-extending signed-varint size; uint32/uint64
# use the plain unsigned varint size.
Int32Sizer = Int64Sizer = EnumSizer = _SimpleSizer(_SignedVarintSize)

UInt32Sizer = UInt64Sizer = _SimpleSizer(_VarintSize)

# sint32/sint64 are ZigZag-transformed before being sized as varints.
SInt32Sizer = SInt64Sizer = _ModifiedSizer(
    _SignedVarintSize, wire_format.ZigZagEncode)

# Fixed-width wire types: 4-byte and 8-byte little-endian values.
Fixed32Sizer = SFixed32Sizer = FloatSizer = _FixedSizer(4)
Fixed64Sizer = SFixed64Sizer = DoubleSizer = _FixedSizer(8)

# bool is encoded as a single varint byte (0 or 1).
BoolSizer = _FixedSizer(1)
|
||||
|
||||
def StringSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a string field."""

  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  # Length-delimited fields can never be packed.
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Size is over the UTF-8 encoding, matching what the encoder writes.
        l = local_len(element.encode('utf-8'))
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value.encode('utf-8'))
      return tag_size + local_VarintSize(l) + l
    return FieldSize
|
||||
|
||||
def BytesSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a bytes field."""

  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  local_len = len
  # Length-delimited fields can never be packed.
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Unlike strings, bytes need no encoding step before measuring.
        l = local_len(element)
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = local_len(value)
      return tag_size + local_VarintSize(l) + l
    return FieldSize
|
||||
|
||||
def GroupSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a group field."""

  # Groups are bracketed by START_GROUP and END_GROUP tags, hence two tags.
  tag_size = _TagSize(field_number) * 2
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        result += element.ByteSize()
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      return tag_size + value.ByteSize()
    return FieldSize
|
||||
|
||||
def MessageSizer(field_number, is_repeated, is_packed):
  """Returns a sizer for a message field."""

  tag_size = _TagSize(field_number)
  local_VarintSize = _VarintSize
  assert not is_packed
  if is_repeated:
    def RepeatedFieldSize(value):
      result = tag_size * len(value)
      for element in value:
        # Messages are length-delimited: payload preceded by its varint size.
        l = element.ByteSize()
        result += local_VarintSize(l) + l
      return result
    return RepeatedFieldSize
  else:
    def FieldSize(value):
      l = value.ByteSize()
      return tag_size + local_VarintSize(l) + l
    return FieldSize
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# MessageSet is special: it needs custom logic to compute its size properly.
|
||||
|
||||
|
||||
def MessageSetItemSizer(field_number):
  """Returns a sizer for extensions of MessageSet.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything except the payload length varint is constant per extension:
  # the two group tags, the type_id tag + value, and the message tag.
  static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
                 _TagSize(3))
  local_VarintSize = _VarintSize

  def FieldSize(value):
    l = value.ByteSize()
    return static_size + local_VarintSize(l) + l

  return FieldSize
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# Map is special: it needs custom logic to compute its size properly.
|
||||
|
||||
|
||||
def MapSizer(field_descriptor, is_message_map):
  """Returns a sizer for a map field."""

  # Can't look at field_descriptor.message_type._concrete_class because it may
  # not have been initialized yet.
  message_type = field_descriptor.message_type
  # Each map entry is serialized as an ordinary (singular) submessage.
  message_sizer = MessageSizer(field_descriptor.number, False, False)

  def FieldSize(map_value):
    total = 0
    for key in map_value:
      value = map_value[key]
      # It's wasteful to create the messages and throw them away one second
      # later since we'll do the same for the actual encode.  But there's not an
      # obvious way to avoid this within the current design without tons of code
      # duplication.  For message map, value.ByteSize() should be called to
      # update the status.
      entry_msg = message_type._concrete_class(key=key, value=value)
      total += message_sizer(entry_msg)
      if is_message_map:
        value.ByteSize()
    return total

  return FieldSize
|
||||
# ====================================================================
|
||||
# Encoders!
|
||||
|
||||
|
||||
def _VarintEncoder():
|
||||
"""Return an encoder for a basic varint value (does not include tag)."""
|
||||
|
||||
local_int2byte = struct.Struct('>B').pack
|
||||
|
||||
def EncodeVarint(write, value, unused_deterministic=None):
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
while value:
|
||||
write(local_int2byte(0x80|bits))
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
return write(local_int2byte(bits))
|
||||
|
||||
return EncodeVarint
|
||||
|
||||
|
||||
def _SignedVarintEncoder():
|
||||
"""Return an encoder for a basic signed varint value (does not include
|
||||
tag)."""
|
||||
|
||||
local_int2byte = struct.Struct('>B').pack
|
||||
|
||||
def EncodeSignedVarint(write, value, unused_deterministic=None):
|
||||
if value < 0:
|
||||
value += (1 << 64)
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
while value:
|
||||
write(local_int2byte(0x80|bits))
|
||||
bits = value & 0x7f
|
||||
value >>= 7
|
||||
return write(local_int2byte(bits))
|
||||
|
||||
return EncodeSignedVarint
|
||||
|
||||
|
||||
# Shared encoder instances, constructed once at import time so every field
# encoder reuses the same closures.
_EncodeVarint = _VarintEncoder()
_EncodeSignedVarint = _SignedVarintEncoder()
|
||||
|
||||
def _VarintBytes(value):
  """Encode the given integer as a varint and return the bytes.

  This is only called at startup time so it doesn't need to be fast.
  """
  pieces = []
  # _EncodeVarint emits output through the callable it is given; collecting
  # the chunks in a list and joining yields the final byte string.
  _EncodeVarint(pieces.append, value, True)
  return b"".join(pieces)
|
||||
|
||||
def TagBytes(field_number, wire_type):
  """Encode the given tag and return the bytes.  Only called at startup.

  Args:
    field_number: The proto field number.
    wire_type: One of the wire_format.WIRETYPE_* constants.

  Returns:
    The varint-encoded field key (field_number << 3 | wire_type) as bytes.
  """
  # _VarintBytes already returns a bytes object (b"".join of chunks), so the
  # former bytes(...) wrapper was a redundant extra copy.
  return _VarintBytes(wire_format.PackTag(field_number, wire_type))
|
||||
# --------------------------------------------------------------------
|
||||
# As with sizers (see above), we have a number of common encoder
|
||||
# implementations.
|
||||
|
||||
|
||||
def _SimpleEncoder(wire_type, encode_value, compute_value_size):
  """Return a constructor for an encoder for fields of a particular type.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      encode_value:  A function which encodes an individual value, e.g.
        _EncodeVarint().
      compute_value_size:  A function which computes the size of an individual
        value, e.g. _VarintSize().
  """

  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      # Packed: one LENGTH_DELIMITED tag, a length varint, then the payloads.
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        # First pass computes the total payload size for the length prefix;
        # second pass writes the elements themselves.
        size = 0
        for element in value:
          size += compute_value_size(element)
        local_EncodeVarint(write, size, deterministic)
        for element in value:
          encode_value(write, element, deterministic)
      return EncodePackedField
    elif is_repeated:
      # Unpacked repeated: every element is preceded by its own tag.
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, deterministic):
        for element in value:
          write(tag_bytes)
          encode_value(write, element, deterministic)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, deterministic):
        write(tag_bytes)
        return encode_value(write, value, deterministic)
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
def _ModifiedEncoder(wire_type, encode_value, compute_value_size, modify_value):
  """Like SimpleEncoder but additionally invokes modify_value on every value
  before passing it to encode_value.  Usually modify_value is ZigZagEncode."""

  def SpecificEncoder(field_number, is_repeated, is_packed):
    if is_packed:
      # Packed: tag + length varint + transformed payloads.
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        size = 0
        for element in value:
          size += compute_value_size(modify_value(element))
        local_EncodeVarint(write, size, deterministic)
        for element in value:
          encode_value(write, modify_value(element), deterministic)
      return EncodePackedField
    elif is_repeated:
      # Unpacked repeated: one tag per transformed element.
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, deterministic):
        for element in value:
          write(tag_bytes)
          encode_value(write, modify_value(element), deterministic)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, deterministic):
        write(tag_bytes)
        return encode_value(write, modify_value(value), deterministic)
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
def _StructPackEncoder(wire_type, format):
  """Return a constructor for an encoder for a fixed-width field.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      format:  The format string to pass to struct.pack().
  """

  value_size = struct.calcsize(format)

  def SpecificEncoder(field_number, is_repeated, is_packed):
    # Bound locally so the inner loops avoid module-attribute lookups.
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        # Fixed-width elements: total payload size is just count * size.
        local_EncodeVarint(write, len(value) * value_size, deterministic)
        for element in value:
          write(local_struct_pack(format, element))
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, unused_deterministic=None):
        for element in value:
          write(tag_bytes)
          write(local_struct_pack(format, element))
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, unused_deterministic=None):
        write(tag_bytes)
        return write(local_struct_pack(format, value))
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
def _FloatingPointEncoder(wire_type, format):
  """Return a constructor for an encoder for float fields.

  This is like StructPackEncoder, but catches errors that may be due to
  passing non-finite floating-point values to struct.pack, and makes a
  second attempt to encode those values.

  Args:
      wire_type:  The field's wire type, for encoding tags.
      format:  The format string to pass to struct.pack().
  """

  value_size = struct.calcsize(format)
  if value_size == 4:
    # Hand-rolled little-endian IEEE-754 float32 bit patterns for the
    # non-finite values (+inf, -inf, NaN).
    def EncodeNonFiniteOrRaise(write, value):
      # Remember that the serialized form uses little-endian byte order.
      if value == _POS_INF:
        write(b'\x00\x00\x80\x7F')
      elif value == _NEG_INF:
        write(b'\x00\x00\x80\xFF')
      elif value != value:  # NaN compares unequal to itself.
        write(b'\x00\x00\xC0\x7F')
      else:
        # Value is finite after all; re-raise the active struct.pack error.
        raise
  elif value_size == 8:
    # Little-endian IEEE-754 float64 bit patterns for +inf, -inf and NaN.
    def EncodeNonFiniteOrRaise(write, value):
      if value == _POS_INF:
        write(b'\x00\x00\x00\x00\x00\x00\xF0\x7F')
      elif value == _NEG_INF:
        write(b'\x00\x00\x00\x00\x00\x00\xF0\xFF')
      elif value != value:  # NaN
        write(b'\x00\x00\x00\x00\x00\x00\xF8\x7F')
      else:
        raise
  else:
    raise ValueError('Can\'t encode floating-point values that are '
                     '%d bytes long (only 4 or 8)' % value_size)

  def SpecificEncoder(field_number, is_repeated, is_packed):
    local_struct_pack = struct.pack
    if is_packed:
      tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
      local_EncodeVarint = _EncodeVarint
      def EncodePackedField(write, value, deterministic):
        write(tag_bytes)
        local_EncodeVarint(write, len(value) * value_size, deterministic)
        for element in value:
          # This try/except block is going to be faster than any code that
          # we could write to check whether element is finite.
          # NOTE(review): this relies on struct.pack raising SystemError for
          # non-finite values on some platforms -- TODO confirm on current
          # CPython, where pack() encodes inf/NaN directly.
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodePackedField
    elif is_repeated:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeRepeatedField(write, value, unused_deterministic=None):
        for element in value:
          write(tag_bytes)
          try:
            write(local_struct_pack(format, element))
          except SystemError:
            EncodeNonFiniteOrRaise(write, element)
      return EncodeRepeatedField
    else:
      tag_bytes = TagBytes(field_number, wire_type)
      def EncodeField(write, value, unused_deterministic=None):
        write(tag_bytes)
        try:
          write(local_struct_pack(format, value))
        except SystemError:
          EncodeNonFiniteOrRaise(write, value)
      return EncodeField

  return SpecificEncoder
|
||||
|
||||
|
||||
# ====================================================================
#  Here we declare an encoder constructor for each field type.  These work
# very similarly to sizer constructors, described earlier.


# int32/int64/enum share one encoder: all three are written as
# sign-extended varints (_EncodeSignedVarint).
Int32Encoder = Int64Encoder = EnumEncoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeSignedVarint, _SignedVarintSize)

# Unsigned integer types use the plain (non-sign-extended) varint encoder.
UInt32Encoder = UInt64Encoder = _SimpleEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize)

# sint32/sint64 pass values through ZigZagEncode before varint encoding.
SInt32Encoder = SInt64Encoder = _ModifiedEncoder(
    wire_format.WIRETYPE_VARINT, _EncodeVarint, _VarintSize,
    wire_format.ZigZagEncode)

# Note that Python conveniently guarantees that when using the '<' prefix on
# formats, they will also have the same size across all platforms (as opposed
# to without the prefix, where their sizes depend on the C compiler's basic
# type sizes).
Fixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<I')
Fixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<Q')
SFixed32Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED32, '<i')
SFixed64Encoder = _StructPackEncoder(wire_format.WIRETYPE_FIXED64, '<q')
FloatEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED32, '<f')
DoubleEncoder = _FloatingPointEncoder(wire_format.WIRETYPE_FIXED64, '<d')
|
||||
|
||||
|
||||
def BoolEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a boolean field."""

  # One wire byte per value: 0x00 for False, 0x01 for True.
  off_byte = b'\x00'
  on_byte = b'\x01'
  if is_packed:
    length_tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
    encode_varint = _EncodeVarint
    def EncodePackedField(write, value, deterministic):
      write(length_tag)
      # Every bool occupies exactly one byte, so the payload length is len().
      encode_varint(write, len(value), deterministic)
      for flag in value:
        write(on_byte if flag else off_byte)
    return EncodePackedField
  if is_repeated:
    element_tag = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
    def EncodeRepeatedField(write, value, unused_deterministic=None):
      for flag in value:
        write(element_tag)
        write(on_byte if flag else off_byte)
    return EncodeRepeatedField
  field_tag = TagBytes(field_number, wire_format.WIRETYPE_VARINT)
  def EncodeField(write, value, unused_deterministic=None):
    write(field_tag)
    return write(on_byte if value else off_byte)
  return EncodeField
|
||||
|
||||
|
||||
def StringEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a string field."""

  # Strings are always length-delimited: tag, payload byte length, then the
  # UTF-8 encoded payload itself.
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  encode_varint = _EncodeVarint
  length_of = len
  assert not is_packed
  if not is_repeated:
    def EncodeField(write, value, deterministic):
      payload = value.encode('utf-8')
      write(tag)
      encode_varint(write, length_of(payload), deterministic)
      return write(payload)
    return EncodeField

  def EncodeRepeatedField(write, value, deterministic):
    for element in value:
      payload = element.encode('utf-8')
      write(tag)
      encode_varint(write, length_of(payload), deterministic)
      write(payload)
  return EncodeRepeatedField
|
||||
|
||||
|
||||
def BytesEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a bytes field."""

  # Bytes fields are length-delimited and written without any transcoding.
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  encode_varint = _EncodeVarint
  length_of = len
  assert not is_packed
  if not is_repeated:
    def EncodeField(write, value, deterministic):
      write(tag)
      encode_varint(write, length_of(value), deterministic)
      return write(value)
    return EncodeField

  def EncodeRepeatedField(write, value, deterministic):
    for element in value:
      write(tag)
      encode_varint(write, length_of(element), deterministic)
      write(element)
  return EncodeRepeatedField
|
||||
|
||||
|
||||
def GroupEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a group field."""

  # Groups are bracketed by start/end tags rather than length-prefixed.
  start_tag = TagBytes(field_number, wire_format.WIRETYPE_START_GROUP)
  end_tag = TagBytes(field_number, wire_format.WIRETYPE_END_GROUP)
  assert not is_packed
  if not is_repeated:
    def EncodeField(write, value, deterministic):
      write(start_tag)
      value._InternalSerialize(write, deterministic)
      return write(end_tag)
    return EncodeField

  def EncodeRepeatedField(write, value, deterministic):
    for element in value:
      write(start_tag)
      element._InternalSerialize(write, deterministic)
      write(end_tag)
  return EncodeRepeatedField
|
||||
|
||||
|
||||
def MessageEncoder(field_number, is_repeated, is_packed):
  """Returns an encoder for a message field."""

  # Submessages are length-delimited: tag, byte size, then the payload.
  tag = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
  encode_varint = _EncodeVarint
  assert not is_packed
  if not is_repeated:
    def EncodeField(write, value, deterministic):
      write(tag)
      encode_varint(write, value.ByteSize(), deterministic)
      return value._InternalSerialize(write, deterministic)
    return EncodeField

  def EncodeRepeatedField(write, value, deterministic):
    for element in value:
      write(tag)
      encode_varint(write, element.ByteSize(), deterministic)
      element._InternalSerialize(write, deterministic)
  return EncodeRepeatedField
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# As before, MessageSet is special.
|
||||
|
||||
|
||||
def MessageSetItemEncoder(field_number):
  """Encoder for extensions of MessageSet.

  The message set message looks like this:
    message MessageSet {
      repeated group Item = 1 {
        required int32 type_id = 2;
        required string message = 3;
      }
    }
  """
  # Everything before and after the message payload is constant for a given
  # field number, so both byte strings are precomputed once.
  start_bytes = (
      TagBytes(1, wire_format.WIRETYPE_START_GROUP)
      + TagBytes(2, wire_format.WIRETYPE_VARINT)
      + _VarintBytes(field_number)
      + TagBytes(3, wire_format.WIRETYPE_LENGTH_DELIMITED))
  end_bytes = TagBytes(1, wire_format.WIRETYPE_END_GROUP)
  encode_varint = _EncodeVarint

  def EncodeField(write, value, deterministic):
    write(start_bytes)
    encode_varint(write, value.ByteSize(), deterministic)
    value._InternalSerialize(write, deterministic)
    return write(end_bytes)

  return EncodeField
|
||||
|
||||
|
||||
# --------------------------------------------------------------------
|
||||
# As before, Map is special.
|
||||
|
||||
|
||||
def MapEncoder(field_descriptor):
  """Returns an encoder for a map field.

  (The previous docstring said "Encoder for extensions of MessageSet.",
  copy-pasted from MessageSetItemEncoder; this function encodes map fields.)

  Maps always have a wire format like this:
    message MapEntry {
      key_type key = 1;
      value_type value = 2;
    }
    repeated MapEntry map = N;

  Args:
    field_descriptor: The FieldDescriptor of the map field.

  Returns:
    A callable ``EncodeField(write, value, deterministic)`` that serializes
    a map as repeated MapEntry submessages.
  """
  # Can't look at field_descriptor.message_type._concrete_class because it may
  # not have been initialized yet.
  message_type = field_descriptor.message_type
  encode_message = MessageEncoder(field_descriptor.number, False, False)

  def EncodeField(write, value, deterministic):
    # For deterministic output, emit entries in sorted-key order; otherwise
    # iterate the map in its natural order.
    value_keys = sorted(value.keys()) if deterministic else value
    for key in value_keys:
      entry_msg = message_type._concrete_class(key=key, value=value[key])
      encode_message(write, entry_msg, deterministic)

  return EncodeField
|
||||
124
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/enum_type_wrapper.py
vendored
Normal file
|
|
@ -0,0 +1,124 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""A simple wrapper around enum types to expose utility functions.
|
||||
|
||||
Instances are created as properties with the same name as the enum they wrap
|
||||
on proto classes. For usage, see:
|
||||
reflection_test.py
|
||||
"""
|
||||
|
||||
__author__ = 'rabsatt@google.com (Kevin Rabsatt)'
|
||||
|
||||
|
||||
class EnumTypeWrapper(object):
  """A utility for finding the names of enum values."""

  DESCRIPTOR = None

  # Type alias consumed by typing stubs: generated subclasses may narrow it,
  # e.g.
  #   def MyGeneratedEnum(Message):
  #     ValueType = NewType('ValueType', int)
  #     def Name(self, number: MyGeneratedEnum.ValueType) -> str
  # so .pyi files can constrain Name()/Value() to the specific enum.
  ValueType = int

  def __init__(self, enum_type):
    """Inits EnumTypeWrapper with an EnumDescriptor."""
    self._enum_type = enum_type
    self.DESCRIPTOR = enum_type  # pylint: disable=invalid-name

  def Name(self, number):  # pylint: disable=invalid-name
    """Returns a string containing the name of an enum value."""
    by_number = self._enum_type.values_by_number
    try:
      return by_number[number].name
    except KeyError:
      pass  # fall out to break exception chaining

    if isinstance(number, int):
      # repr here to handle the odd case when you pass in a boolean.
      raise ValueError('Enum {} has no name defined for value {!r}'.format(
          self._enum_type.name, number))
    raise TypeError(
        'Enum value for {} must be an int, but got {} {!r}.'.format(
            self._enum_type.name, type(number), number))

  def Value(self, name):  # pylint: disable=invalid-name
    """Returns the value corresponding to the given enum name."""
    by_name = self._enum_type.values_by_name
    try:
      return by_name[name].number
    except KeyError:
      pass  # fall out to break exception chaining
    raise ValueError('Enum {} has no value defined for name {!r}'.format(
        self._enum_type.name, name))

  def keys(self):
    """Return a list of the string names in the enum.

    Returns:
      A list of strs, in the order they were defined in the .proto file.
    """
    return [descriptor.name for descriptor in self._enum_type.values]

  def values(self):
    """Return a list of the integer values in the enum.

    Returns:
      A list of ints, in the order they were defined in the .proto file.
    """
    return [descriptor.number for descriptor in self._enum_type.values]

  def items(self):
    """Return a list of the (name, value) pairs of the enum.

    Returns:
      A list of (str, int) pairs, in the order they were defined
      in the .proto file.
    """
    return [(descriptor.name, descriptor.number)
            for descriptor in self._enum_type.values]

  def __getattr__(self, name):
    """Returns the value corresponding to the given enum name."""
    # Fetch _enum_type through __getattribute__ explicitly so a missing enum
    # name cannot recurse back into this __getattr__.
    try:
      enum_type = super().__getattribute__('_enum_type')
      return enum_type.values_by_name[name].number
    except KeyError:
      pass  # fall out to break exception chaining
    raise AttributeError('Enum {} has no value defined for name {!r}'.format(
        self._enum_type.name, name))
|
||||
213
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/extension_dict.py
vendored
Normal file
|
|
@ -0,0 +1,213 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Contains _ExtensionDict class to represent extensions.
|
||||
"""
|
||||
|
||||
from google.protobuf.internal import type_checkers
|
||||
from google.protobuf.descriptor import FieldDescriptor
|
||||
|
||||
|
||||
def _VerifyExtensionHandle(message, extension_handle):
  """Verify that the given extension handle is valid."""

  # The handle must be an actual FieldDescriptor...
  if not isinstance(extension_handle, FieldDescriptor):
    raise KeyError('HasExtension() expects an extension handle, got: %s' %
                   extension_handle)

  # ...which is flagged as an extension...
  if not extension_handle.is_extension:
    raise KeyError('"%s" is not an extension.' % extension_handle.full_name)

  # ...and knows which message type it extends...
  if not extension_handle.containing_type:
    raise KeyError(
        '"%s" is missing a containing_type.' % extension_handle.full_name)

  # ...and that type must be exactly the type of the message being extended.
  if extension_handle.containing_type is not message.DESCRIPTOR:
    raise KeyError('Extension "%s" extends message type "%s", but this '
                   'message is of type "%s".' %
                   (extension_handle.full_name,
                    extension_handle.containing_type.full_name,
                    message.DESCRIPTOR.full_name))
|
||||
|
||||
|
||||
# TODO(robinson): Unify error handling of "unknown extension" crap.
|
||||
# TODO(robinson): Support iteritems()-style iteration over all
|
||||
# extensions with the "has" bits turned on?
|
||||
class _ExtensionDict(object):

  """Dict-like container for Extension fields on proto instances.

  Note that in all cases we expect extension handles to be
  FieldDescriptors.
  """

  def __init__(self, extended_message):
    """
    Args:
      extended_message: Message instance for which we are the Extensions dict.
    """
    self._extended_message = extended_message

  def __getitem__(self, extension_handle):
    """Returns the current value of the given extension handle."""

    _VerifyExtensionHandle(self._extended_message, extension_handle)

    result = self._extended_message._fields.get(extension_handle)
    if result is not None:
      return result

    if extension_handle.label == FieldDescriptor.LABEL_REPEATED:
      # Repeated extensions get a fresh container of the appropriate type.
      result = extension_handle._default_constructor(self._extended_message)
    elif extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
      message_type = extension_handle.message_type
      if not hasattr(message_type, '_concrete_class'):
        # pylint: disable=protected-access
        self._extended_message._FACTORY.GetPrototype(message_type)
      assert getattr(extension_handle.message_type, '_concrete_class', None), (
          'Uninitialized concrete class found for field %r (message type %r)'
          % (extension_handle.full_name,
             extension_handle.message_type.full_name))
      result = extension_handle.message_type._concrete_class()
      try:
        result._SetListener(self._extended_message._listener_for_children)
      except ReferenceError:
        pass
    else:
      # Singular scalar -- just return the default without inserting into the
      # dict.
      return extension_handle.default_value

    # Atomically check if another thread has preempted us and, if not, swap
    # in the new object we just created.  If someone has preempted us, we
    # take that object and discard ours.
    # WARNING:  We are relying on setdefault() being atomic.  This is true
    # in CPython but we haven't investigated others.  This warning appears
    # in several other locations in this file.
    result = self._extended_message._fields.setdefault(
        extension_handle, result)

    return result

  def __eq__(self, other):
    """Two extension dicts are equal when their populated extensions are."""
    if not isinstance(other, self.__class__):
      return False

    my_fields = self._extended_message.ListFields()
    other_fields = other._extended_message.ListFields()

    # Get rid of non-extension fields.
    # ListFields() yields (FieldDescriptor, value) pairs, so the extension
    # flag lives on element [0] of each pair -- consistent with __len__ and
    # __iter__ below.  (Previously this read `field.is_extension` on the
    # pair itself, which raised AttributeError whenever fields were present.)
    my_fields = [field for field in my_fields if field[0].is_extension]
    other_fields = [field for field in other_fields if field[0].is_extension]

    return my_fields == other_fields

  def __ne__(self, other):
    return not self == other

  def __len__(self):
    """Number of populated extension fields."""
    fields = self._extended_message.ListFields()
    # Get rid of non-extension fields.
    extension_fields = [field for field in fields if field[0].is_extension]
    return len(extension_fields)

  def __hash__(self):
    # Mutable, dict-like view -- deliberately unhashable.
    raise TypeError('unhashable object')

  # Note that this is only meaningful for non-repeated, scalar extension
  # fields.  Note also that we may have to call _Modified() when we do
  # successfully set a field this way, to set any necessary "has" bits in the
  # ancestors of the extended message.
  def __setitem__(self, extension_handle, value):
    """If extension_handle specifies a non-repeated, scalar extension
    field, sets the value of that field.
    """

    _VerifyExtensionHandle(self._extended_message, extension_handle)

    if (extension_handle.label == FieldDescriptor.LABEL_REPEATED or
        extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE):
      raise TypeError(
          'Cannot assign to extension "%s" because it is a repeated or '
          'composite type.' % extension_handle.full_name)

    # It's slightly wasteful to lookup the type checker each time,
    # but we expect this to be a vanishingly uncommon case anyway.
    type_checker = type_checkers.GetTypeChecker(extension_handle)
    # pylint: disable=protected-access
    self._extended_message._fields[extension_handle] = (
        type_checker.CheckValue(value))
    self._extended_message._Modified()

  def __delitem__(self, extension_handle):
    self._extended_message.ClearExtension(extension_handle)

  def _FindExtensionByName(self, name):
    """Tries to find a known extension with the specified name.

    Args:
      name: Extension full name.

    Returns:
      Extension field descriptor.
    """
    return self._extended_message._extensions_by_name.get(name, None)

  def _FindExtensionByNumber(self, number):
    """Tries to find a known extension with the field number.

    Args:
      number: Extension field number.

    Returns:
      Extension field descriptor.
    """
    return self._extended_message._extensions_by_number.get(number, None)

  def __iter__(self):
    # Return a generator over the populated extension fields
    return (f[0] for f in self._extended_message.ListFields()
            if f[0].is_extension)

  def __contains__(self, extension_handle):
    _VerifyExtensionHandle(self._extended_message, extension_handle)

    if extension_handle not in self._extended_message._fields:
      return False

    if extension_handle.label == FieldDescriptor.LABEL_REPEATED:
      # A repeated extension counts as present only if it is non-empty.
      return bool(self._extended_message._fields.get(extension_handle))

    if extension_handle.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
      value = self._extended_message._fields.get(extension_handle)
      # pylint: disable=protected-access
      return value is not None and value._is_present_in_parent

    return True
|
||||
78
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/message_listener.py
vendored
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
# Protocol Buffers - Google's data interchange format
|
||||
# Copyright 2008 Google Inc. All rights reserved.
|
||||
# https://developers.google.com/protocol-buffers/
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are
|
||||
# met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above
|
||||
# copyright notice, this list of conditions and the following disclaimer
|
||||
# in the documentation and/or other materials provided with the
|
||||
# distribution.
|
||||
# * Neither the name of Google Inc. nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
"""Defines a listener interface for observing certain
|
||||
state transitions on Message objects.
|
||||
|
||||
Also defines a null implementation of this interface.
|
||||
"""
|
||||
|
||||
__author__ = 'robinson@google.com (Will Robinson)'
|
||||
|
||||
|
||||
class MessageListener(object):

  """Listens for modifications made to a message.  Meant to be registered via
  Message._SetListener().

  Attributes:
    dirty:  If True, then calling Modified() would be a no-op.  This can be
      used to avoid these calls entirely in the common case.
  """

  def Modified(self):
    """Notifies the listener that the message changed in a way that may
    require updating the parent message.

    Concretely this means one of two events:
      (a) the message was modified for the first time, so the parent message
          should henceforth mark the message as present, or
      (b) the message's cached byte size became dirty -- i.e. the message was
          modified for the first time after a previous call to ByteSize() --
          so the parent should also mark its byte size as dirty.
    Note that (a) implies (b), since new objects start out with a client
    cached size (zero); (a) is documented explicitly because it is important.

    Modified() will *only* be called in response to one of these two events --
    not every time the sub-message is modified.

    If the listener's |dirty| attribute is already true, calling Modified
    would be a no-op and can be skipped; performance-sensitive callers should
    check that attribute directly before calling, since it will be true most
    of the time.
    """
    raise NotImplementedError
|
||||
|
||||
|
||||
class NullMessageListener(object):

  """MessageListener implementation that ignores all notifications."""

  def Modified(self):
    # Deliberately a no-op: used where no parent needs to be updated.
    pass
|
||||
|
|
@ -0,0 +1,36 @@
|
|||
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: google/protobuf/internal/message_set_extensions.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import builder as _builder
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# NOTE(review): machine-generated module; only comments were added in this
# pass.  The bytes literal below is the serialized FileDescriptorProto for
# message_set_extensions.proto -- never edit it by hand.
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n5google/protobuf/internal/message_set_extensions.proto\x12\x18google.protobuf.internal\"\x1e\n\x0eTestMessageSet*\x08\x08\x04\x10\xff\xff\xff\xff\x07:\x02\x08\x01\"\xa5\x01\n\x18TestMessageSetExtension1\x12\t\n\x01i\x18\x0f \x01(\x05\x32~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xab\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension1\"\xa7\x01\n\x18TestMessageSetExtension2\x12\x0b\n\x03str\x18\x19 \x01(\t2~\n\x15message_set_extension\x12(.google.protobuf.internal.TestMessageSet\x18\xca\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension2\"(\n\x18TestMessageSetExtension3\x12\x0c\n\x04text\x18# \x01(\t:\x7f\n\x16message_set_extension3\x12(.google.protobuf.internal.TestMessageSet\x18\xdf\xff\xf6. \x01(\x0b\x32\x32.google.protobuf.internal.TestMessageSetExtension3')

# The builder calls inject the generated message classes and descriptors
# (e.g. TestMessageSet, _TESTMESSAGESET, message_set_extension3) into this
# module's globals(), which the names referenced below rely on.
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.message_set_extensions_pb2', globals())
if _descriptor._USE_C_DESCRIPTORS == False:
  TestMessageSet.RegisterExtension(message_set_extension3)
  TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION1.extensions_by_name['message_set_extension'])
  TestMessageSet.RegisterExtension(_TESTMESSAGESETEXTENSION2.extensions_by_name['message_set_extension'])

  DESCRIPTOR._options = None
  _TESTMESSAGESET._options = None
  _TESTMESSAGESET._serialized_options = b'\010\001'
  _TESTMESSAGESET._serialized_start=83
  _TESTMESSAGESET._serialized_end=113
  _TESTMESSAGESETEXTENSION1._serialized_start=116
  _TESTMESSAGESETEXTENSION1._serialized_end=281
  _TESTMESSAGESETEXTENSION2._serialized_start=284
  _TESTMESSAGESETEXTENSION2._serialized_end=451
  _TESTMESSAGESETEXTENSION3._serialized_start=453
  _TESTMESSAGESETEXTENSION3._serialized_end=493
# @@protoc_insertion_point(module_scope)
|
||||
37
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/missing_enum_values_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,37 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: google/protobuf/internal/missing_enum_values.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf.internal import builder as _builder
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n2google/protobuf/internal/missing_enum_values.proto\x12\x1fgoogle.protobuf.python.internal\"\xc1\x02\n\x0eTestEnumValues\x12X\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12X\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnum\x12Z\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32:.google.protobuf.python.internal.TestEnumValues.NestedEnumB\x02\x10\x01\"\x1f\n\nNestedEnum\x12\x08\n\x04ZERO\x10\x00\x12\x07\n\x03ONE\x10\x01\"\xd3\x02\n\x15TestMissingEnumValues\x12_\n\x14optional_nested_enum\x18\x01 \x01(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12_\n\x14repeated_nested_enum\x18\x02 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnum\x12\x61\n\x12packed_nested_enum\x18\x03 \x03(\x0e\x32\x41.google.protobuf.python.internal.TestMissingEnumValues.NestedEnumB\x02\x10\x01\"\x15\n\nNestedEnum\x12\x07\n\x03TWO\x10\x02\"\x1b\n\nJustString\x12\r\n\x05\x64ummy\x18\x01 \x02(\t')
|
||||
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.missing_enum_values_pb2', globals())
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
_TESTENUMVALUES.fields_by_name['packed_nested_enum']._options = None
|
||||
_TESTENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001'
|
||||
_TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._options = None
|
||||
_TESTMISSINGENUMVALUES.fields_by_name['packed_nested_enum']._serialized_options = b'\020\001'
|
||||
_TESTENUMVALUES._serialized_start=88
|
||||
_TESTENUMVALUES._serialized_end=409
|
||||
_TESTENUMVALUES_NESTEDENUM._serialized_start=378
|
||||
_TESTENUMVALUES_NESTEDENUM._serialized_end=409
|
||||
_TESTMISSINGENUMVALUES._serialized_start=412
|
||||
_TESTMISSINGENUMVALUES._serialized_end=751
|
||||
_TESTMISSINGENUMVALUES_NESTEDENUM._serialized_start=730
|
||||
_TESTMISSINGENUMVALUES_NESTEDENUM._serialized_end=751
|
||||
_JUSTSTRING._serialized_start=753
|
||||
_JUSTSTRING._serialized_end=780
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: google/protobuf/internal/more_extensions_dynamic.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf.internal import builder as _builder
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
from google.protobuf.internal import more_extensions_pb2 as google_dot_protobuf_dot_internal_dot_more__extensions__pb2
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6google/protobuf/internal/more_extensions_dynamic.proto\x12\x18google.protobuf.internal\x1a.google/protobuf/internal/more_extensions.proto\"\x1f\n\x12\x44ynamicMessageType\x12\t\n\x01\x61\x18\x01 \x01(\x05:J\n\x17\x64ynamic_int32_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x64 \x01(\x05:z\n\x19\x64ynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x65 \x01(\x0b\x32,.google.protobuf.internal.DynamicMessageType:\x83\x01\n\"repeated_dynamic_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x66 \x03(\x0b\x32,.google.protobuf.internal.DynamicMessageType')
|
||||
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_dynamic_pb2', globals())
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_int32_extension)
|
||||
google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(dynamic_message_extension)
|
||||
google_dot_protobuf_dot_internal_dot_more__extensions__pb2.ExtendedMessage.RegisterExtension(repeated_dynamic_message_extension)
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
_DYNAMICMESSAGETYPE._serialized_start=132
|
||||
_DYNAMICMESSAGETYPE._serialized_end=163
|
||||
# @@protoc_insertion_point(module_scope)
|
||||
41
server_addon/hiero/client/ayon_hiero/vendor/google/protobuf/internal/more_extensions_pb2.py
vendored
Normal file
|
|
@ -0,0 +1,41 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
# Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
# source: google/protobuf/internal/more_extensions.proto
|
||||
"""Generated protocol buffer code."""
|
||||
from google.protobuf.internal import builder as _builder
|
||||
from google.protobuf import descriptor as _descriptor
|
||||
from google.protobuf import descriptor_pool as _descriptor_pool
|
||||
from google.protobuf import symbol_database as _symbol_database
|
||||
# @@protoc_insertion_point(imports)
|
||||
|
||||
_sym_db = _symbol_database.Default()
|
||||
|
||||
|
||||
|
||||
|
||||
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.google/protobuf/internal/more_extensions.proto\x12\x18google.protobuf.internal\"\x99\x01\n\x0fTopLevelMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\x12\x43\n\x0enested_message\x18\x02 \x01(\x0b\x32\'.google.protobuf.internal.NestedMessageB\x02(\x01\"R\n\rNestedMessage\x12\x41\n\nsubmessage\x18\x01 \x01(\x0b\x32).google.protobuf.internal.ExtendedMessageB\x02(\x01\"K\n\x0f\x45xtendedMessage\x12\x17\n\x0eoptional_int32\x18\xe9\x07 \x01(\x05\x12\x18\n\x0frepeated_string\x18\xea\x07 \x03(\t*\x05\x08\x01\x10\xe8\x07\"-\n\x0e\x46oreignMessage\x12\x1b\n\x13\x66oreign_message_int\x18\x01 \x01(\x05:I\n\x16optional_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x01 \x01(\x05:w\n\x1aoptional_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x02 \x01(\x0b\x32(.google.protobuf.internal.ForeignMessage:I\n\x16repeated_int_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x03 \x03(\x05:w\n\x1arepeated_message_extension\x12).google.protobuf.internal.ExtendedMessage\x18\x04 \x03(\x0b\x32(.google.protobuf.internal.ForeignMessage')
|
||||
|
||||
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals())
|
||||
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.protobuf.internal.more_extensions_pb2', globals())
|
||||
if _descriptor._USE_C_DESCRIPTORS == False:
|
||||
ExtendedMessage.RegisterExtension(optional_int_extension)
|
||||
ExtendedMessage.RegisterExtension(optional_message_extension)
|
||||
ExtendedMessage.RegisterExtension(repeated_int_extension)
|
||||
ExtendedMessage.RegisterExtension(repeated_message_extension)
|
||||
|
||||
DESCRIPTOR._options = None
|
||||
_TOPLEVELMESSAGE.fields_by_name['submessage']._options = None
|
||||
_TOPLEVELMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001'
|
||||
_TOPLEVELMESSAGE.fields_by_name['nested_message']._options = None
|
||||
_TOPLEVELMESSAGE.fields_by_name['nested_message']._serialized_options = b'(\001'
|
||||
_NESTEDMESSAGE.fields_by_name['submessage']._options = None
|
||||
_NESTEDMESSAGE.fields_by_name['submessage']._serialized_options = b'(\001'
|
||||
_TOPLEVELMESSAGE._serialized_start=77
|
||||
_TOPLEVELMESSAGE._serialized_end=230
|
||||
_NESTEDMESSAGE._serialized_start=232
|
||||
_NESTEDMESSAGE._serialized_end=314
|
||||
_EXTENDEDMESSAGE._serialized_start=316
|
||||
_EXTENDEDMESSAGE._serialized_end=391
|
||||
_FOREIGNMESSAGE._serialized_start=393
|
||||
_FOREIGNMESSAGE._serialized_end=438
|
||||
# @@protoc_insertion_point(module_scope)
|
||||