Merge branch 'develop' into feature/houdini_cleanup_after_publishing

MustafaJafar 2024-06-03 16:56:18 +03:00
commit 7543082353
848 changed files with 5115 additions and 3663 deletions

View file

@ -212,7 +212,13 @@ class ApplicationsAddonSettings(BaseSettingsModel):
scope=["studio"]
)
only_available: bool = SettingsField(
True, title="Show only available applications")
True,
title="Show only available applications",
description="Enable to show only applications in AYON Launcher"
" for which the executable paths are found on the running machine."
" This applies as an additional filter to the applications defined in a "
" project's anatomy settings to ignore unavailable applications."
)
@validator("tool_groups")
def validate_unique_name(cls, value):

View file

@ -0,0 +1,13 @@
from .version import __version__
from .addon import (
CELACTION_ROOT_DIR,
CelactionAddon,
)
__all__ = (
"__version__",
"CELACTION_ROOT_DIR",
"CelactionAddon",
)

View file

@ -0,0 +1,31 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon
from .version import __version__
CELACTION_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class CelactionAddon(AYONAddon, IHostAddon):
name = "celaction"
version = __version__
host_name = "celaction"
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(CELACTION_ROOT_DIR, "hooks")
]
def add_implementation_envs(self, env, _app):
# Set default values if they are not already set via settings
defaults = {
"LOGLEVEL": "DEBUG"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
def get_workfile_extensions(self):
return [".scn"]

View file

@ -0,0 +1,152 @@
import os
import shutil
import winreg
import subprocess
from ayon_core.lib import get_ayon_launcher_args
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_celaction import CELACTION_ROOT_DIR
class CelactionPrelaunchHook(PreLaunchHook):
"""Bootstrap celacion with AYON"""
app_groups = {"celaction"}
platforms = {"windows"}
launch_types = {LaunchTypes.local}
def execute(self):
folder_attributes = self.data["folder_entity"]["attrib"]
width = folder_attributes["resolutionWidth"]
height = folder_attributes["resolutionHeight"]
# Add workfile path to launch arguments
workfile_path = self.workfile_path()
if workfile_path:
self.launch_context.launch_args.append(workfile_path)
# setting output parameters
path_user_settings = "\\".join([
"Software", "CelAction", "CelAction2D", "User Settings"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_user_settings)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_user_settings, 0,
winreg.KEY_ALL_ACCESS
)
path_to_cli = os.path.join(
CELACTION_ROOT_DIR, "scripts", "publish_cli.py"
)
subprocess_args = get_ayon_launcher_args("run", path_to_cli)
executable = subprocess_args.pop(0)
workfile_settings = self.get_workfile_settings()
winreg.SetValueEx(
hKey,
"SubmitAppTitle",
0,
winreg.REG_SZ,
executable
)
# add required arguments for workfile path
parameters = subprocess_args + [
"--currentFile", "*SCENE*"
]
# Add custom parameters from workfile settings
if "render_chunk" in workfile_settings["submission_overrides"]:
parameters += [
"--chunk", "*CHUNK*"
]
if "resolution" in workfile_settings["submission_overrides"]:
parameters += [
"--resolutionWidth", "*X*",
"--resolutionHeight", "*Y*"
]
if "frame_range" in workfile_settings["submission_overrides"]:
parameters += [
"--frameStart", "*START*",
"--frameEnd", "*END*"
]
winreg.SetValueEx(
hKey, "SubmitParametersTitle", 0, winreg.REG_SZ,
subprocess.list2cmdline(parameters)
)
self.log.debug(f"__ parameters: \"{parameters}\"")
# setting resolution parameters
path_submit = "\\".join([
path_user_settings, "Dialogs", "SubmitOutput"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_submit)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_submit, 0,
winreg.KEY_ALL_ACCESS
)
winreg.SetValueEx(hKey, "SaveScene", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "CustomX", 0, winreg.REG_DWORD, width)
winreg.SetValueEx(hKey, "CustomY", 0, winreg.REG_DWORD, height)
# making sure message dialogs don't appear when overwriting
path_overwrite_scene = "\\".join([
path_user_settings, "Messages", "OverwriteScene"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_overwrite_scene)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_overwrite_scene, 0,
winreg.KEY_ALL_ACCESS
)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 6)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
# set scene as not saved
path_scene_saved = "\\".join([
path_user_settings, "Messages", "SceneSaved"
])
winreg.CreateKey(winreg.HKEY_CURRENT_USER, path_scene_saved)
hKey = winreg.OpenKey(
winreg.HKEY_CURRENT_USER, path_scene_saved, 0,
winreg.KEY_ALL_ACCESS
)
winreg.SetValueEx(hKey, "Result", 0, winreg.REG_DWORD, 1)
winreg.SetValueEx(hKey, "Valid", 0, winreg.REG_DWORD, 1)
def workfile_path(self):
workfile_path = self.data["last_workfile_path"]
# copy workfile from template if none exists at the path
if not os.path.exists(workfile_path):
# TODO add ability to set different template workfile path via
# settings
template_path = os.path.join(
CELACTION_ROOT_DIR,
"resources",
"celaction_template_scene.scn"
)
if not os.path.exists(template_path):
self.log.warning(
"Couldn't find workfile template file in {}".format(
template_path
)
)
return
self.log.info(
f"Creating workfile from template: \"{template_path}\""
)
# Copy template workfile to the new destination
shutil.copy2(
os.path.normpath(template_path),
os.path.normpath(workfile_path)
)
self.log.info(f"Workfile to open: \"{workfile_path}\"")
return workfile_path
def get_workfile_settings(self):
return self.data["project_settings"]["celaction"]["workfile"]

View file

@ -0,0 +1,60 @@
import pyblish.api
import sys
from pprint import pformat
class CollectCelactionCliKwargs(pyblish.api.ContextPlugin):
""" Collects all keyword arguments passed from the terminal """
label = "Collect Celaction Cli Kwargs"
order = pyblish.api.CollectorOrder - 0.1
def process(self, context):
args = list(sys.argv[1:])
self.log.info(str(args))
missing_kwargs = []
passing_kwargs = {}
for key in (
"chunk",
"frameStart",
"frameEnd",
"resolutionWidth",
"resolutionHeight",
"currentFile",
):
arg_key = f"--{key}"
if arg_key not in args:
missing_kwargs.append(key)
continue
arg_idx = args.index(arg_key)
args.pop(arg_idx)
if key != "currentFile":
value = args.pop(arg_idx)
else:
path_parts = []
while arg_idx < len(args):
path_parts.append(args.pop(arg_idx))
value = " ".join(path_parts).strip('"')
passing_kwargs[key] = value
if missing_kwargs:
self.log.debug("Missing arguments {}".format(
", ".join(
[f'"{key}"' for key in missing_kwargs]
)
))
self.log.info("Storing kwargs ...")
self.log.debug("_ passing_kwargs: {}".format(pformat(passing_kwargs)))
# set kwargs to context data
context.set_data("passingKwargs", passing_kwargs)
# get kwargs onto context data as keys with values
for k, v in passing_kwargs.items():
self.log.info(f"Setting `{k}` to instance.data with value: `{v}`")
if k in ["frameStart", "frameEnd"]:
context.data[k] = passing_kwargs[k] = int(v)
else:
context.data[k] = v
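A standalone sketch of the parsing behaviour above (hypothetical argv values): every key consumes exactly one token, except currentFile, which greedily joins all remaining tokens so paths containing spaces survive:

def parse_cli_kwargs(args, keys=("chunk", "currentFile")):
    # mirrors the collector's approach, trimmed to two keys for brevity
    args = list(args)
    out = {}
    for key in keys:
        flag = "--{}".format(key)
        if flag not in args:
            continue
        idx = args.index(flag)
        args.pop(idx)
        if key != "currentFile":
            out[key] = args.pop(idx)
        else:
            parts = []
            while idx < len(args):
                parts.append(args.pop(idx))
            out[key] = " ".join(parts).strip('"')
    return out

print(parse_cli_kwargs(
    ["--chunk", "10", "--currentFile", "C:/shots/sh", "010/wf.scn"]))
# {'chunk': '10', 'currentFile': 'C:/shots/sh 010/wf.scn'}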

View file

@ -0,0 +1,96 @@
import os
import pyblish.api
class CollectCelactionInstances(pyblish.api.ContextPlugin):
""" Adds the celaction render instances """
label = "Collect Celaction Instances"
order = pyblish.api.CollectorOrder + 0.1
def process(self, context):
task = context.data["task"]
current_file = context.data["currentFile"]
staging_dir = os.path.dirname(current_file)
scene_file = os.path.basename(current_file)
version = context.data["version"]
folder_entity = context.data["folderEntity"]
folder_attributes = folder_entity["attrib"]
shared_instance_data = {
"folderPath": folder_entity["path"],
"frameStart": folder_attributes["frameStart"],
"frameEnd": folder_attributes["frameEnd"],
"handleStart": folder_attributes["handleStart"],
"handleEnd": folder_attributes["handleEnd"],
"fps": folder_attributes["fps"],
"resolutionWidth": folder_attributes["resolutionWidth"],
"resolutionHeight": folder_attributes["resolutionHeight"],
"pixelAspect": 1,
"step": 1,
"version": version
}
celaction_kwargs = context.data.get(
"passingKwargs", {})
if celaction_kwargs:
shared_instance_data.update(celaction_kwargs)
# workfile instance
product_type = "workfile"
product_name = product_type + task.capitalize()
# Create instance
instance = context.create_instance(product_name)
# creating instance data
instance.data.update({
"label": scene_file,
"productName": product_name,
"productType": product_type,
"family": product_type,
"families": [product_type],
"representations": []
})
# adding basic script data
instance.data.update(shared_instance_data)
# creating representation
representation = {
'name': 'scn',
'ext': 'scn',
'files': scene_file,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation)
self.log.info('Publishing Celaction workfile')
# render instance
product_name = f"render{task}Main"
product_type = "render.farm"
instance = context.create_instance(name=product_name)
# getting instance state
instance.data["publish"] = True
# add folderEntity data into instance
instance.data.update({
"label": "{} - farm".format(product_name),
"productType": product_type,
"family": product_type,
"families": [product_type],
"productName": product_name
})
# adding basic script data
instance.data.update(shared_instance_data)
self.log.info('Publishing Celaction render instance')
self.log.debug(f"Instance data: `{instance.data}`")
for i in context:
self.log.debug(f"{i.data['families']}")

View file

@ -0,0 +1,65 @@
import os
import copy
import pyblish.api
class CollectRenderPath(pyblish.api.InstancePlugin):
"""Generate file and directory path where rendered images will be"""
label = "Collect Render Path"
order = pyblish.api.CollectorOrder + 0.495
families = ["render.farm"]
settings_category = "celaction"
# Presets
output_extension = "png"
anatomy_template_key_render_files = None
anatomy_template_key_metadata = None
def process(self, instance):
anatomy = instance.context.data["anatomy"]
anatomy_data = copy.deepcopy(instance.data["anatomyData"])
padding = anatomy.templates_obj.frame_padding
product_type = "render"
anatomy_data.update({
"frame": f"%0{padding}d",
"family": product_type,
"representation": self.output_extension,
"ext": self.output_extension
})
anatomy_data["product"]["type"] = product_type
# get anatomy rendering keys
r_anatomy_key = self.anatomy_template_key_render_files
m_anatomy_key = self.anatomy_template_key_metadata
# get folder and path for rendering images from celaction
r_template_item = anatomy.get_template_item("publish", r_anatomy_key)
render_dir = r_template_item["directory"].format_strict(anatomy_data)
render_path = r_template_item["path"].format_strict(anatomy_data)
self.log.debug("__ render_path: `{}`".format(render_path))
# create dir if it doesn't exist
try:
if not os.path.isdir(render_dir):
os.makedirs(render_dir, exist_ok=True)
except OSError:
# directory is not available
self.log.warning("Path is unreachable: `{}`".format(render_dir))
# add rendering path to instance data
instance.data["path"] = render_path
# get anatomy for published renders folder path
m_template_item = anatomy.get_template_item(
"publish", m_anatomy_key, default=None
)
if m_template_item is not None:
metadata_path = m_template_item["directory"].format_strict(
anatomy_data
)
instance.data["publishRenderMetadataFolder"] = metadata_path
self.log.info("Metadata render path: `{}`".format(metadata_path))
self.log.info(f"Render output path set to: `{render_path}`")

View file

@ -0,0 +1,22 @@
import shutil
import pyblish.api
from ayon_core.lib import version_up
class VersionUpScene(pyblish.api.ContextPlugin):
order = pyblish.api.IntegratorOrder + 0.5
label = 'Version Up Scene'
families = ['workfile']
optional = True
active = True
def process(self, context):
current_file = context.data.get('currentFile')
v_up = version_up(current_file)
self.log.debug('Current file is: {}'.format(current_file))
self.log.debug('Version up: {}'.format(v_up))
shutil.copy2(current_file, v_up)
self.log.info('Scene saved into new version: {}'.format(v_up))
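For illustration only, assuming the usual v-number workfile naming, version_up from ayon_core.lib derives the next version of a path:

from ayon_core.lib import version_up

# hypothetical path; the next free version of the file is derived
print(version_up("C:/work/shot010_v001.scn"))
# expected result along the lines of "C:/work/shot010_v002.scn"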

View file

@ -0,0 +1,36 @@
import os
import sys
import pyblish.api
import pyblish.util
from ayon_celaction import CELACTION_ROOT_DIR
from ayon_core.lib import Logger
from ayon_core.tools.utils import host_tools
from ayon_core.pipeline import install_ayon_plugins
log = Logger.get_logger("celaction")
PUBLISH_HOST = "celaction"
PLUGINS_DIR = os.path.join(CELACTION_ROOT_DIR, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
def main():
# Registers global pyblish plugins
install_ayon_plugins()
if os.path.exists(PUBLISH_PATH):
log.info(f"Registering path: {PUBLISH_PATH}")
pyblish.api.register_plugin_path(PUBLISH_PATH)
pyblish.api.register_host(PUBLISH_HOST)
pyblish.api.register_target("local")
return host_tools.show_publish()
if __name__ == "__main__":
result = main()
sys.exit(not bool(result))

View file

@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'celaction' version."""
__version__ = "0.2.0"

View file

@ -1,3 +1,12 @@
name = "celaction"
title = "CelAction"
version = "0.1.0"
version = "0.2.0"
client_dir = "ayon_celaction"
ayon_required_addons = {
"core": ">0.3.2",
}
ayon_compatible_addons = {
"applications": ">=0.2.0",
}

View file

@ -0,0 +1,13 @@
from .version import __version__
from .addon import (
FLAME_ADDON_ROOT,
FlameAddon,
)
__all__ = (
"__version__",
"FLAME_ADDON_ROOT",
"FlameAddon",
)

View file

@ -0,0 +1,35 @@
import os
from ayon_core.addon import AYONAddon, IHostAddon
from .version import __version__
FLAME_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))
class FlameAddon(AYONAddon, IHostAddon):
name = "flame"
version = __version__
host_name = "flame"
def add_implementation_envs(self, env, _app):
# Add requirements to DL_PYTHON_HOOK_PATH
env["DL_PYTHON_HOOK_PATH"] = os.path.join(FLAME_ADDON_ROOT, "startup")
env.pop("QT_AUTO_SCREEN_SCALE_FACTOR", None)
# Set default values if they are not already set via settings
defaults = {
"LOGLEVEL": "DEBUG"
}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [
os.path.join(FLAME_ADDON_ROOT, "hooks")
]
def get_workfile_extensions(self):
return [".otoc"]

View file

@ -0,0 +1,159 @@
"""
AYON Autodesk Flame api
"""
from .constants import (
COLOR_MAP,
MARKER_NAME,
MARKER_COLOR,
MARKER_DURATION,
MARKER_PUBLISH_DEFAULT
)
from .lib import (
CTX,
FlameAppFramework,
get_current_project,
get_current_sequence,
create_segment_data_marker,
get_segment_data_marker,
set_segment_data_marker,
set_publish_attribute,
get_publish_attribute,
get_sequence_segments,
maintained_segment_selection,
reset_segment_selection,
get_segment_attributes,
get_clips_in_reels,
get_reformatted_filename,
get_frame_from_filename,
get_padding_from_filename,
maintained_object_duplication,
maintained_temp_file_path,
get_clip_segment,
get_batch_group_from_desktop,
MediaInfoFile,
TimeEffectMetadata
)
from .utils import (
setup,
get_flame_version,
get_flame_install_root
)
from .pipeline import (
install,
uninstall,
ls,
containerise,
update_container,
remove_instance,
list_instances,
imprint,
maintained_selection
)
from .menu import (
FlameMenuProjectConnect,
FlameMenuTimeline,
FlameMenuUniversal
)
from .plugin import (
Creator,
PublishableClip,
ClipLoader,
OpenClipSolver
)
from .workio import (
open_file,
save_file,
current_file,
has_unsaved_changes,
file_extensions,
work_root
)
from .render_utils import (
export_clip,
get_preset_path_by_xml_name,
modify_preset_file
)
from .batch_utils import (
create_batch_group,
create_batch_group_conent
)
__all__ = [
# constants
"COLOR_MAP",
"MARKER_NAME",
"MARKER_COLOR",
"MARKER_DURATION",
"MARKER_PUBLISH_DEFAULT",
# lib
"CTX",
"FlameAppFramework",
"get_current_project",
"get_current_sequence",
"create_segment_data_marker",
"get_segment_data_marker",
"set_segment_data_marker",
"set_publish_attribute",
"get_publish_attribute",
"get_sequence_segments",
"maintained_segment_selection",
"reset_segment_selection",
"get_segment_attributes",
"get_clips_in_reels",
"get_reformatted_filename",
"get_frame_from_filename",
"get_padding_from_filename",
"maintained_object_duplication",
"maintained_temp_file_path",
"get_clip_segment",
"get_batch_group_from_desktop",
"MediaInfoFile",
"TimeEffectMetadata",
# pipeline
"install",
"uninstall",
"ls",
"containerise",
"update_container",
"reload_pipeline",
"maintained_selection",
"remove_instance",
"list_instances",
"imprint",
"maintained_selection",
# utils
"setup",
"get_flame_version",
"get_flame_install_root",
# menu
"FlameMenuProjectConnect",
"FlameMenuTimeline",
"FlameMenuUniversal",
# plugin
"Creator",
"PublishableClip",
"ClipLoader",
"OpenClipSolver",
# workio
"open_file",
"save_file",
"current_file",
"has_unsaved_changes",
"file_extensions",
"work_root",
# render utils
"export_clip",
"get_preset_path_by_xml_name",
"modify_preset_file",
# batch utils
"create_batch_group",
"create_batch_group_conent"
]

View file

@ -0,0 +1,151 @@
import flame
def create_batch_group(
name,
frame_start,
frame_duration,
update_batch_group=None,
**kwargs
):
"""Create Batch Group in active project's Desktop
Args:
name (str): name of batch group to be created
frame_start (int): start frame of batch
frame_duration (int): duration of batch in frames
update_batch_group (PyBatch)[optional]: batch group to update
Return:
PyBatch: active flame batch group
"""
# make sure some batch obj is present
batch_group = update_batch_group or flame.batch
schematic_reels = kwargs.get("shematic_reels") or ['LoadedReel1']
shelf_reels = kwargs.get("shelf_reels") or ['ShelfReel1']
handle_start = kwargs.get("handleStart") or 0
handle_end = kwargs.get("handleEnd") or 0
frame_start -= handle_start
frame_duration += handle_start + handle_end
if not update_batch_group:
# Create batch group with name, start_frame value, duration value,
# set of schematic reel names, set of shelf reel names
batch_group = batch_group.create_batch_group(
name,
start_frame=frame_start,
duration=frame_duration,
reels=schematic_reels,
shelf_reels=shelf_reels
)
else:
batch_group.name = name
batch_group.start_frame = frame_start
batch_group.duration = frame_duration
# add reels to batch group
_add_reels_to_batch_group(
batch_group, schematic_reels, shelf_reels)
# TODO: also update write node if there is any
# TODO: also update loaders to start from correct frameStart
if kwargs.get("switch_batch_tab"):
# use this command to switch to the batch tab
batch_group.go_to()
return batch_group
def _add_reels_to_batch_group(batch_group, reels, shelf_reels):
# update or create defined reels
# helper variables
reel_names = [
r.name.get_value()
for r in batch_group.reels
]
shelf_reel_names = [
r.name.get_value()
for r in batch_group.shelf_reels
]
# add schematic reels
for _r in reels:
if _r in reel_names:
continue
batch_group.create_reel(_r)
# add shelf reels
for _sr in shelf_reels:
if _sr in shelf_reel_names:
continue
batch_group.create_shelf_reel(_sr)
def create_batch_group_conent(batch_nodes, batch_links, batch_group=None):
"""Creating batch group with links
Args:
batch_nodes (list of dict): each dict is node definition
batch_links (list of dict): each dict is link definition
batch_group (PyBatch, optional): batch group. Defaults to None.
Return:
dict: all batch nodes {name or id: PyNode}
"""
# make sure some batch obj is present
batch_group = batch_group or flame.batch
all_batch_nodes = {
b.name.get_value(): b
for b in batch_group.nodes
}
for node in batch_nodes:
# NOTE: node_props ideally needs to be OrderedDict type
node_id, node_type, node_props = (
node["id"], node["type"], node["properties"])
# get node name for checking if exists
node_name = node_props.pop("name", None) or node_id
if all_batch_nodes.get(node_name):
# update existing batch node
batch_node = all_batch_nodes[node_name]
else:
# create new batch node
batch_node = batch_group.create_node(node_type)
# set name
batch_node.name.set_value(node_name)
# set attributes found in node props
for key, value in node_props.items():
if not hasattr(batch_node, key):
continue
setattr(batch_node, key, value)
# add created node for possible linking
all_batch_nodes[node_id] = batch_node
# link nodes to each other
for link in batch_links:
_from_n, _to_n = link["from_node"], link["to_node"]
# check if all linking nodes are available
if not all([
all_batch_nodes.get(_from_n["id"]),
all_batch_nodes.get(_to_n["id"])
]):
continue
# link nodes in defined link
batch_group.connect_nodes(
all_batch_nodes[_from_n["id"]], _from_n["connector"],
all_batch_nodes[_to_n["id"]], _to_n["connector"]
)
# sort batch nodes
batch_group.organize()
return all_batch_nodes
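For reference, a hedged sketch of the node and link dict shapes consumed by create_batch_group_conent (ids, node type, and properties are illustrative; to be run inside a Flame session):

batch_nodes = [
    {
        "id": "write_main",
        "type": "Write File",  # hypothetical node type name
        "properties": {"name": "write_main"},
    },
]
batch_links = [
    {
        "from_node": {"id": "comp", "connector": "Result"},
        "to_node": {"id": "write_main", "connector": "Front"},
    },
]
# inside Flame:
# create_batch_group_conent(batch_nodes, batch_links)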

View file

@ -0,0 +1,24 @@
"""
AYON Flame api constants
"""
# AYON marker workflow variables
MARKER_NAME = "OpenPypeData"
MARKER_DURATION = 0
MARKER_COLOR = "cyan"
MARKER_PUBLISH_DEFAULT = False
# AYON color definitions
COLOR_MAP = {
"red": (1.0, 0.0, 0.0),
"orange": (1.0, 0.5, 0.0),
"yellow": (1.0, 1.0, 0.0),
"pink": (1.0, 0.5, 1.0),
"white": (1.0, 1.0, 1.0),
"green": (0.0, 1.0, 0.0),
"cyan": (0.0, 1.0, 1.0),
"blue": (0.0, 0.0, 1.0),
"purple": (0.5, 0.0, 0.5),
"magenta": (0.5, 0.0, 1.0),
"black": (0.0, 0.0, 0.0)
}

File diff suppressed because it is too large

View file

@ -0,0 +1,256 @@
from copy import deepcopy
from pprint import pformat
from qtpy import QtWidgets
from ayon_core.pipeline import get_current_project_name
from ayon_core.tools.utils.host_tools import HostToolsHelper
menu_group_name = 'OpenPype'
default_flame_export_presets = {
'Publish': {
'PresetVisibility': 2,
'PresetType': 0,
'PresetFile': 'OpenEXR/OpenEXR (16-bit fp PIZ).xml'
},
'Preview': {
'PresetVisibility': 3,
'PresetType': 2,
'PresetFile': 'Generate Preview.xml'
},
'Thumbnail': {
'PresetVisibility': 3,
'PresetType': 0,
'PresetFile': 'Generate Thumbnail.xml'
}
}
def callback_selection(selection, function):
import ayon_flame.api as opfapi
opfapi.CTX.selection = selection
print("Hook Selection: \n\t{}".format(
pformat({
index: (type(item), item.name)
for index, item in enumerate(opfapi.CTX.selection)})
))
function()
class _FlameMenuApp(object):
def __init__(self, framework):
self.name = self.__class__.__name__
self.framework = framework
self.log = framework.log
self.menu_group_name = menu_group_name
self.dynamic_menu_data = {}
# flame module is only available when a
# flame project is loaded and initialized
self.flame = None
try:
import flame
self.flame = flame
except ImportError:
self.flame = None
self.flame_project_name = None
if self.flame:
self.flame_project_name = self.flame.project.current_project.name
self.prefs = self.framework.prefs_dict(self.framework.prefs, self.name)
self.prefs_user = self.framework.prefs_dict(
self.framework.prefs_user, self.name)
self.prefs_global = self.framework.prefs_dict(
self.framework.prefs_global, self.name)
self.mbox = QtWidgets.QMessageBox()
project_name = get_current_project_name()
self.menu = {
"actions": [{
'name': project_name or "project",
'isEnabled': False
}],
"name": self.menu_group_name
}
self.tools_helper = HostToolsHelper()
def __getattr__(self, name):
def method(*args, **kwargs):
print('calling %s' % name)
return method
def rescan(self, *args, **kwargs):
if not self.flame:
try:
import flame
self.flame = flame
except ImportError:
self.flame = None
if self.flame:
self.flame.execute_shortcut('Rescan Python Hooks')
self.log.info('Rescan Python Hooks')
class FlameMenuProjectConnect(_FlameMenuApp):
# flameMenuProjectconnect app takes care of the preferences dialog as well
def __init__(self, framework):
_FlameMenuApp.__init__(self, framework)
def __getattr__(self, name):
def method(*args, **kwargs):
project = self.dynamic_menu_data.get(name)
if project:
self.link_project(project)
return method
def build_menu(self):
if not self.flame:
return []
menu = deepcopy(self.menu)
menu['actions'].append({
"name": "Workfiles...",
"execute": lambda x: self.tools_helper.show_workfiles()
})
menu['actions'].append({
"name": "Load...",
"execute": lambda x: self.tools_helper.show_loader()
})
menu['actions'].append({
"name": "Manage...",
"execute": lambda x: self.tools_helper.show_scene_inventory()
})
menu['actions'].append({
"name": "Library...",
"execute": lambda x: self.tools_helper.show_library_loader()
})
return menu
def refresh(self, *args, **kwargs):
self.rescan()
def rescan(self, *args, **kwargs):
if not self.flame:
try:
import flame
self.flame = flame
except ImportError:
self.flame = None
if self.flame:
self.flame.execute_shortcut('Rescan Python Hooks')
self.log.info('Rescan Python Hooks')
class FlameMenuTimeline(_FlameMenuApp):
# flameMenuTimeline app adds create/publish/load/manage actions to the timeline context menu
def __init__(self, framework):
_FlameMenuApp.__init__(self, framework)
def __getattr__(self, name):
def method(*args, **kwargs):
project = self.dynamic_menu_data.get(name)
if project:
self.link_project(project)
return method
def build_menu(self):
if not self.flame:
return []
menu = deepcopy(self.menu)
menu['actions'].append({
"name": "Create...",
"execute": lambda x: callback_selection(
x, self.tools_helper.show_creator)
})
menu['actions'].append({
"name": "Publish...",
"execute": lambda x: callback_selection(
x, self.tools_helper.show_publish)
})
menu['actions'].append({
"name": "Load...",
"execute": lambda x: self.tools_helper.show_loader()
})
menu['actions'].append({
"name": "Manage...",
"execute": lambda x: self.tools_helper.show_scene_inventory()
})
menu['actions'].append({
"name": "Library...",
"execute": lambda x: self.tools_helper.show_library_loader()
})
return menu
def refresh(self, *args, **kwargs):
self.rescan()
def rescan(self, *args, **kwargs):
if not self.flame:
try:
import flame
self.flame = flame
except ImportError:
self.flame = None
if self.flame:
self.flame.execute_shortcut('Rescan Python Hooks')
self.log.info('Rescan Python Hooks')
class FlameMenuUniversal(_FlameMenuApp):
# flameMenuUniversal app adds load/manage actions shared across other Flame views
def __init__(self, framework):
_FlameMenuApp.__init__(self, framework)
def __getattr__(self, name):
def method(*args, **kwargs):
project = self.dynamic_menu_data.get(name)
if project:
self.link_project(project)
return method
def build_menu(self):
if not self.flame:
return []
menu = deepcopy(self.menu)
menu['actions'].append({
"name": "Load...",
"execute": lambda x: callback_selection(
x, self.tools_helper.show_loader)
})
menu['actions'].append({
"name": "Manage...",
"execute": lambda x: self.tools_helper.show_scene_inventory()
})
menu['actions'].append({
"name": "Library...",
"execute": lambda x: self.tools_helper.show_library_loader()
})
return menu
def refresh(self, *args, **kwargs):
self.rescan()
def rescan(self, *args, **kwargs):
if not self.flame:
try:
import flame
self.flame = flame
except ImportError:
self.flame = None
if self.flame:
self.flame.execute_shortcut('Rescan Python Hooks')
self.log.info('Rescan Python Hooks')

View file

@ -0,0 +1,174 @@
"""
Basic avalon integration
"""
import os
import contextlib
from pyblish import api as pyblish
from ayon_core.lib import Logger
from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
deregister_loader_plugin_path,
deregister_creator_plugin_path,
AVALON_CONTAINER_ID,
)
from ayon_flame import FLAME_ADDON_ROOT
from .lib import (
set_segment_data_marker,
set_publish_attribute,
maintained_segment_selection,
get_current_sequence,
reset_segment_selection
)
PLUGINS_DIR = os.path.join(FLAME_ADDON_ROOT, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
AVALON_CONTAINERS = "AVALON_CONTAINERS"
log = Logger.get_logger(__name__)
def install():
pyblish.register_host("flame")
pyblish.register_plugin_path(PUBLISH_PATH)
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
log.info("AYON Flame plug-ins registered ...")
# register callback for switching publishable
pyblish.register_callback("instanceToggled", on_pyblish_instance_toggled)
log.info("AYON Flame host installed ...")
def uninstall():
pyblish.deregister_host("flame")
log.info("Deregistering Flame plug-ins..")
pyblish.deregister_plugin_path(PUBLISH_PATH)
deregister_loader_plugin_path(LOAD_PATH)
deregister_creator_plugin_path(CREATE_PATH)
# register callback for switching publishable
pyblish.deregister_callback("instanceToggled", on_pyblish_instance_toggled)
log.info("AYON Flame host uninstalled ...")
def containerise(flame_clip_segment,
name,
namespace,
context,
loader=None,
data=None):
data_imprint = {
"schema": "openpype:container-2.0",
"id": AVALON_CONTAINER_ID,
"name": str(name),
"namespace": str(namespace),
"loader": str(loader),
"representation": context["representation"]["id"],
}
if data:
for k, v in data.items():
data_imprint[k] = v
log.debug("_ data_imprint: {}".format(data_imprint))
set_segment_data_marker(flame_clip_segment, data_imprint)
return True
def ls():
"""List available containers.
"""
return []
def parse_container(tl_segment, validate=True):
"""Return container data from timeline_item's openpype tag.
"""
# TODO: parse_container
pass
def update_container(tl_segment, data=None):
"""Update container data to input timeline_item's openpype tag.
"""
# TODO: update_container
pass
def on_pyblish_instance_toggled(instance, old_value, new_value):
"""Toggle node passthrough states on instance toggles."""
log.info("instance toggle: {}, old_value: {}, new_value:{} ".format(
instance, old_value, new_value))
# # Whether instances should be passthrough based on new value
# timeline_item = instance.data["item"]
# set_publish_attribute(timeline_item, new_value)
def remove_instance(instance):
"""Remove instance marker from track item."""
# TODO: remove_instance
pass
def list_instances():
"""List all created instances from current workfile."""
# TODO: list_instances
pass
def imprint(segment, data=None):
"""
Adding openpype data to Flame timeline segment.
Also including publish attribute into tag.
Arguments:
segment (flame.PySegment): flame api object
data (dict): Any data which needs to be imprinted
Examples:
data = {
'asset': 'sq020sh0280',
'productType': 'render',
'productName': 'productMain'
}
"""
data = data or {}
set_segment_data_marker(segment, data)
# add publish attribute
set_publish_attribute(segment, True)
@contextlib.contextmanager
def maintained_selection():
import flame
from .lib import CTX
# check if segment is selected
if isinstance(CTX.selection[0], flame.PySegment):
sequence = get_current_sequence(CTX.selection)
try:
with maintained_segment_selection(sequence) as selected:
yield
finally:
# reset all selected clips
reset_segment_selection(sequence)
# select only original selection of segments
for segment in selected:
segment.selected = True
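A minimal usage sketch of the context manager above (meaningful only inside a Flame session with segments selected):

with maintained_selection():
    # selection-changing work goes here; the original segment
    # selection is restored when the block exits
    pass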

File diff suppressed because it is too large

View file

@ -0,0 +1,185 @@
import os
from xml.etree import ElementTree as ET
from ayon_core.lib import Logger
log = Logger.get_logger(__name__)
def export_clip(export_path, clip, preset_path, **kwargs):
"""Flame exported wrapper
Args:
export_path (str): exporting directory path
clip (PyClip): flame api object
preset_path (str): full path to xml preset file
Kwargs:
thumb_frame_number (int)[optional]: source frame number
in_mark (int)[optional]: cut in mark
out_mark (int)[optional]: cut out mark
Raises:
KeyError: Missing input kwarg `thumb_frame_number`
in case `thumbnail` in `export_preset`
FileExistsError: Missing export preset in shared folder
"""
import flame
in_mark = out_mark = None
# Set exporter
exporter = flame.PyExporter()
exporter.foreground = True
exporter.export_between_marks = True
if kwargs.get("thumb_frame_number"):
thumb_frame_number = kwargs["thumb_frame_number"]
# make sure it exists in kwargs
if not thumb_frame_number:
raise KeyError(
"Missing key `thumb_frame_number` in input kwargs")
in_mark = int(thumb_frame_number)
out_mark = int(thumb_frame_number) + 1
elif kwargs.get("in_mark") and kwargs.get("out_mark"):
in_mark = int(kwargs["in_mark"])
out_mark = int(kwargs["out_mark"])
else:
exporter.export_between_marks = False
try:
# set in and out marks if they are available
if in_mark and out_mark:
clip.in_mark = in_mark
clip.out_mark = out_mark
# export with exporter
exporter.export(clip, preset_path, export_path)
finally:
print('Exported: {} at {}-{}'.format(
clip.name.get_value(),
clip.in_mark,
clip.out_mark
))
def get_preset_path_by_xml_name(xml_preset_name):
def _search_path(root):
output = []
for root, _dirs, files in os.walk(root):
for f in files:
if f != xml_preset_name:
continue
file_path = os.path.join(root, f)
output.append(file_path)
return output
def _validate_results(results):
if results and len(results) == 1:
return results.pop()
elif results and len(results) > 1:
print((
"More matching presets for `{}`: /n"
"{}").format(xml_preset_name, results))
return results.pop()
else:
return None
from .utils import (
get_flame_install_root,
get_flame_version
)
# get actual flame version and install path
_version = get_flame_version()["full"]
_install_root = get_flame_install_root()
# search path templates
shared_search_root = "{install_root}/shared/export/presets"
install_search_root = (
"{install_root}/presets/{version}/export/presets/flame")
# fill templates
shared_search_root = shared_search_root.format(
install_root=_install_root
)
install_search_root = install_search_root.format(
install_root=_install_root,
version=_version
)
# get search results
shared_results = _search_path(shared_search_root)
installed_results = _search_path(install_search_root)
# first try to return shared results
shared_preset_path = _validate_results(shared_results)
if shared_preset_path:
return os.path.dirname(shared_preset_path)
# then try installed results
installed_preset_path = _validate_results(installed_results)
if installed_preset_path:
return os.path.dirname(installed_preset_path)
# if nothing found then return False
return False
def modify_preset_file(xml_path, staging_dir, data):
"""Modify xml preset with input data
Args:
xml_path (str ): path for input xml preset
staging_dir (str): staging dir path
data (dict): data where key is xmlTag and value as string
Returns:
str: path to the modified preset copy in the staging dir
"""
# create temp path
dirname, basename = os.path.split(xml_path)
temp_path = os.path.join(staging_dir, basename)
# change xml following data keys
with open(xml_path, "r") as datafile:
_root = ET.parse(datafile)
for key, value in data.items():
try:
if "/" in key:
if not key.startswith("./"):
key = ".//" + key
split_key_path = key.split("/")
element_key = split_key_path[-1]
parent_obj_path = "/".join(split_key_path[:-1])
parent_obj = _root.find(parent_obj_path)
element_obj = parent_obj.find(element_key)
if element_obj is None:
append_element(parent_obj, element_key, value)
else:
finds = _root.findall(".//{}".format(key))
if not finds:
raise AttributeError
for element in finds:
element.text = str(value)
except AttributeError:
log.warning(
"Cannot create attribute: {}: {}. Skipping".format(
key, value
))
_root.write(temp_path)
return temp_path
def append_element(root_element_obj, key, value):
new_element_obj = ET.Element(key)
log.debug("__ new_element_obj: {}".format(new_element_obj))
new_element_obj.text = str(value)
root_element_obj.insert(0, new_element_obj)
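An illustrative call of modify_preset_file (paths and tag names hypothetical): it writes a modified copy of the preset into the staging dir, rewriting an existing tag and appending a missing one:

temp_preset = modify_preset_file(
    "/opt/Autodesk/shared/export/presets/Publish.xml",  # assumed location
    "/tmp/staging",
    {
        "nbHandles": 10,  # rewrites every <nbHandles> element found
        "video/namePattern": "<name>",  # "/" keys resolve as xml paths
    },
)
print(temp_preset)  # /tmp/staging/Publish.xml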

View file

@ -0,0 +1,504 @@
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
import subprocess
import json
import xml.dom.minidom as minidom
from copy import deepcopy
import datetime
from libwiretapPythonClientAPI import ( # noqa
WireTapClientInit,
WireTapClientUninit,
WireTapNodeHandle,
WireTapServerHandle,
WireTapInt,
WireTapStr
)
class WireTapCom(object):
"""
Communicator class wrapper for talking to the WireTap db.
It allows setting up a new project with settings and the
correct colorspace policy, and creating a new user or reusing
an existing user with a similar name (users usually clone
their profiles and add a date stamp as suffix).
"""
def __init__(self, host_name=None, volume_name=None, group_name=None):
"""Initialisation of WireTap communication class
Args:
host_name (str, optional): Name of host server. Defaults to None.
volume_name (str, optional): Name of volume. Defaults to None.
group_name (str, optional): Name of user group. Defaults to None.
"""
# set main attributes of server
# if there are none set the default installation
self.host_name = host_name or "localhost"
self.volume_name = volume_name or "stonefs"
self.group_name = group_name or "staff"
# wiretap tools dir path
self.wiretap_tools_dir = os.getenv("AYON_WIRETAP_TOOLS")
# initialize WireTap client
WireTapClientInit()
# add the server to shared variable
self._server = WireTapServerHandle("{}:IFFFS".format(self.host_name))
print("WireTap connected at '{}'...".format(
self.host_name))
def close(self):
self._server = None
WireTapClientUninit()
print("WireTap closed...")
def get_launch_args(
self, project_name, project_data, user_name, *args, **kwargs):
"""Forming launch arguments for AYON launcher.
Args:
project_name (str): name of project
project_data (dict): Flame compatible project data
user_name (str): name of user
Returns:
list: arguments
"""
workspace_name = kwargs.get("workspace_name")
color_policy = kwargs.get("color_policy")
project_exists = self._project_prep(project_name)
if not project_exists:
self._set_project_settings(project_name, project_data)
self._set_project_colorspace(project_name, color_policy)
user_name = self._user_prep(user_name)
if workspace_name is None:
# default workspace
print("Using a default workspace")
return [
"--start-project={}".format(project_name),
"--start-user={}".format(user_name),
"--create-workspace"
]
else:
print(
"Using a custom workspace '{}'".format(workspace_name))
self._workspace_prep(project_name, workspace_name)
return [
"--start-project={}".format(project_name),
"--start-user={}".format(user_name),
"--create-workspace",
"--start-workspace={}".format(workspace_name)
]
def _workspace_prep(self, project_name, workspace_name):
"""Preparing a workspace
In case it doesn not exists it will create one
Args:
project_name (str): project name
workspace_name (str): workspace name
Raises:
AttributeError: unable to create workspace
"""
workspace_exists = self._child_is_in_parent_path(
"/projects/{}".format(project_name), workspace_name, "WORKSPACE"
)
if not workspace_exists:
project = WireTapNodeHandle(
self._server, "/projects/{}".format(project_name))
workspace_node = WireTapNodeHandle()
created_workspace = project.createNode(
workspace_name, "WORKSPACE", workspace_node)
if not created_workspace:
raise AttributeError(
"Cannot create workspace `{}` in "
"project `{}`: `{}`".format(
workspace_name, project_name, project.lastError())
)
print(
"Workspace `{}` is successfully created".format(workspace_name))
def _project_prep(self, project_name):
"""Preparing a project
In case it doesn not exists it will create one
Args:
project_name (str): project name
Raises:
AttributeError: unable to create project
"""
# test if project exists
project_exists = self._child_is_in_parent_path(
"/projects", project_name, "PROJECT")
if not project_exists:
volumes = self._get_all_volumes()
if len(volumes) == 0:
raise AttributeError(
"Not able to create new project. No Volumes existing"
)
# check if volumes exists
if self.volume_name not in volumes:
raise AttributeError(
("Volume '{}' does not exist in '{}'").format(
self.volume_name, volumes)
)
# form cmd arguments
project_create_cmd = [
os.path.join(
self.wiretap_tools_dir,
"wiretap_create_node"
),
'-n',
os.path.join("/volumes", self.volume_name),
'-d',
project_name,
'-g',
]
project_create_cmd.append(self.group_name)
print(project_create_cmd)
exit_code = subprocess.call(
project_create_cmd,
cwd=os.path.expanduser('~'),
preexec_fn=_subprocess_preexec_fn
)
if exit_code != 0:
RuntimeError("Cannot create project in flame db")
print(
"A new project '{}' is created.".format(project_name))
return project_exists
def _get_all_volumes(self):
"""Request all available volumens from WireTap
Returns:
list: all available volumes in server
Raises:
AttributeError: unable to get any volume children from server
"""
root = WireTapNodeHandle(self._server, "/volumes")
children_num = WireTapInt(0)
get_children_num = root.getNumChildren(children_num)
if not get_children_num:
raise AttributeError(
"Cannot get number of volumes: {}".format(root.lastError())
)
volumes = []
# go through all children and get volume names
child_obj = WireTapNodeHandle()
for child_idx in range(children_num):
# get a child
if not root.getChild(child_idx, child_obj):
raise AttributeError(
"Unable to get child: {}".format(root.lastError()))
node_name = WireTapStr()
get_children_name = child_obj.getDisplayName(node_name)
if not get_children_name:
raise AttributeError(
"Unable to get child name: {}".format(
child_obj.lastError())
)
volumes.append(node_name.c_str())
return volumes
def _user_prep(self, user_name):
"""Ensuring user does exists in user's stack
Args:
user_name (str): name of a user
Raises:
AttributeError: unable to create user
"""
# get all used usernames in db
used_names = self._get_usernames()
print(">> used_names: {}".format(used_names))
# filter only those which are sharing input user name
filtered_users = [user for user in used_names if user_name in user]
if filtered_users:
# TODO: need to find lastly created following regex pattern for
# date used in name
return filtered_users.pop()
# create new user name with date in suffix
now = datetime.datetime.now() # current date and time
date = now.strftime("%Y%m%d")
new_user_name = "{}_{}".format(user_name, date)
print(new_user_name)
if not self._child_is_in_parent_path("/users", new_user_name, "USER"):
# Create the new user
users = WireTapNodeHandle(self._server, "/users")
user_node = WireTapNodeHandle()
created_user = users.createNode(new_user_name, "USER", user_node)
if not created_user:
raise AttributeError(
"User {} cannot be created: {}".format(
new_user_name, users.lastError())
)
print("User `{}` is created".format(new_user_name))
return new_user_name
def _get_usernames(self):
"""Requesting all available users from WireTap
Returns:
list: all available user names
Raises:
AttributeError: there are no users in server
"""
root = WireTapNodeHandle(self._server, "/users")
children_num = WireTapInt(0)
get_children_num = root.getNumChildren(children_num)
if not get_children_num:
raise AttributeError(
"Cannot get number of volumes: {}".format(root.lastError())
)
usernames = []
# go through all children and get user names
child_obj = WireTapNodeHandle()
for child_idx in range(children_num):
# get a child
if not root.getChild(child_idx, child_obj):
raise AttributeError(
"Unable to get child: {}".format(root.lastError()))
node_name = WireTapStr()
get_children_name = child_obj.getDisplayName(node_name)
if not get_children_name:
raise AttributeError(
"Unable to get child name: {}".format(
child_obj.lastError())
)
usernames.append(node_name.c_str())
return usernames
def _child_is_in_parent_path(self, parent_path, child_name, child_type):
"""Checking if a given child is in parent path.
Args:
parent_path (str): db path to parent
child_name (str): name of child
child_type (str): type of child
Raises:
AttributeError: Not able to get number of children
AttributeError: Not able to get children from parent
AttributeError: Not able to get children name
AttributeError: Not able to get children type
Returns:
bool: True if child is in parent path
"""
parent = WireTapNodeHandle(self._server, parent_path)
# iterate number of children
children_num = WireTapInt(0)
requested = parent.getNumChildren(children_num)
if not requested:
raise AttributeError((
"Error: Cannot request number of "
"children from the node {}. Make sure your "
"wiretap service is running: {}").format(
parent_path, parent.lastError())
)
# iterate children
child_obj = WireTapNodeHandle()
for child_idx in range(children_num):
if not parent.getChild(child_idx, child_obj):
raise AttributeError(
"Cannot get child: {}".format(
parent.lastError()))
node_name = WireTapStr()
node_type = WireTapStr()
if not child_obj.getDisplayName(node_name):
raise AttributeError(
"Unable to get child name: %s" % child_obj.lastError()
)
if not child_obj.getNodeTypeStr(node_type):
raise AttributeError(
"Unable to obtain child type: %s" % child_obj.lastError()
)
if (node_name.c_str() == child_name) and (
node_type.c_str() == child_type):
return True
return False
def _set_project_settings(self, project_name, project_data):
"""Setting project attributes.
Args:
project_name (str): name of project
project_data (dict): data with project attributes
(flame compatible)
Raises:
AttributeError: Not able to set project attributes
"""
# generated xml from project_data dict
_xml = "<Project>"
for key, value in project_data.items():
_xml += "<{}>{}</{}>".format(key, value, key)
_xml += "</Project>"
pretty_xml = minidom.parseString(_xml).toprettyxml()
print("__ xml: {}".format(pretty_xml))
# set project data to wiretap
project_node = WireTapNodeHandle(
self._server, "/projects/{}".format(project_name))
if not project_node.setMetaData("XML", _xml):
raise AttributeError(
"Not able to set project attributes {}. Error: {}".format(
project_name, project_node.lastError())
)
print("Project settings successfully set.")
def _set_project_colorspace(self, project_name, color_policy):
"""Set project's colorspace policy.
Args:
project_name (str): name of project
color_policy (str): name of policy
Raises:
RuntimeError: Not able to set colorspace policy
"""
color_policy = color_policy or "Legacy"
# check if the colour policy in custom dir
if "/" in color_policy:
# in the unlikely case a full path was used, strip the prefix
color_policy = color_policy.replace("/syncolor/policies/", "")
# expecting input is `Shared/NameOfPolicy`
color_policy = "/syncolor/policies/{}".format(
color_policy)
else:
color_policy = "/syncolor/policies/Autodesk/{}".format(
color_policy)
# create arguments
project_colorspace_cmd = [
os.path.join(
self.wiretap_tools_dir,
"wiretap_duplicate_node"
),
"-s",
color_policy,
"-n",
"/projects/{}/syncolor".format(project_name)
]
print(project_colorspace_cmd)
exit_code = subprocess.call(
project_colorspace_cmd,
cwd=os.path.expanduser('~'),
preexec_fn=_subprocess_preexec_fn
)
if exit_code != 0:
RuntimeError("Cannot set colorspace {} on project {}".format(
color_policy, project_name
))
def _subprocess_preexec_fn():
""" Helper function
Sets the process group and clears the umask so created files get 0o777 permissions
"""
os.setpgrp()
os.umask(0o000)
if __name__ == "__main__":
# get json exchange data
json_path = sys.argv[-1]
json_data = open(json_path).read()
in_data = json.loads(json_data)
out_data = deepcopy(in_data)
# get main server attributes
host_name = in_data.pop("host_name")
volume_name = in_data.pop("volume_name")
group_name = in_data.pop("group_name")
# initialize class
wiretap_handler = WireTapCom(host_name, volume_name, group_name)
try:
app_args = wiretap_handler.get_launch_args(
project_name=in_data.pop("project_name"),
project_data=in_data.pop("project_data"),
user_name=in_data.pop("user_name"),
**in_data
)
finally:
wiretap_handler.close()
# set returned args back to out data
out_data.update({
"app_args": app_args
})
# write it out back to the exchange json file
with open(json_path, "w") as file_stream:
json.dump(out_data, file_stream, indent=4)
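For reference, a hypothetical exchange file content (shown here as a python dict; keys mirror the pops in __main__, values are illustrative):

in_data = {
    "host_name": "localhost",  # defaults apply when values are missing
    "volume_name": "stonefs",
    "group_name": "staff",
    "project_name": "ayon_test",
    "project_data": {
        "Name": "ayon_test",
        "FrameWidth": 1920,
        "FrameHeight": 1080,
        "FrameRate": "25 fps",
    },
    "user_name": "artist",
    # optional kwargs forwarded to get_launch_args:
    "color_policy": "Legacy",
}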

View file

@ -0,0 +1,143 @@
"""
Flame utils for syncing scripts
"""
import os
import shutil
from ayon_core.lib import Logger
from ayon_flame import FLAME_ADDON_ROOT
log = Logger.get_logger(__name__)
def _sync_utility_scripts(env=None):
""" Synchronizing basic utlility scripts for flame.
To be able to run start AYON within Flame we have to copy
all utility_scripts and additional FLAME_SCRIPT_DIR into
`/opt/Autodesk/shared/python`. This will be always synchronizing those
folders.
"""
env = env or os.environ
# initiate inputs
scripts = {}
fsd_env = env.get("FLAME_SCRIPT_DIRS", "")
flame_shared_dir = "/opt/Autodesk/shared/python"
fsd_paths = [os.path.join(
FLAME_ADDON_ROOT,
"api",
"utility_scripts"
)]
# collect script dirs
log.info("FLAME_SCRIPT_DIRS: `{fsd_env}`".format(**locals()))
log.info("fsd_paths: `{fsd_paths}`".format(**locals()))
# add application environment setting for FLAME_SCRIPT_DIR
# to script path search
for _dirpath in fsd_env.split(os.pathsep):
if not os.path.isdir(_dirpath):
log.warning("Path is not a valid dir: `{_dirpath}`".format(
**locals()))
continue
fsd_paths.append(_dirpath)
# collect scripts from dirs
for path in fsd_paths:
scripts.update({path: os.listdir(path)})
remove_black_list = []
for _k, s_list in scripts.items():
remove_black_list += s_list
log.info("remove_black_list: `{remove_black_list}`".format(**locals()))
log.info("Additional Flame script paths: `{fsd_paths}`".format(**locals()))
log.info("Flame Scripts: `{scripts}`".format(**locals()))
# make sure no script file is in folder
if next(iter(os.listdir(flame_shared_dir)), None):
for _itm in os.listdir(flame_shared_dir):
skip = False
# skip all scripts and folders which are not maintained
if _itm not in remove_black_list:
skip = True
# do not skip if pyc in extension (check the full path, not just the name)
item_path = os.path.join(flame_shared_dir, _itm)
if not os.path.isdir(item_path) and "pyc" in os.path.splitext(_itm)[-1]:
skip = False
# continue if skip is true
if skip:
continue
path = os.path.join(flame_shared_dir, _itm)
log.info("Removing `{path}`...".format(**locals()))
try:
if os.path.isdir(path):
shutil.rmtree(path, onerror=None)
else:
os.remove(path)
except PermissionError as msg:
log.warning(
"Not able to remove: `{}`, Problem with: `{}`".format(
path,
msg
)
)
# copy scripts into Flame's shared python dir
for dirpath, scriptlist in scripts.items():
# directory and scripts list
for _script in scriptlist:
# script in script list
src = os.path.join(dirpath, _script)
dst = os.path.join(flame_shared_dir, _script)
log.info("Copying `{src}` to `{dst}`...".format(**locals()))
try:
if os.path.isdir(src):
shutil.copytree(
src, dst, symlinks=False,
ignore=None, ignore_dangling_symlinks=False
)
else:
shutil.copy2(src, dst)
except (PermissionError, FileExistsError) as msg:
log.warning(
"Not able to copy to: `{}`, Problem with: `{}`".format(
dst,
msg
)
)
def setup(env=None):
""" Wrapper installer started from
`flame/hooks/pre_flame_setup.py`
"""
env = env or os.environ
# synchronize resolve utility scripts
_sync_utility_scripts(env)
log.info("Flame AYON wrapper has been installed")
def get_flame_version():
import flame
return {
"full": flame.get_version(),
"major": flame.get_version_major(),
"minor": flame.get_version_minor(),
"patch": flame.get_version_patch()
}
def get_flame_install_root():
return "/opt/Autodesk"

View file

@ -0,0 +1,37 @@
"""Host API required Work Files tool"""
import os
from ayon_core.lib import Logger
# from .. import (
# get_project_manager,
# get_current_project
# )
log = Logger.get_logger(__name__)
exported_projet_ext = ".otoc"
def file_extensions():
return [exported_project_ext]
def has_unsaved_changes():
pass
def save_file(filepath):
pass
def open_file(filepath):
pass
def current_file():
pass
def work_root(session):
return os.path.normpath(session["AYON_WORKDIR"]).replace("\\", "/")

View file

@ -0,0 +1,239 @@
import os
import json
import tempfile
import contextlib
import socket
from pprint import pformat
from ayon_core.lib import (
get_ayon_username,
run_subprocess,
)
from ayon_applications import PreLaunchHook, LaunchTypes
from ayon_flame import FLAME_ADDON_ROOT
class FlamePrelaunch(PreLaunchHook):
""" Flame prelaunch hook
Will make sure flame_script_dirs are copied to user's folder defined
in environment var FLAME_SCRIPT_DIR.
"""
app_groups = {"flame"}
permissions = 0o777
wtc_script_path = os.path.join(
FLAME_ADDON_ROOT, "api", "scripts", "wiretap_com.py"
)
launch_types = {LaunchTypes.local}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.signature = "( {} )".format(self.__class__.__name__)
def execute(self):
"""Hook entry method."""
_env = self.launch_context.env
self.flame_python_exe = _env["AYON_FLAME_PYTHON_EXEC"]
self.flame_pythonpath = _env["AYON_FLAME_PYTHONPATH"]
project_entity = self.data["project_entity"]
project_name = project_entity["name"]
volume_name = _env.get("FLAME_WIRETAP_VOLUME")
# get image io
project_settings = self.data["project_settings"]
imageio_flame = project_settings["flame"]["imageio"]
# Check whether 'enabled' key from host imageio settings exists
# so we can tell if host is using the new colormanagement framework.
# If the 'enabled' isn't found we want 'colormanaged' set to True
# because prior to the key existing we always did colormanagement for
# Flame
colormanaged = imageio_flame.get("enabled")
# if key was not found, set to True
# ensuring backward compatibility
if colormanaged is None:
colormanaged = True
# get user name and host name
user_name = get_ayon_username()
user_name = user_name.replace(".", "_")
hostname = socket.gethostname() # not returning wiretap host name
self.log.debug("Collected user \"{}\"".format(user_name))
self.log.info(pformat(project_entity))
project_attribs = project_entity["attrib"]
width = project_attribs["resolutionWidth"]
height = project_attribs["resolutionHeight"]
fps = float(project_attribs["fps"])
project_data = {
"Name": project_entity["name"],
"Nickname": project_entity["code"],
"Description": "Created by AYON",
"SetupDir": project_entity["name"],
"FrameWidth": int(width),
"FrameHeight": int(height),
"AspectRatio": float(
(width / height) * project_attribs["pixelAspect"]
),
"FrameRate": self._get_flame_fps(fps)
}
data_to_script = {
# from settings
"host_name": _env.get("FLAME_WIRETAP_HOSTNAME") or hostname,
"volume_name": volume_name,
"group_name": _env.get("FLAME_WIRETAP_GROUP"),
# from project
"project_name": project_name,
"user_name": user_name,
"project_data": project_data
}
# add color management data
if colormanaged:
project_data.update({
"FrameDepth": str(imageio_flame["project"]["frameDepth"]),
"FieldDominance": str(
imageio_flame["project"]["fieldDominance"])
})
data_to_script["color_policy"] = str(
imageio_flame["project"]["colourPolicy"])
self.log.info(pformat(dict(_env)))
self.log.info(pformat(data_to_script))
# add to python path from settings
self._add_pythonpath()
app_arguments = self._get_launch_arguments(data_to_script)
# fix project data permission issue
self._fix_permissions(project_name, volume_name)
self.launch_context.launch_args.extend(app_arguments)
def _fix_permissions(self, project_name, volume_name):
"""Work around for project data permissions
Reported issue: when project is created locally on one machine,
it is impossible to migrate it to another machine. Autodesk Flame
is creating some unmanageable files which need to be opened to 0o777.
Args:
project_name (str): project name
volume_name (str): studio volume
"""
dirs_to_modify = [
"/usr/discreet/project/{}".format(project_name),
"/opt/Autodesk/clip/{}/{}.prj".format(volume_name, project_name),
"/usr/discreet/clip/{}/{}.prj".format(volume_name, project_name)
]
for dirtm in dirs_to_modify:
for root, dirs, files in os.walk(dirtm):
try:
for name in set(dirs) | set(files):
path = os.path.join(root, name)
st = os.stat(path)
if (st.st_mode & 0o777) != self.permissions:
os.chmod(path, self.permissions)
except OSError as exc:
self.log.warning("Not able to open files: {}".format(exc))
def _get_flame_fps(self, fps_num):
fps_table = {
float(23.976): "23.976 fps",
int(25): "25 fps",
int(24): "24 fps",
float(29.97): "29.97 fps DF",
int(30): "30 fps",
int(50): "50 fps",
float(59.94): "59.94 fps DF",
int(60): "60 fps"
}
match_key = min(fps_table.keys(), key=lambda x: abs(x - fps_num))
try:
return fps_table[match_key]
except KeyError as msg:
raise KeyError((
"Missing FPS key in conversion table. "
"Following keys are available: {}".format(fps_table.keys())
)) from msg
def _add_pythonpath(self):
pythonpath = self.launch_context.env.get("PYTHONPATH")
# separate it explicitly by `;` that is what we use in settings
new_pythonpath = self.flame_pythonpath.split(os.pathsep)
new_pythonpath += pythonpath.split(os.pathsep)
self.launch_context.env["PYTHONPATH"] = os.pathsep.join(new_pythonpath)
def _get_launch_arguments(self, script_data):
# Dump data to string
dumped_script_data = json.dumps(script_data)
with make_temp_file(dumped_script_data) as tmp_json_path:
# Prepare subprocess arguments
args = [
self.flame_python_exe.format(
**self.launch_context.env
),
self.wtc_script_path,
tmp_json_path
]
self.log.info("Executing: {}".format(" ".join(args)))
process_kwargs = {
"logger": self.log,
"env": self.launch_context.env
}
run_subprocess(args, **process_kwargs)
# process returned json file to pass launch args
return_json_data = open(tmp_json_path).read()
returned_data = json.loads(return_json_data)
app_args = returned_data.get("app_args")
self.log.info("____ app_args: `{}`".format(app_args))
if not app_args:
RuntimeError("App arguments were not solved")
return app_args
@contextlib.contextmanager
def make_temp_file(data):
    temporary_json_file = None
    try:
        # Store dumped json to temporary file
        temporary_json_file = tempfile.NamedTemporaryFile(
            mode="w", suffix=".json", delete=False
        )
        temporary_json_file.write(data)
        temporary_json_file.close()
        temporary_json_filepath = temporary_json_file.name.replace(
            "\\", "/"
        )
        yield temporary_json_filepath
    except IOError as _error:
        raise IOError(
            "Not able to create temp json file: {}".format(_error)
        ) from _error
    finally:
        # Remove the temporary json file if it was created
        if temporary_json_file is not None:
            os.remove(temporary_json_file.name)

View file

@ -0,0 +1,624 @@
""" compatibility OpenTimelineIO 0.12.0 and newer
"""
import os
import re
import json
import logging
import opentimelineio as otio
from . import utils
import flame
from pprint import pformat
log = logging.getLogger(__name__)
TRACK_TYPES = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
MARKERS_COLOR_MAP = {
(1.0, 0.0, 0.0): otio.schema.MarkerColor.RED,
(1.0, 0.5, 0.0): otio.schema.MarkerColor.ORANGE,
(1.0, 1.0, 0.0): otio.schema.MarkerColor.YELLOW,
(1.0, 0.5, 1.0): otio.schema.MarkerColor.PINK,
(1.0, 1.0, 1.0): otio.schema.MarkerColor.WHITE,
(0.0, 1.0, 0.0): otio.schema.MarkerColor.GREEN,
(0.0, 1.0, 1.0): otio.schema.MarkerColor.CYAN,
(0.0, 0.0, 1.0): otio.schema.MarkerColor.BLUE,
(0.5, 0.0, 0.5): otio.schema.MarkerColor.PURPLE,
(0.5, 0.0, 1.0): otio.schema.MarkerColor.MAGENTA,
(0.0, 0.0, 0.0): otio.schema.MarkerColor.BLACK
}
MARKERS_INCLUDE = True
class CTX:
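    """Shared export context.

    Caches the sequence fps and the timeline start frame so that
    module-level helpers can read them without passing them around.
    """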
_fps = None
_tl_start_frame = None
project = None
clips = None
@classmethod
def set_fps(cls, new_fps):
if not isinstance(new_fps, float):
raise TypeError("Invalid fps type {}".format(type(new_fps)))
if cls._fps != new_fps:
cls._fps = new_fps
@classmethod
def get_fps(cls):
return cls._fps
@classmethod
def set_tl_start_frame(cls, number):
if not isinstance(number, int):
raise TypeError("Invalid timeline start frame type {}".format(
type(number)))
if cls._tl_start_frame != number:
cls._tl_start_frame = number
@classmethod
def get_tl_start_frame(cls):
return cls._tl_start_frame
def flatten(_list):
for item in _list:
if isinstance(item, (list, tuple)):
for sub_item in flatten(item):
yield sub_item
else:
yield item
def get_current_flame_project():
project = flame.project.current_project
return project
def create_otio_rational_time(frame, fps):
return otio.opentime.RationalTime(
float(frame),
float(fps)
)
def create_otio_time_range(start_frame, frame_duration, fps):
return otio.opentime.TimeRange(
start_time=create_otio_rational_time(start_frame, fps),
duration=create_otio_rational_time(frame_duration, fps)
)
def _get_metadata(item):
if hasattr(item, 'metadata'):
return dict(item.metadata) if item.metadata else {}
return {}
def create_time_effects(otio_clip, speed):
otio_effect = None
# retime on track item
if speed != 1.:
# make effect
otio_effect = otio.schema.LinearTimeWarp()
otio_effect.name = "Speed"
otio_effect.time_scalar = speed
otio_effect.metadata = {}
# freeze frame effect
if speed == 0.:
otio_effect = otio.schema.FreezeFrame()
otio_effect.name = "FreezeFrame"
otio_effect.metadata = {}
if otio_effect:
# add otio effect to clip effects
otio_clip.effects.append(otio_effect)
def _get_marker_color(flame_colour):
    # clamp colours to the closest half values (0, 0.5, 1)
_flame_colour = [
(lambda x: round(x * 2) / 2)(c)
for c in flame_colour]
for color, otio_color_type in MARKERS_COLOR_MAP.items():
if _flame_colour == list(color):
return otio_color_type
return otio.schema.MarkerColor.RED
def _get_flame_markers(item):
output_markers = []
time_in = item.record_in.relative_frame
for marker in item.markers:
log.debug(marker)
start_frame = marker.location.get_value().relative_frame
start_frame = (start_frame - time_in) + 1
marker_data = {
"name": marker.name.get_value(),
"duration": marker.duration.get_value().relative_frame,
"comment": marker.comment.get_value(),
"start_frame": start_frame,
"colour": marker.colour.get_value()
}
output_markers.append(marker_data)
return output_markers
def create_otio_markers(otio_item, item):
markers = _get_flame_markers(item)
for marker in markers:
frame_rate = CTX.get_fps()
marked_range = otio.opentime.TimeRange(
start_time=otio.opentime.RationalTime(
marker["start_frame"],
frame_rate
),
duration=otio.opentime.RationalTime(
marker["duration"],
frame_rate
)
)
# testing the comment if it is not containing json string
check_if_json = re.findall(
re.compile(r"[{:}]"),
marker["comment"]
)
# to identify this as json, at least 3 items in the list should
# be present ["{", ":", "}"]
metadata = {}
if len(check_if_json) >= 3:
# this is json string
try:
# capture exceptions which are related to strings only
metadata.update(
json.loads(marker["comment"])
)
except ValueError as msg:
log.error("Marker json conversion: {}".format(msg))
else:
metadata["comment"] = marker["comment"]
otio_marker = otio.schema.Marker(
name=marker["name"],
color=_get_marker_color(
marker["colour"]),
marked_range=marked_range,
metadata=metadata
)
otio_item.markers.append(otio_marker)
def create_otio_reference(clip_data, fps=None):
metadata = _get_metadata(clip_data)
duration = int(clip_data["source_duration"])
# get file info for path and start frame
frame_start = 0
fps = fps or CTX.get_fps()
path = clip_data["fpath"]
file_name = os.path.basename(path)
file_head, extension = os.path.splitext(file_name)
# get padding and other file infos
log.debug("_ path: {}".format(path))
otio_ex_ref_item = None
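    # a frame token in the file name marks the media as an image
    # sequence; the token is reused below for start frame and padding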
is_sequence = frame_number = utils.get_frame_from_filename(file_name)
if is_sequence:
        # keep only the name part before the trailing frame token
        file_head = file_name.rsplit(frame_number, 1)[0]
frame_start = int(frame_number)
padding = len(frame_number)
metadata.update({
"isSequence": True,
"padding": padding
})
# if it is file sequence try to create `ImageSequenceReference`
# the OTIO might not be compatible so return nothing and do it old way
try:
dirname = os.path.dirname(path)
otio_ex_ref_item = otio.schema.ImageSequenceReference(
target_url_base=dirname + os.sep,
name_prefix=file_head,
name_suffix=extension,
start_frame=frame_start,
frame_zero_padding=padding,
rate=fps,
available_range=create_otio_time_range(
frame_start,
duration,
fps
)
)
except AttributeError:
pass
if not otio_ex_ref_item:
dirname, file_name = os.path.split(path)
file_name = utils.get_reformatted_filename(file_name, padded=False)
reformated_path = os.path.join(dirname, file_name)
# in case old OTIO or video file create `ExternalReference`
otio_ex_ref_item = otio.schema.ExternalReference(
target_url=reformated_path,
available_range=create_otio_time_range(
frame_start,
duration,
fps
)
)
# add metadata to otio item
add_otio_metadata(otio_ex_ref_item, clip_data, **metadata)
return otio_ex_ref_item
def create_otio_clip(clip_data):
from ayon_flame.api import MediaInfoFile, TimeEffectMetadata
segment = clip_data["PySegment"]
# calculate source in
media_info = MediaInfoFile(clip_data["fpath"], logger=log)
media_timecode_start = media_info.start_frame
media_fps = media_info.fps
# Timewarp metadata
tw_data = TimeEffectMetadata(segment, logger=log).data
log.debug("__ tw_data: {}".format(tw_data))
# define first frame
file_first_frame = utils.get_frame_from_filename(
clip_data["fpath"])
if file_first_frame:
file_first_frame = int(file_first_frame)
first_frame = media_timecode_start or file_first_frame or 0
_clip_source_in = int(clip_data["source_in"])
_clip_source_out = int(clip_data["source_out"])
_clip_record_in = clip_data["record_in"]
_clip_record_out = clip_data["record_out"]
_clip_record_duration = int(clip_data["record_duration"])
log.debug("_ file_first_frame: {}".format(file_first_frame))
log.debug("_ first_frame: {}".format(first_frame))
log.debug("_ _clip_source_in: {}".format(_clip_source_in))
log.debug("_ _clip_source_out: {}".format(_clip_source_out))
log.debug("_ _clip_record_in: {}".format(_clip_record_in))
log.debug("_ _clip_record_out: {}".format(_clip_record_out))
# first solve if the reverse timing
speed = 1
if clip_data["source_in"] > clip_data["source_out"]:
source_in = _clip_source_out - int(first_frame)
source_out = _clip_source_in - int(first_frame)
speed = -1
else:
source_in = _clip_source_in - int(first_frame)
source_out = _clip_source_out - int(first_frame)
log.debug("_ source_in: {}".format(source_in))
log.debug("_ source_out: {}".format(source_out))
if file_first_frame:
log.debug("_ file_source_in: {}".format(
file_first_frame + source_in))
log.debug("_ file_source_in: {}".format(
file_first_frame + source_out))
source_duration = (source_out - source_in + 1)
# secondly check if any change of speed
if source_duration != _clip_record_duration:
retime_speed = float(source_duration) / float(_clip_record_duration)
log.debug("_ calculated speed: {}".format(retime_speed))
speed *= retime_speed
# get speed from metadata if available
if tw_data.get("speed"):
speed = tw_data["speed"]
log.debug("_ metadata speed: {}".format(speed))
log.debug("_ speed: {}".format(speed))
log.debug("_ source_duration: {}".format(source_duration))
log.debug("_ _clip_record_duration: {}".format(_clip_record_duration))
# create media reference
media_reference = create_otio_reference(
clip_data, media_fps)
    # create source range
source_range = create_otio_time_range(
source_in,
_clip_record_duration,
CTX.get_fps()
)
otio_clip = otio.schema.Clip(
name=clip_data["segment_name"],
source_range=source_range,
media_reference=media_reference
)
# Add markers
if MARKERS_INCLUDE:
create_otio_markers(otio_clip, segment)
if speed != 1:
create_time_effects(otio_clip, speed)
return otio_clip
def create_otio_gap(gap_start, clip_start, tl_start_frame, fps):
return otio.schema.Gap(
source_range=create_otio_time_range(
gap_start,
(clip_start - tl_start_frame) - gap_start,
fps
)
)
def _get_colourspace_policy():
output = {}
# get policies project path
policy_dir = "/opt/Autodesk/project/{}/synColor/policy".format(
CTX.project.name
)
log.debug(policy_dir)
policy_fp = os.path.join(policy_dir, "policy.cfg")
if not os.path.exists(policy_fp):
return output
    with open(policy_fp) as policy_file:
        # skip any lines without a `key = value` pair
        dict_conf = dict(
            line.strip().split(" = ", 1)
            for line in policy_file
            if " = " in line
        )
output.update(
{"openpype.flame.{}".format(k): v for k, v in dict_conf.items()}
)
return output
def _create_otio_timeline(sequence):
metadata = _get_metadata(sequence)
# find colour policy files and add them to metadata
colorspace_policy = _get_colourspace_policy()
metadata.update(colorspace_policy)
metadata.update({
"openpype.timeline.width": int(sequence.width),
"openpype.timeline.height": int(sequence.height),
"openpype.timeline.pixelAspect": 1
})
rt_start_time = create_otio_rational_time(
CTX.get_tl_start_frame(), CTX.get_fps())
return otio.schema.Timeline(
name=str(sequence.name)[1:-1],
global_start_time=rt_start_time,
metadata=metadata
)
def create_otio_track(track_type, track_name):
return otio.schema.Track(
name=track_name,
kind=TRACK_TYPES[track_type]
)
def add_otio_gap(clip_data, otio_track, prev_out):
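    # the gap spans from the previous clip's record-out to this
    # clip's record-in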
gap_length = clip_data["record_in"] - prev_out
if prev_out != 0:
gap_length -= 1
gap = otio.opentime.TimeRange(
duration=otio.opentime.RationalTime(
gap_length,
CTX.get_fps()
)
)
otio_gap = otio.schema.Gap(source_range=gap)
otio_track.append(otio_gap)
def add_otio_metadata(otio_item, item, **kwargs):
metadata = _get_metadata(item)
# add additional metadata from kwargs
if kwargs:
metadata.update(kwargs)
# add metadata to otio item metadata
for key, value in metadata.items():
otio_item.metadata.update({key: value})
def _get_shot_tokens_values(clip, tokens):
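    # Flame resolves tokens like <width> only through attribute values,
    # so each token is temporarily written into `shot_name`, the
    # resolved value read back, and the original name restored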
    output = {}
    old_value = clip.shot_name.get_value()
for token in tokens:
clip.shot_name.set_value(token)
_key = re.sub("[ <>]", "", token)
try:
output[_key] = int(clip.shot_name.get_value())
except ValueError:
output[_key] = clip.shot_name.get_value()
clip.shot_name.set_value(old_value)
return output
def _get_segment_attributes(segment):
log.debug("Segment name|hidden: {}|{}".format(
segment.name.get_value(), segment.hidden
))
if (
segment.name.get_value() == ""
or segment.hidden.get_value()
):
return None
# Add timeline segment to tree
clip_data = {
"segment_name": segment.name.get_value(),
"segment_comment": segment.comment.get_value(),
"shot_name": segment.shot_name.get_value(),
"tape_name": segment.tape_name,
"source_name": segment.source_name,
"fpath": segment.file_path,
"PySegment": segment
}
# add all available shot tokens
shot_tokens = _get_shot_tokens_values(
segment,
["<colour space>", "<width>", "<height>", "<depth>"]
)
clip_data.update(shot_tokens)
# populate shot source metadata
segment_attrs = [
"record_duration", "record_in", "record_out",
"source_duration", "source_in", "source_out"
]
segment_attrs_data = {}
for attr in segment_attrs:
if not hasattr(segment, attr):
continue
_value = getattr(segment, attr)
segment_attrs_data[attr] = str(_value).replace("+", ":")
if attr in ["record_in", "record_out"]:
clip_data[attr] = _value.relative_frame
else:
clip_data[attr] = _value.frame
clip_data["segment_timecodes"] = segment_attrs_data
return clip_data
def create_otio_timeline(sequence):
log.info(dir(sequence))
log.info(sequence.attributes)
CTX.project = get_current_flame_project()
# get current timeline
CTX.set_fps(
float(str(sequence.frame_rate)[:-4]))
tl_start_frame = utils.timecode_to_frames(
str(sequence.start_time).replace("+", ":"),
CTX.get_fps()
)
CTX.set_tl_start_frame(tl_start_frame)
# convert timeline to otio
otio_timeline = _create_otio_timeline(sequence)
# create otio tracks and clips
for ver in sequence.versions:
for track in ver.tracks:
# avoid all empty tracks
# or hidden tracks
if (
len(track.segments) == 0
or track.hidden.get_value()
):
continue
# convert track to otio
otio_track = create_otio_track(
"video", str(track.name)[1:-1])
all_segments = []
for segment in track.segments:
clip_data = _get_segment_attributes(segment)
if not clip_data:
continue
all_segments.append(clip_data)
segments_ordered = dict(enumerate(all_segments))
log.debug("_ segments_ordered: {}".format(
pformat(segments_ordered)
))
if not segments_ordered:
continue
for itemindex, segment_data in segments_ordered.items():
log.debug("_ itemindex: {}".format(itemindex))
# Add Gap if needed
prev_item = (
segment_data
if itemindex == 0
else segments_ordered[itemindex - 1]
)
log.debug("_ segment_data: {}".format(segment_data))
# calculate clip frame range difference from each other
clip_diff = segment_data["record_in"] - prev_item["record_out"]
# add gap if first track item is not starting
# at first timeline frame
if itemindex == 0 and segment_data["record_in"] > 0:
add_otio_gap(segment_data, otio_track, 0)
# or add gap if following track items are having
# frame range differences from each other
elif itemindex and clip_diff != 1:
add_otio_gap(
segment_data, otio_track, prev_item["record_out"])
# create otio clip and add it to track
otio_clip = create_otio_clip(segment_data)
otio_track.append(otio_clip)
log.debug("_ otio_clip: {}".format(otio_clip))
# create otio marker
# create otio metadata
# add track to otio timeline
otio_timeline.tracks.append(otio_track)
return otio_timeline
def write_to_file(otio_timeline, path):
otio.adapters.write_to_file(otio_timeline, path)

View file

@ -0,0 +1,91 @@
import re
import opentimelineio as otio
import logging
log = logging.getLogger(__name__)
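# matches a frame counter delimited by "." or "_" on the left and "."
# on the right, e.g. "plate.0001.exr" or "plate_0001.exr"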
FRAME_PATTERN = re.compile(r"[\._](\d+)[\.]")
def timecode_to_frames(timecode, framerate):
rt = otio.opentime.from_timecode(timecode, framerate)
return int(otio.opentime.to_frames(rt))
def frames_to_timecode(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_timecode(rt)
def frames_to_seconds(frames, framerate):
rt = otio.opentime.from_frames(frames, framerate)
return otio.opentime.to_seconds(rt)
def get_reformatted_filename(filename, padded=True):
"""
Return fixed python expression path
Args:
filename (str): file name
Returns:
type: string with reformatted path
Example:
get_reformatted_filename("plate.1001.exr") > plate.%04d.exr
"""
found = FRAME_PATTERN.search(filename)
if not found:
log.info("File name is not sequence: {}".format(filename))
return filename
padding = get_padding_from_filename(filename)
replacement = "%0{}d".format(padding) if padded else "%d"
start_idx, end_idx = found.span(1)
return replacement.join(
[filename[:start_idx], filename[end_idx:]]
)
def get_padding_from_filename(filename):
"""
Return padding number from Flame path style
Args:
filename (str): file name
Returns:
        int: padding number, or None if no frame token is found
Example:
get_padding_from_filename("plate.0001.exr") > 4
"""
found = get_frame_from_filename(filename)
return len(found) if found else None
def get_frame_from_filename(filename):
"""
Return sequence number from Flame path style
Args:
filename (str): file name
Returns:
int: sequence frame number
Example:
def get_frame_from_filename(path):
("plate.0001.exr") > 0001
"""
found = re.findall(FRAME_PATTERN, filename)
return found.pop() if found else None

View file

@ -0,0 +1,307 @@
from copy import deepcopy
import ayon_flame.api as opfapi
class CreateShotClip(opfapi.Creator):
"""Publishable clip"""
label = "Create Publishable Clip"
product_type = "clip"
icon = "film"
defaults = ["Main"]
presets = None
def process(self):
        # Create a copy of object attributes that are modified during `process`
presets = deepcopy(self.presets)
gui_inputs = self.get_gui_inputs()
# get key pairs from presets and match it on ui inputs
for k, v in gui_inputs.items():
if v["type"] in ("dict", "section"):
# nested dictionary (only one level allowed
# for sections and dict)
for _k, _v in v["value"].items():
if presets.get(_k) is not None:
gui_inputs[k][
"value"][_k]["value"] = presets[_k]
if presets.get(k) is not None:
gui_inputs[k]["value"] = presets[k]
# open widget for plugins inputs
results_back = self.create_widget(
"AYON publish attributes creator",
"Define sequential rename and fill hierarchy data.",
gui_inputs
)
if len(self.selected) < 1:
return
if not results_back:
print("Operation aborted")
return
# get ui output for track name for vertical sync
v_sync_track = results_back["vSyncTrack"]["value"]
        # sort selected track items by vertical sync track
sorted_selected_segments = []
unsorted_selected_segments = []
for _segment in self.selected:
if _segment.parent.name.get_value() in v_sync_track:
sorted_selected_segments.append(_segment)
else:
unsorted_selected_segments.append(_segment)
sorted_selected_segments.extend(unsorted_selected_segments)
kwargs = {
"log": self.log,
"ui_inputs": results_back,
"avalon": self.data,
"product_type": self.data["productType"]
}
for i, segment in enumerate(sorted_selected_segments):
kwargs["rename_index"] = i
# convert track item to timeline media pool item
opfapi.PublishableClip(segment, **kwargs).convert()
def get_gui_inputs(self):
gui_tracks = self._get_video_track_names(
opfapi.get_current_sequence(opfapi.CTX.selection)
)
return deepcopy({
"renameHierarchy": {
"type": "section",
"label": "Shot Hierarchy And Rename Settings",
"target": "ui",
"order": 0,
"value": {
"hierarchy": {
"value": "{folder}/{sequence}",
"type": "QLineEdit",
"label": "Shot Parent Hierarchy",
"target": "tag",
"toolTip": "Parents folder for shot root folder, Template filled with `Hierarchy Data` section", # noqa
"order": 0},
"useShotName": {
"value": True,
"type": "QCheckBox",
"label": "Use Shot Name",
"target": "ui",
"toolTip": "Use name form Shot name clip attribute", # noqa
"order": 1},
"clipRename": {
"value": False,
"type": "QCheckBox",
"label": "Rename clips",
"target": "ui",
"toolTip": "Renaming selected clips on fly", # noqa
"order": 2},
"clipName": {
"value": "{sequence}{shot}",
"type": "QLineEdit",
"label": "Clip Name Template",
"target": "ui",
"toolTip": "template for creating shot namespaused for renaming (use rename: on)", # noqa
"order": 3},
"segmentIndex": {
"value": True,
"type": "QCheckBox",
"label": "Segment index",
"target": "ui",
"toolTip": "Take number from segment index", # noqa
"order": 4},
"countFrom": {
"value": 10,
"type": "QSpinBox",
"label": "Count sequence from",
"target": "ui",
"toolTip": "Set when the sequence number stafrom", # noqa
"order": 5},
"countSteps": {
"value": 10,
"type": "QSpinBox",
"label": "Stepping number",
"target": "ui",
"toolTip": "What number is adding every new step", # noqa
"order": 6},
}
},
"hierarchyData": {
"type": "dict",
"label": "Shot Template Keywords",
"target": "tag",
"order": 1,
"value": {
"folder": {
"value": "shots",
"type": "QLineEdit",
"label": "{folder}",
"target": "tag",
"toolTip": "Name of folder used for root of generated shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 0},
"episode": {
"value": "ep01",
"type": "QLineEdit",
"label": "{episode}",
"target": "tag",
"toolTip": "Name of episode.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 1},
"sequence": {
"value": "sq01",
"type": "QLineEdit",
"label": "{sequence}",
"target": "tag",
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 2},
"track": {
"value": "{_track_}",
"type": "QLineEdit",
"label": "{track}",
"target": "tag",
"toolTip": "Name of sequence of shots.\nUsable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 3},
"shot": {
"value": "sh###",
"type": "QLineEdit",
"label": "{shot}",
"target": "tag",
"toolTip": "Name of shot. `#` is converted to paded number. \nAlso could be used with usable tokens:\n\t{_clip_}: name of used clip\n\t{_track_}: name of parent track layer\n\t{_sequence_}: name of parent sequence (timeline)", # noqa
"order": 4}
}
},
"verticalSync": {
"type": "section",
"label": "Vertical Synchronization Of Attributes",
"target": "ui",
"order": 2,
"value": {
"vSyncOn": {
"value": True,
"type": "QCheckBox",
"label": "Enable Vertical Sync",
"target": "ui",
"toolTip": "Switch on if you want clips above each other to share its attributes", # noqa
"order": 0},
"vSyncTrack": {
"value": gui_tracks, # noqa
"type": "QComboBox",
"label": "Hero track",
"target": "ui",
"toolTip": "Select driving track name which should be hero for all others", # noqa
"order": 1}
}
},
"publishSettings": {
"type": "section",
"label": "Publish Settings",
"target": "ui",
"order": 3,
"value": {
"productName": {
"value": ["[ track name ]", "main", "bg", "fg", "bg",
"animatic"],
"type": "QComboBox",
"label": "Product Name",
"target": "ui",
"toolTip": "chose product name pattern, if [ track name ] is selected, name of track layer will be used", # noqa
"order": 0},
"productType": {
"value": ["plate", "take"],
"type": "QComboBox",
"label": "Product Type",
"target": "ui", "toolTip": "What use of this product is for", # noqa
"order": 1},
"reviewTrack": {
"value": ["< none >"] + gui_tracks,
"type": "QComboBox",
"label": "Use Review Track",
"target": "ui",
"toolTip": "Generate preview videos on fly, if `< none >` is defined nothing will be generated.", # noqa
"order": 2},
"audio": {
"value": False,
"type": "QCheckBox",
"label": "Include audio",
"target": "tag",
"toolTip": "Process products with corresponding audio", # noqa
"order": 3},
"sourceResolution": {
"value": False,
"type": "QCheckBox",
"label": "Source resolution",
"target": "tag",
"toolTip": "Is resolution taken from timeline or source?", # noqa
"order": 4},
}
},
"frameRangeAttr": {
"type": "section",
"label": "Shot Attributes",
"target": "ui",
"order": 4,
"value": {
"workfileFrameStart": {
"value": 1001,
"type": "QSpinBox",
"label": "Workfiles Start Frame",
"target": "tag",
"toolTip": "Set workfile starting frame number", # noqa
"order": 0
},
"handleStart": {
"value": 0,
"type": "QSpinBox",
"label": "Handle Start",
"target": "tag",
"toolTip": "Handle at start of clip", # noqa
"order": 1
},
"handleEnd": {
"value": 0,
"type": "QSpinBox",
"label": "Handle End",
"target": "tag",
"toolTip": "Handle at end of clip", # noqa
"order": 2
},
"includeHandles": {
"value": False,
"type": "QCheckBox",
"label": "Include handles",
"target": "tag",
"toolTip": "By default handles are excluded", # noqa
"order": 3
},
"retimedHandles": {
"value": True,
"type": "QCheckBox",
"label": "Retimed handles",
"target": "tag",
"toolTip": "By default handles are retimed.", # noqa
"order": 4
},
"retimedFramerange": {
"value": True,
"type": "QCheckBox",
"label": "Retimed framerange",
"target": "tag",
"toolTip": "By default framerange is retimed.", # noqa
"order": 5
}
}
}
})
def _get_video_track_names(self, sequence):
track_names = []
for ver in sequence.versions:
for track in ver.tracks:
track_names.append(track.name.get_value())
return track_names

View file

@ -0,0 +1,274 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
import ayon_flame.api as opfapi
from ayon_core.lib import StringTemplate
from ayon_core.lib.transcoding import (
VIDEO_EXTENSIONS,
IMAGE_EXTENSIONS
)
class LoadClip(opfapi.ClipLoader):
"""Load a product to timeline as clip
Place clip to timeline on its asset origin timings collected
during conforming to project
"""
product_types = {"render2d", "source", "plate", "render", "review"}
representations = {"*"}
extensions = set(
ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
)
label = "Load as clip"
order = -10
icon = "code-fork"
color = "orange"
# settings
reel_group_name = "OpenPype_Reels"
reel_name = "Loaded"
clip_name_template = "{folder[name]}_{product[name]}<_{output}>"
""" Anatomy keys from version context data and dynamically added:
- {layerName} - original layer name token
- {layerUID} - original layer UID token
- {originalBasename} - original clip name taken from file
"""
layer_rename_template = "{folder[name]}_{product[name]}<_{output}>"
layer_rename_patterns = []
def load(self, context, name, namespace, options):
# get flame objects
fproject = flame.project.current_project
self.fpd = fproject.current_workspace.desktop
# load clip to timeline and get main variables
version_entity = context["version"]
version_attributes = version_entity["attrib"]
version_name = version_entity["version"]
colorspace = self.get_colorspace(context)
        clip_name_template = self.clip_name_template
        layer_rename_template = self.layer_rename_template
        # in case output is not in context replace the key with representation
        # (work on local copies so the class attributes are not mutated
        # between loads)
        if not context["representation"]["context"].get("output"):
            clip_name_template = clip_name_template.replace(
                "output", "representation")
            layer_rename_template = layer_rename_template.replace(
                "output", "representation")
        formatting_data = deepcopy(context["representation"]["context"])
        clip_name = StringTemplate(clip_name_template).format(
            formatting_data)
# convert colorspace with ocio to flame mapping
# in imageio flame section
colorspace = self.get_native_colorspace(colorspace)
self.log.info("Loading with colorspace: `{}`".format(colorspace))
# create workfile path
workfile_dir = os.environ["AYON_WORKDIR"]
openclip_dir = os.path.join(
workfile_dir, clip_name
)
openclip_path = os.path.join(
openclip_dir, clip_name + ".clip"
)
if not os.path.exists(openclip_dir):
os.makedirs(openclip_dir)
        # prepare clip data from context and send it to openClipLoader
path = self.filepath_from_context(context)
loading_context = {
"path": path.replace("\\", "/"),
"colorspace": colorspace,
"version": "v{:0>3}".format(version_name),
"layer_rename_template": self.layer_rename_template,
"layer_rename_patterns": self.layer_rename_patterns,
"context_data": formatting_data
}
self.log.debug(pformat(
loading_context
))
self.log.debug(openclip_path)
# make openpype clip file
opfapi.OpenClipSolver(
openclip_path, loading_context, logger=self.log).make()
# prepare Reel group in actual desktop
opc = self._get_clip(
clip_name,
openclip_path
)
# add additional metadata from the version to imprint Avalon knob
add_keys = [
"frameStart", "frameEnd", "source", "author",
"fps", "handleStart", "handleEnd"
]
# move all version data keys to tag data
data_imprint = {
key: version_attributes.get(key, str(None))
for key in add_keys
}
# add variables related to version context
data_imprint.update({
"version": version_name,
"colorspace": colorspace,
"objectName": clip_name
})
# TODO: finish the containerisation
# opc_segment = opfapi.get_clip_segment(opc)
# return opfapi.containerise(
# opc_segment,
# name, namespace, context,
# self.__class__.__name__,
# data_imprint)
return opc
def _get_clip(self, name, clip_path):
reel = self._get_reel()
# with maintained openclip as opc
matching_clip = [cl for cl in reel.clips
if cl.name.get_value() == name]
if matching_clip:
return matching_clip.pop()
else:
created_clips = flame.import_clips(str(clip_path), reel)
return created_clips.pop()
def _get_reel(self):
matching_rgroup = [
rg for rg in self.fpd.reel_groups
if rg.name.get_value() == self.reel_group_name
]
if not matching_rgroup:
reel_group = self.fpd.create_reel_group(str(self.reel_group_name))
for _r in reel_group.reels:
if "reel" not in _r.name.get_value().lower():
continue
self.log.debug("Removing: {}".format(_r.name))
flame.delete(_r)
else:
reel_group = matching_rgroup.pop()
matching_reel = [
re for re in reel_group.reels
if re.name.get_value() == self.reel_name
]
        if not matching_reel:
            reel = reel_group.create_reel(str(self.reel_name))
        else:
            reel = matching_reel.pop()
        return reel
def _get_segment_from_clip(self, clip):
# unwrapping segment from input clip
pass
# def switch(self, container, context):
# self.update(container, context)
# def update(self, container, context):
# """ Updating previously loaded clips
# """
# # load clip to timeline and get main variables
# repre_entity = context['representation']
# name = container['name']
# namespace = container['namespace']
# track_item = phiero.get_track_items(
# track_item_name=namespace)
# version = io.find_one({
# "type": "version",
# "id": repre_entity["versionId"]
# })
# version_data = version.get("data", {})
# version_name = version.get("name", None)
# colorspace = version_data.get("colorSpace", None)
# object_name = "{}_{}".format(name, namespace)
# file = get_representation_path(repre_entity).replace("\\", "/")
# clip = track_item.source()
# # reconnect media to new path
# clip.reconnectMedia(file)
# # set colorspace
# if colorspace:
# clip.setSourceMediaColourTransform(colorspace)
# # add additional metadata from the version to imprint Avalon knob
# add_keys = [
# "frameStart", "frameEnd", "source", "author",
# "fps", "handleStart", "handleEnd"
# ]
# # move all version data keys to tag data
# data_imprint = {}
# for key in add_keys:
# data_imprint.update({
# key: version_data.get(key, str(None))
# })
# # add variables related to version context
# data_imprint.update({
# "representation": repre_entity["id"],
# "version": version_name,
# "colorspace": colorspace,
# "objectName": object_name
# })
# # update color of clip regarding the version order
# self.set_item_color(track_item, version)
# return phiero.update_container(track_item, data_imprint)
# def remove(self, container):
# """ Removing previously loaded clips
# """
# # load clip to timeline and get main variables
# namespace = container['namespace']
# track_item = phiero.get_track_items(
# track_item_name=namespace)
# track = track_item.parent()
# # remove track item from track
# track.removeItem(track_item)
# @classmethod
# def multiselection(cls, track_item):
# if not cls.track:
# cls.track = track_item.parent()
# cls.sequence = cls.track.parent()
# @classmethod
# def set_item_color(cls, track_item, version):
# clip = track_item.source()
# # define version name
# version_name = version.get("name", None)
# # get all versions in list
# versions = io.find({
# "type": "version",
# "parent": version["parent"]
# }).distinct('name')
# max_version = max(versions)
# # set clip colour
# if version_name == max_version:
# clip.binItem().setColor(cls.clip_color_last)
# else:
# clip.binItem().setColor(cls.clip_color)

View file

@ -0,0 +1,180 @@
from copy import deepcopy
import os
import flame
from pprint import pformat
import ayon_flame.api as opfapi
from ayon_core.lib import StringTemplate
from ayon_core.lib.transcoding import (
VIDEO_EXTENSIONS,
IMAGE_EXTENSIONS
)
class LoadClipBatch(opfapi.ClipLoader):
"""Load a product to timeline as clip
Place clip to timeline on its asset origin timings collected
during conforming to project
"""
product_types = {"render2d", "source", "plate", "render", "review"}
representations = {"*"}
extensions = set(
ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
)
label = "Load as clip to current batch"
order = -10
icon = "code-fork"
color = "orange"
# settings
reel_name = "OP_LoadedReel"
clip_name_template = "{batch}_{folder[name]}_{product[name]}<_{output}>"
""" Anatomy keys from version context data and dynamically added:
- {layerName} - original layer name token
- {layerUID} - original layer UID token
- {originalBasename} - original clip name taken from file
"""
layer_rename_template = "{folder[name]}_{product[name]}<_{output}>"
layer_rename_patterns = []
def load(self, context, name, namespace, options):
# get flame objects
self.batch = options.get("batch") or flame.batch
# load clip to timeline and get main variables
version_entity = context["version"]
        version_attributes = version_entity["attrib"]
version_name = version_entity["version"]
colorspace = self.get_colorspace(context)
clip_name_template = self.clip_name_template
layer_rename_template = self.layer_rename_template
# in case output is not in context replace key to representation
if not context["representation"]["context"].get("output"):
clip_name_template = clip_name_template.replace(
"output", "representation")
layer_rename_template = layer_rename_template.replace(
"output", "representation")
folder_entity = context["folder"]
product_entity = context["product"]
formatting_data = deepcopy(context["representation"]["context"])
formatting_data["batch"] = self.batch.name.get_value()
formatting_data.update({
"asset": folder_entity["name"],
"folder": {
"name": folder_entity["name"],
},
"subset": product_entity["name"],
"family": product_entity["productType"],
"product": {
"name": product_entity["name"],
"type": product_entity["productType"],
}
})
clip_name = StringTemplate(clip_name_template).format(
formatting_data)
# convert colorspace with ocio to flame mapping
# in imageio flame section
colorspace = self.get_native_colorspace(colorspace)
self.log.info("Loading with colorspace: `{}`".format(colorspace))
# create workfile path
workfile_dir = options.get("workdir") or os.environ["AYON_WORKDIR"]
openclip_dir = os.path.join(
workfile_dir, clip_name
)
openclip_path = os.path.join(
openclip_dir, clip_name + ".clip"
)
if not os.path.exists(openclip_dir):
os.makedirs(openclip_dir)
# prepare clip data from context and send it to openClipLoader
path = self.filepath_from_context(context)
loading_context = {
"path": path.replace("\\", "/"),
"colorspace": colorspace,
"version": "v{:0>3}".format(version_name),
"layer_rename_template": layer_rename_template,
"layer_rename_patterns": self.layer_rename_patterns,
"context_data": formatting_data
}
self.log.debug(pformat(
loading_context
))
self.log.debug(openclip_path)
# make openpype clip file
opfapi.OpenClipSolver(
openclip_path, loading_context, logger=self.log).make()
# prepare Reel group in actual desktop
opc = self._get_clip(
clip_name,
openclip_path
)
# add additional metadata from the version to imprint Avalon knob
add_keys = [
"frameStart", "frameEnd", "source", "author",
"fps", "handleStart", "handleEnd"
]
# move all version data keys to tag data
data_imprint = {
key: version_attributes.get(key, str(None))
for key in add_keys
}
# add variables related to version context
data_imprint.update({
"version": version_name,
"colorspace": colorspace,
"objectName": clip_name
})
# TODO: finish the containerisation
# opc_segment = opfapi.get_clip_segment(opc)
# return opfapi.containerise(
# opc_segment,
# name, namespace, context,
# self.__class__.__name__,
# data_imprint)
return opc
def _get_clip(self, name, clip_path):
reel = self._get_reel()
# with maintained openclip as opc
matching_clip = None
        for cl in reel.clips:
            if cl.name.get_value() != name:
                continue
            matching_clip = cl
            break
if not matching_clip:
created_clips = flame.import_clips(str(clip_path), reel)
return created_clips.pop()
return matching_clip
def _get_reel(self):
matching_reel = [
rg for rg in self.batch.reels
if rg.name.get_value() == self.reel_name
]
return (
matching_reel.pop()
if matching_reel
else self.batch.create_reel(str(self.reel_name))
)

View file

@ -0,0 +1,64 @@
import os
import pyblish.api
import tempfile
import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export as otio_export
import opentimelineio as otio
from pprint import pformat
from importlib import reload  # Python 3 has no builtin `reload`
reload(otio_export)  # noqa
@pyblish.api.log
class CollectTestSelection(pyblish.api.ContextPlugin):
"""testing selection sharing
"""
order = pyblish.api.CollectorOrder
label = "test selection"
hosts = ["flame"]
active = False
def process(self, context):
self.log.info(
"Active Selection: {}".format(opfapi.CTX.selection))
sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
self.test_imprint_data(sequence)
self.test_otio_export(sequence)
def test_otio_export(self, sequence):
test_dir = os.path.normpath(
tempfile.mkdtemp(prefix="test_pyblish_tmp_")
)
export_path = os.path.normpath(
os.path.join(
test_dir, "otio_timeline_export.otio"
)
)
otio_timeline = otio_export.create_otio_timeline(sequence)
otio_export.write_to_file(
otio_timeline, export_path
)
read_timeline_otio = otio.adapters.read_from_file(export_path)
if otio_timeline != read_timeline_otio:
raise Exception("Exported timeline is different from original")
self.log.info(pformat(otio_timeline))
self.log.info("Otio exported to: {}".format(export_path))
def test_imprint_data(self, sequence):
with opfapi.maintained_segment_selection(sequence) as sel_segments:
for segment in sel_segments:
if str(segment.name)[1:-1] == "":
continue
self.log.debug("Segment with OpenPypeData: {}".format(
segment.name))
opfapi.imprint(segment, {
'asset': segment.name.get_value(),
'productType': 'render',
'productName': 'productMain'
})

View file

@ -0,0 +1,417 @@
import re
import pyblish
import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export
from ayon_core.pipeline import AYON_INSTANCE_ID, AVALON_INSTANCE_ID
from ayon_core.pipeline.editorial import (
is_overlapping_otio_ranges,
get_media_range_with_retimes
)
# # developer reload modules
from pprint import pformat
# constants
NUM_PATTERN = re.compile(r"([0-9\.]+)")
TXT_PATTERN = re.compile(r"([a-zA-Z]+)")
class CollectTimelineInstances(pyblish.api.ContextPlugin):
"""Collect all Timeline segment selection."""
order = pyblish.api.CollectorOrder - 0.09
label = "Collect timeline Instances"
hosts = ["flame"]
settings_category = "flame"
audio_track_items = []
# settings
xml_preset_attrs_from_comments = []
add_tasks = []
def process(self, context):
selected_segments = context.data["flameSelectedSegments"]
self.log.debug("__ selected_segments: {}".format(selected_segments))
self.otio_timeline = context.data["otioTimeline"]
self.fps = context.data["fps"]
# process all selected
for segment in selected_segments:
# get openpype tag data
marker_data = opfapi.get_segment_data_marker(segment)
self.log.debug("__ marker_data: {}".format(
pformat(marker_data)))
if not marker_data:
continue
if marker_data.get("id") not in {
AYON_INSTANCE_ID, AVALON_INSTANCE_ID
}:
continue
self.log.debug("__ segment.name: {}".format(
segment.name
))
comment_attributes = self._get_comment_attributes(segment)
self.log.debug("_ comment_attributes: {}".format(
pformat(comment_attributes)))
clip_data = opfapi.get_segment_attributes(segment)
clip_name = clip_data["segment_name"]
self.log.debug("clip_name: {}".format(clip_name))
# get otio clip data
otio_data = self._get_otio_clip_instance_data(clip_data) or {}
self.log.debug("__ otio_data: {}".format(pformat(otio_data)))
# get file path
file_path = clip_data["fpath"]
first_frame = opfapi.get_frame_from_filename(file_path) or 0
head, tail = self._get_head_tail(
clip_data,
otio_data["otioClip"],
marker_data["handleStart"],
marker_data["handleEnd"]
)
            # make sure there is no None, use 0 instead
            if head is None:
                head = 0
            if tail is None:
                tail = 0
# make sure value is absolute
if head != 0:
head = abs(head)
if tail != 0:
tail = abs(tail)
# solve handles length
marker_data["handleStart"] = min(
marker_data["handleStart"], head)
marker_data["handleEnd"] = min(
marker_data["handleEnd"], tail)
# Backward compatibility fix of 'entity_type' > 'folder_type'
if "parents" in marker_data:
for parent in marker_data["parents"]:
if "entity_type" in parent:
parent["folder_type"] = parent.pop("entity_type")
workfile_start = self._set_workfile_start(marker_data)
with_audio = bool(marker_data.pop("audio"))
# add marker data to instance data
inst_data = dict(marker_data.items())
# add ocio_data to instance data
inst_data.update(otio_data)
folder_path = marker_data["folderPath"]
folder_name = folder_path.rsplit("/")[-1]
product_name = marker_data["productName"]
# insert product type into families
product_type = marker_data["productType"]
families = [str(f) for f in marker_data["families"]]
families.insert(0, str(product_type))
# form label
label = folder_name
if folder_name != clip_name:
label += " ({})".format(clip_name)
label += " {} [{}]".format(product_name, ", ".join(families))
inst_data.update({
"name": "{}_{}".format(folder_name, product_name),
"label": label,
"folderPath": folder_path,
"item": segment,
"families": families,
"publish": marker_data["publish"],
"fps": self.fps,
"workfileFrameStart": workfile_start,
"sourceFirstFrame": int(first_frame),
"retimedHandles": marker_data.get("retimedHandles"),
"shotDurationFromSource": (
not marker_data.get("retimedFramerange")),
"path": file_path,
"flameAddTasks": self.add_tasks,
"tasks": {
task["name"]: {"type": task["type"]}
for task in self.add_tasks},
"representations": [],
"newAssetPublishing": True
})
self.log.debug("__ inst_data: {}".format(pformat(inst_data)))
# add resolution
self._get_resolution_to_data(inst_data, context)
# add comment attributes if any
inst_data.update(comment_attributes)
# create instance
instance = context.create_instance(**inst_data)
# add colorspace data
instance.data.update({
"versionData": {
"colorspace": clip_data["colour_space"],
}
})
# create shot instance for shot attributes create/update
self._create_shot_instance(context, clip_name, **inst_data)
self.log.info("Creating instance: {}".format(instance))
self.log.info(
"_ instance.data: {}".format(pformat(instance.data)))
if not with_audio:
continue
# add audioReview attribute to plate instance data
# if reviewTrack is on
if marker_data.get("reviewTrack") is not None:
instance.data["reviewAudio"] = True
@staticmethod
def _set_workfile_start(data):
include_handles = data.get("includeHandles")
workfile_start = data["workfileFrameStart"]
handle_start = data["handleStart"]
if include_handles:
workfile_start += handle_start
return workfile_start
def _get_comment_attributes(self, segment):
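        # segment comments may carry xml preset overrides as
        # `key: value` pairs separated by commas or semicolons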
comment = segment.comment.get_value()
# try to find attributes
attributes = {
"xml_overrides": {
"pixelRatio": 1.00}
}
# search for `:`
for split in self._split_comments(comment):
            # skip comment parts without a `:` separator
if ":" not in split:
continue
self._get_xml_preset_attrs(
attributes, split)
# add xml overrides resolution to instance data
xml_overrides = attributes["xml_overrides"]
if xml_overrides.get("width"):
attributes.update({
"resolutionWidth": xml_overrides["width"],
"resolutionHeight": xml_overrides["height"],
"pixelAspect": xml_overrides["pixelRatio"]
})
return attributes
def _get_xml_preset_attrs(self, attributes, split):
# split to key and value
key, value = split.split(":")
for attr_data in self.xml_preset_attrs_from_comments:
a_name = attr_data["name"]
a_type = attr_data["type"]
# exclude all not related attributes
if a_name.lower() not in key.lower():
continue
            # get pattern defined by type
            pattern = TXT_PATTERN
            if a_type in ("number", "float"):
                pattern = NUM_PATTERN
            res_group = pattern.findall(value)
            # raise if nothing is found as it is not correctly defined
            if not res_group:
                raise ValueError((
                    "Value for `{}` attribute is not "
                    "set correctly: `{}`").format(a_name, split))
            if "string" in a_type:
                _value = res_group[0]
            if "float" in a_type:
                _value = float(res_group[0])
            if "number" in a_type:
                _value = int(res_group[0])
            attributes["xml_overrides"][a_name] = _value
# condition for resolution in key
if "resolution" in key.lower():
            res_group = NUM_PATTERN.findall(value)
            # check if aspect was also defined, e.g. 1920x1080x1.5
            aspect = res_group[2] if len(res_group) > 2 else 1
            width = int(res_group[0])
            height = int(res_group[1])
            pixel_ratio = float(aspect)
attributes["xml_overrides"].update({
"width": width,
"height": height,
"pixelRatio": pixel_ratio
})
def _split_comments(self, comment_string):
# first split comment by comma
split_comments = []
if "," in comment_string:
split_comments.extend(comment_string.split(","))
elif ";" in comment_string:
split_comments.extend(comment_string.split(";"))
else:
split_comments.append(comment_string)
return split_comments
def _get_head_tail(self, clip_data, otio_clip, handle_start, handle_end):
# calculate head and tail with forward compatibility
head = clip_data.get("segment_head")
tail = clip_data.get("segment_tail")
self.log.debug("__ head: `{}`".format(head))
self.log.debug("__ tail: `{}`".format(tail))
        # HACK: kept to support Flame versions below 2021.1
if not any([head, tail]):
retimed_attributes = get_media_range_with_retimes(
otio_clip, handle_start, handle_end)
self.log.debug(
">> retimed_attributes: {}".format(retimed_attributes))
# retimed head and tail
head = int(retimed_attributes["handleStart"])
tail = int(retimed_attributes["handleEnd"])
return head, tail
def _get_resolution_to_data(self, data, context):
assert data.get("otioClip"), "Missing `otioClip` data"
# solve source resolution option
if data.get("sourceResolution", None):
otio_clip_metadata = data[
"otioClip"].media_reference.metadata
data.update({
"resolutionWidth": otio_clip_metadata[
"openpype.source.width"],
"resolutionHeight": otio_clip_metadata[
"openpype.source.height"],
"pixelAspect": otio_clip_metadata[
"openpype.source.pixelAspect"]
})
else:
otio_tl_metadata = context.data["otioTimeline"].metadata
data.update({
"resolutionWidth": otio_tl_metadata["openpype.timeline.width"],
"resolutionHeight": otio_tl_metadata[
"openpype.timeline.height"],
"pixelAspect": otio_tl_metadata[
"openpype.timeline.pixelAspect"]
})
def _create_shot_instance(self, context, clip_name, **data):
master_layer = data.get("heroTrack")
hierarchy_data = data.get("hierarchyData")
if not master_layer:
return
if not hierarchy_data:
return
folder_path = data["folderPath"]
folder_name = folder_path.rsplit("/")[-1]
product_name = "shotMain"
# insert product type into families
product_type = "shot"
# form label
label = folder_name
if folder_name != clip_name:
label += " ({}) ".format(clip_name)
label += " {}".format(product_name)
label += " [{}]".format(product_type)
data.update({
"name": "{}_{}".format(folder_name, product_name),
"label": label,
"productName": product_name,
"folderPath": folder_path,
"productType": product_type,
"family": product_type,
"families": [product_type]
})
instance = context.create_instance(**data)
self.log.info("Creating instance: {}".format(instance))
self.log.debug(
"_ instance.data: {}".format(pformat(instance.data)))
def _get_otio_clip_instance_data(self, clip_data):
"""
Return otio objects for timeline, track and clip
Args:
timeline_item_data (dict): timeline_item_data from list returned by
resolve.get_current_timeline_items()
otio_timeline (otio.schema.Timeline): otio object
Returns:
dict: otio clip object
"""
segment = clip_data["PySegment"]
s_track_name = segment.parent.name.get_value()
timeline_range = self._create_otio_time_range_from_timeline_item_data(
clip_data)
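        # match the Flame segment to its otio clip by track name,
        # clip name and overlapping record range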
for otio_clip in self.otio_timeline.each_clip():
track_name = otio_clip.parent().name
parent_range = otio_clip.range_in_parent()
if s_track_name not in track_name:
continue
if otio_clip.name not in segment.name.get_value():
continue
if is_overlapping_otio_ranges(
parent_range, timeline_range, strict=True):
# add pypedata marker to otio_clip metadata
for marker in otio_clip.markers:
if opfapi.MARKER_NAME in marker.name:
otio_clip.metadata.update(marker.metadata)
return {"otioClip": otio_clip}
return None
def _create_otio_time_range_from_timeline_item_data(self, clip_data):
frame_start = int(clip_data["record_in"])
frame_duration = int(clip_data["record_duration"])
return flame_export.create_otio_time_range(
frame_start, frame_duration, self.fps)

View file

@ -0,0 +1,67 @@
import pyblish.api
import ayon_flame.api as opfapi
from ayon_flame.otio import flame_export
from ayon_core.pipeline.create import get_product_name
class CollectTimelineOTIO(pyblish.api.ContextPlugin):
"""Inject the current working context into publish context"""
label = "Collect Timeline OTIO"
order = pyblish.api.CollectorOrder - 0.099
def process(self, context):
# plugin defined
product_type = "workfile"
variant = "otioTimeline"
# main
folder_entity = context.data["folderEntity"]
project = opfapi.get_current_project()
sequence = opfapi.get_current_sequence(opfapi.CTX.selection)
# create product name
task_entity = context.data["taskEntity"]
task_name = task_type = None
if task_entity:
task_name = task_entity["name"]
task_type = task_entity["taskType"]
product_name = get_product_name(
context.data["projectName"],
task_name,
task_type,
context.data["hostName"],
product_type,
variant,
project_settings=context.data["project_settings"]
)
# adding otio timeline to context
with opfapi.maintained_segment_selection(sequence) as selected_seg:
otio_timeline = flame_export.create_otio_timeline(sequence)
instance_data = {
"name": product_name,
"folderPath": folder_entity["path"],
"productName": product_name,
"productType": product_type,
"family": product_type,
"families": [product_type]
}
# create instance with workfile
instance = context.create_instance(**instance_data)
self.log.info("Creating instance: {}".format(instance))
# update context with main project attributes
context.data.update({
"flameProject": project,
"flameSequence": sequence,
"otioTimeline": otio_timeline,
"currentFile": "Flame/{}/{}".format(
project.name, sequence.name
),
"flameSelectedSegments": selected_seg,
"fps": float(str(sequence.frame_rate)[:-4])
})

View file

@ -0,0 +1,43 @@
import os
import pyblish.api
import opentimelineio as otio
from ayon_core.pipeline import publish
class ExtractOTIOFile(publish.Extractor):
"""
Extractor export OTIO file
"""
label = "Extract OTIO file"
order = pyblish.api.ExtractorOrder - 0.45
families = ["workfile"]
hosts = ["flame"]
def process(self, instance):
# create representation data
if "representations" not in instance.data:
instance.data["representations"] = []
name = instance.data["name"]
staging_dir = self.staging_dir(instance)
otio_timeline = instance.context.data["otioTimeline"]
# create otio timeline representation
otio_file_name = name + ".otio"
otio_file_path = os.path.join(staging_dir, otio_file_name)
# export otio file to temp dir
otio.adapters.write_to_file(otio_timeline, otio_file_path)
representation_otio = {
'name': "otio",
'ext': "otio",
'files': otio_file_name,
"stagingDir": staging_dir,
}
instance.data["representations"].append(representation_otio)
self.log.info("Added OTIO file representation: {}".format(
representation_otio))

View file

@ -0,0 +1,560 @@
import os
import re
from copy import deepcopy
import pyblish.api
from ayon_core.pipeline import publish
from ayon_flame import api as opfapi
from ayon_flame.api import MediaInfoFile
from ayon_core.pipeline.editorial import (
get_media_range_with_retimes
)
import flame
class ExtractProductResources(publish.Extractor):
"""
Extractor for transcoding files from Flame clip
"""
label = "Extract product resources"
order = pyblish.api.ExtractorOrder
families = ["clip"]
hosts = ["flame"]
settings_category = "flame"
# plugin defaults
keep_original_representation = False
default_presets = {
"thumbnail": {
"active": True,
"ext": "jpg",
"xml_preset_file": "Jpeg (8-bit).xml",
"xml_preset_dir": "",
"export_type": "File Sequence",
"parsed_comment_attrs": False,
"colorspace_out": "Output - sRGB",
"representation_add_range": False,
"representation_tags": ["thumbnail"],
"path_regex": ".*"
}
}
# hide publisher during exporting
hide_ui_on_process = True
# settings
export_presets_mapping = []
def process(self, instance):
if not self.keep_original_representation:
            # remove previous representations if not needed
instance.data["representations"] = []
# flame objects
segment = instance.data["item"]
folder_path = instance.data["folderPath"]
segment_name = segment.name.get_value()
clip_path = instance.data["path"]
sequence_clip = instance.context.data["flameSequence"]
# segment's parent track name
s_track_name = segment.parent.name.get_value()
# get configured workfile frame start/end (handles excluded)
frame_start = instance.data["frameStart"]
# get media source first frame
source_first_frame = instance.data["sourceFirstFrame"]
self.log.debug("_ frame_start: {}".format(frame_start))
self.log.debug("_ source_first_frame: {}".format(source_first_frame))
# get timeline in/out of segment
clip_in = instance.data["clipIn"]
clip_out = instance.data["clipOut"]
        # get retimed attributes
retimed_data = self._get_retimed_attributes(instance)
# get individual keys
retimed_handle_start = retimed_data["handle_start"]
retimed_handle_end = retimed_data["handle_end"]
retimed_source_duration = retimed_data["source_duration"]
retimed_speed = retimed_data["speed"]
# get handles value - take only the max from both
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
handles = max(handle_start, handle_end)
include_handles = instance.data.get("includeHandles")
retimed_handles = instance.data.get("retimedHandles")
# get media source range with handles
source_start_handles = instance.data["sourceStartH"]
source_end_handles = instance.data["sourceEndH"]
# retime if needed
if retimed_speed != 1.0:
if retimed_handles:
# handles are retimed
source_start_handles = (
instance.data["sourceStart"] - retimed_handle_start)
source_end_handles = (
source_start_handles
+ (retimed_source_duration - 1)
+ retimed_handle_start
+ retimed_handle_end
)
else:
# handles are not retimed
source_end_handles = (
source_start_handles
+ (retimed_source_duration - 1)
+ handle_start
+ handle_end
)
# get frame range with handles for representation range
frame_start_handle = frame_start - handle_start
repre_frame_start = frame_start_handle
if include_handles:
if retimed_speed == 1.0 or not retimed_handles:
frame_start_handle = frame_start
else:
frame_start_handle = (
frame_start - handle_start) + retimed_handle_start
self.log.debug("_ frame_start_handle: {}".format(
frame_start_handle))
self.log.debug("_ repre_frame_start: {}".format(
repre_frame_start))
# calculate duration with handles
source_duration_handles = (
source_end_handles - source_start_handles) + 1
self.log.debug("_ source_duration_handles: {}".format(
source_duration_handles))
# create staging dir path
staging_dir = self.staging_dir(instance)
# append staging dir for later cleanup
instance.context.data["cleanupFullPaths"].append(staging_dir)
export_presets_mapping = {}
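        # settings store the presets as a list of dicts; re-key them
        # by preset name for lookup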
for preset_mapping in deepcopy(self.export_presets_mapping):
name = preset_mapping.pop("name")
export_presets_mapping[name] = preset_mapping
        # add default presets for thumbnail and reviewable video;
        # presets with the same name coming from settings override them
_preset_keys = [k.split('_')[0] for k in export_presets_mapping]
export_presets = {
k: v
for k, v in deepcopy(self.default_presets).items()
if k not in _preset_keys
}
export_presets.update(export_presets_mapping)
if not instance.data.get("versionData"):
instance.data["versionData"] = {}
# set versiondata if any retime
version_data = retimed_data.get("version_data")
self.log.debug("_ version_data: {}".format(version_data))
if version_data:
instance.data["versionData"].update(version_data)
# version data start frame
version_frame_start = frame_start
if include_handles:
version_frame_start = frame_start_handle
if retimed_speed != 1.0:
if retimed_handles:
instance.data["versionData"].update({
"frameStart": version_frame_start,
"frameEnd": (
(version_frame_start + source_duration_handles - 1)
- (retimed_handle_start + retimed_handle_end)
)
})
else:
instance.data["versionData"].update({
"handleStart": handle_start,
"handleEnd": handle_end,
"frameStart": version_frame_start,
"frameEnd": (
(version_frame_start + source_duration_handles - 1)
- (handle_start + handle_end)
)
})
self.log.debug("_ version_data: {}".format(
instance.data["versionData"]
))
# loop all preset names and
for unique_name, preset_config in export_presets.items():
modify_xml_data = {}
if self._should_skip(preset_config, clip_path, unique_name):
continue
# get all presets attributes
extension = preset_config["ext"]
preset_file = preset_config["xml_preset_file"]
preset_dir = preset_config["xml_preset_dir"]
export_type = preset_config["export_type"]
repre_tags = preset_config["representation_tags"]
parsed_comment_attrs = preset_config["parsed_comment_attrs"]
color_out = preset_config["colorspace_out"]
self.log.info(
"Processing `{}` as `{}` to `{}` type...".format(
preset_file, export_type, extension
)
)
exporting_clip = None
name_pattern_xml = "<name>_{}.".format(
    unique_name)
if export_type == "Sequence Publish":
# change export clip to sequence
exporting_clip = flame.duplicate(sequence_clip)
# keep only the visible layer holding the instance segment
self.hide_others(
exporting_clip, segment_name, s_track_name)
# change name pattern
name_pattern_xml = (
    "<segment name>_<shot name>_{}.").format(
        unique_name)
# only for h264 with baked retime
in_mark = clip_in
out_mark = clip_out + 1
modify_xml_data.update({
"exportHandles": True,
"nbHandles": handles
})
else:
in_mark = (source_start_handles - source_first_frame) + 1
out_mark = in_mark + source_duration_handles
exporting_clip = self.import_clip(clip_path)
exporting_clip.name.set_value("{}_{}".format(
folder_path, segment_name))
# add xml tags modifications
modify_xml_data.update({
# enum position starts from 0
"frameIndex": 0,
"startFrame": repre_frame_start,
"namePattern": name_patern_xml
})
if parsed_comment_attrs:
# add any xml overrides collected from segment.comment
modify_xml_data.update(instance.data["xml_overrides"])
self.log.debug("_ in_mark: {}".format(in_mark))
self.log.debug("_ out_mark: {}".format(out_mark))
export_kwargs = {}
# validate xml preset file is filled
if preset_file == "":
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` is not filled").format(
unique_name)
)
# resolve xml preset dir if not filled
if preset_dir == "":
preset_dir = opfapi.get_preset_path_by_xml_name(
preset_file)
if not preset_dir:
raise ValueError(
("Check Settings for {} preset: "
"`XML preset file` {} is not found").format(
unique_name, preset_file)
)
# create preset path
preset_orig_xml_path = str(os.path.join(
preset_dir, preset_file
))
# define kwargs based on preset type
if "thumbnail" in unique_name:
modify_xml_data.update({
"video/posterFrame": True,
"video/useFrameAsPoster": 1,
"namePattern": "__thumbnail"
})
thumb_frame_number = int(in_mark + (
(out_mark - in_mark + 1) / 2))
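# e.g. in_mark 1 and out_mark 100 give thumb_frame_number 51,
# i.e. the clip midpoint (illustrative sketch)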
self.log.debug("__ thumb_frame_number: {}".format(
thumb_frame_number
))
export_kwargs["thumb_frame_number"] = thumb_frame_number
else:
export_kwargs.update({
"in_mark": in_mark,
"out_mark": out_mark
})
preset_path = opfapi.modify_preset_file(
preset_orig_xml_path, staging_dir, modify_xml_data)
# get and make export dir paths
export_dir_path = str(os.path.join(
staging_dir, unique_name
))
os.makedirs(export_dir_path)
# export
opfapi.export_clip(
export_dir_path, exporting_clip, preset_path, **export_kwargs)
repr_name = unique_name
# keep only the part before the first underscore in the name
# HACK: `ftrackreview_withLUT` will result in `ftrackreview` only
if (
"thumbnail" in unique_name
or "ftrackreview" in unique_name
):
repr_name = unique_name.split("_")[0]
# create representation data
representation_data = {
"name": repr_name,
"outputName": repr_name,
"ext": extension,
"stagingDir": export_dir_path,
"tags": repre_tags,
"data": {
"colorspace": color_out
},
"load_to_batch_group": preset_config.get(
"load_to_batch_group"),
"batch_group_loader_name": preset_config.get(
"batch_group_loader_name") or None
}
# collect all available content of export dir
files = os.listdir(export_dir_path)
# make sure no nested folders inside
n_stage_dir, n_files = self._unfolds_nested_folders(
export_dir_path, files, extension)
# fix representation in case of nested folders
if n_stage_dir:
representation_data["stagingDir"] = n_stage_dir
files = n_files
# add files to representation; an image sequence is added as a list,
# a single file as a plain string
if (
    # use a single file when a mov file is present
    [
        f for f in files
        if os.path.splitext(f)[-1] == ".mov"
    ]
    # or when the representation is a thumbnail
    or repr_name == "thumbnail"
):
representation_data["files"] = files.pop()
else:
representation_data["files"] = files
# add frame range
if preset_config["representation_add_range"]:
representation_data.update({
"frameStart": repre_frame_start,
"frameEnd": (
repre_frame_start + source_duration_handles) - 1,
"fps": instance.data["fps"]
})
instance.data["representations"].append(representation_data)
# add review family if found in tags
if "review" in repre_tags:
instance.data["families"].append("review")
self.log.info("Added representation: {}".format(
representation_data))
if export_type == "Sequence Publish":
# at the end remove the duplicated clip
flame.delete(exporting_clip)
def _get_retimed_attributes(self, instance):
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
# get basic variables
otio_clip = instance.data["otioClip"]
# get available range trimmed with processed retimes
retimed_attributes = get_media_range_with_retimes(
otio_clip, handle_start, handle_end)
self.log.debug(
">> retimed_attributes: {}".format(retimed_attributes))
r_media_in = int(retimed_attributes["mediaIn"])
r_media_out = int(retimed_attributes["mediaOut"])
version_data = retimed_attributes.get("versionData")
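# Hedged sketch of the mapping returned below for a 2x retime
# (hypothetical values):
#   {"version_data": {...}, "handle_start": 5, "handle_end": 5,
#    "source_duration": 50, "speed": 2.0}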
return {
"version_data": version_data,
"handle_start": int(retimed_attributes["handleStart"]),
"handle_end": int(retimed_attributes["handleEnd"]),
"source_duration": (
(r_media_out - r_media_in) + 1
),
"speed": float(retimed_attributes["speed"])
}
def _should_skip(self, preset_config, clip_path, unique_name):
# get activating attributes
activated_preset = preset_config["active"]
filter_path_regex = preset_config.get("filter_path_regex")
self.log.info(
"Preset `{}` is active `{}` with filter `{}`".format(
unique_name, activated_preset, filter_path_regex
)
)
# skip if the preset is not activated
if not activated_preset:
return True
# exclude by regex filter if any
if (
    filter_path_regex
    and not re.search(filter_path_regex, clip_path)
):
    return True

return False
def _unfolds_nested_folders(self, stage_dir, files_list, ext):
"""Unfolds nested folders
Args:
stage_dir (str): path string with directory
files_list (list): list of file names
ext (str): extension (jpg)[without dot]
Raises:
IOError: in case no files were collected from any directory
Returns:
str, list: new staging dir path, new list of file names
or
None, None: In case single file in `files_list`
"""
# exclude single files which are having extension
# the same as input ext attr
if (
# only one file in list
len(files_list) == 1
# file is having extension as input
and ext in os.path.splitext(files_list[0])[-1]
):
return None, None
elif (
# more than one file in list
len(files_list) >= 1
# extension is correct
and ext in os.path.splitext(files_list[0])[-1]
# test file exists
and os.path.exists(
os.path.join(stage_dir, files_list[0])
)
):
return None, None
new_stage_dir = None
new_files_list = []
for file in files_list:
search_path = os.path.join(stage_dir, file)
if not os.path.isdir(search_path):
continue
for root, _dirs, files in os.walk(search_path):
for _file in files:
_fn, _ext = os.path.splitext(_file)
if ext.lower() != _ext[1:].lower():
continue
new_files_list.append(_file)
if not new_stage_dir:
new_stage_dir = root
if not new_stage_dir:
raise AssertionError(
"Files in `{}` are not correct! Check `{}`".format(
files_list, stage_dir)
)
return new_stage_dir, new_files_list
def hide_others(self, sequence_clip, segment_name, track_name):
"""Helper method used only if sequence clip is used
Args:
sequence_clip (flame.Clip): sequence clip
segment_name (str): segment name
track_name (str): track name
"""
# create otio tracks and clips
for ver in sequence_clip.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden.get_value():
continue
# hide tracks which are not parent track
if track.name.get_value() != track_name:
track.hidden = True
continue
# hide all other segments
for segment in track.segments:
if segment.name.get_value() != segment_name:
segment.hidden = True
def import_clip(self, path):
"""
Import clip from path
"""
dir_path = os.path.dirname(path)
media_info = MediaInfoFile(path, logger=self.log)
file_pattern = media_info.file_pattern
self.log.debug("__ file_pattern: {}".format(file_pattern))
# rejoin the pattern to dir path
new_path = os.path.join(dir_path, file_pattern)
clips = flame.import_clips(new_path)
self.log.info("Clips [{}] imported from `{}`".format(clips, path))
if not clips:
self.log.warning("Path `{}` is not having any clips".format(path))
return None
elif len(clips) > 1:
self.log.warning(
    "Path `{}` contains more than one clip".format(path)
)
return clips[0]

View file

@ -0,0 +1,339 @@
import os
import copy
from collections import OrderedDict
from pprint import pformat
import pyblish
import ayon_flame.api as opfapi
import ayon_core.pipeline as op_pipeline
from ayon_core.pipeline.workfile import get_workdir
class IntegrateBatchGroup(pyblish.api.InstancePlugin):
"""Integrate published shot to batch group"""
order = pyblish.api.IntegratorOrder + 0.45
label = "Integrate Batch Groups"
hosts = ["flame"]
families = ["clip"]
settings_category = "flame"
# settings
default_loader = "LoadClip"
def process(self, instance):
add_tasks = instance.data["flameAddTasks"]
# iterate all tasks from settings
for task_data in add_tasks:
# skip tasks that do not require a batch group
if not task_data["create_batch_group"]:
continue
# create or get already created batch group
bgroup = self._get_batch_group(instance, task_data)
# add batch group content
all_batch_nodes = self._add_nodes_to_batch_with_links(
instance, task_data, bgroup)
for name, node in all_batch_nodes.items():
self.log.debug("name: {}, dir: {}".format(
name, dir(node)
))
self.log.debug("__ node.attributes: {}".format(
node.attributes
))
# load plate to batch group
self.log.info("Loading product `{}` into batch `{}`".format(
instance.data["productName"], bgroup.name.get_value()
))
self._load_clip_to_context(instance, bgroup)
def _add_nodes_to_batch_with_links(self, instance, task_data, batch_group):
# get Write File node properties as an OrderedDict, because order matters
write_pref_data = self._get_write_prefs(instance, task_data)
batch_nodes = [
{
"type": "comp",
"properties": {},
"id": "comp_node01"
},
{
"type": "Write File",
"properties": write_pref_data,
"id": "write_file_node01"
}
]
batch_links = [
{
"from_node": {
"id": "comp_node01",
"connector": "Result"
},
"to_node": {
"id": "write_file_node01",
"connector": "Front"
}
}
]
# add nodes into batch group
return opfapi.create_batch_group_conent(
batch_nodes, batch_links, batch_group)
def _load_clip_to_context(self, instance, bgroup):
# get all loaders for host
loaders_by_name = {
loader.__name__: loader
for loader in op_pipeline.discover_loader_plugins()
}
# get all published representations
published_representations = instance.data["published_representations"]
repres_db_id_by_name = {
repre_info["representation"]["name"]: repre_id
for repre_id, repre_info in published_representations.items()
}
# get all loadable representations
repres_by_name = {
repre["name"]: repre for repre in instance.data["representations"]
}
# get repre_id for the loadable representations
loader_name_by_repre_id = {
repres_db_id_by_name[repr_name]: {
"loader": repr_data["batch_group_loader_name"],
# add repre data for exception logging
"_repre_data": repr_data
}
for repr_name, repr_data in repres_by_name.items()
if repr_data.get("load_to_batch_group")
}
self.log.debug("__ loader_name_by_repre_id: {}".format(pformat(
loader_name_by_repre_id)))
# get representation context from the repre_id
repre_contexts = op_pipeline.load.get_repres_contexts(
loader_name_by_repre_id.keys())
self.log.debug("__ repre_contexts: {}".format(pformat(
repre_contexts)))
# loop all returned repres from repre_context dict
for repre_id, repre_context in repre_contexts.items():
self.log.debug("__ repre_id: {}".format(repre_id))
# get loader name by representation id
loader_name = (
loader_name_by_repre_id[repre_id]["loader"]
# if nothing is set in settings, fall back to the default
or self.default_loader
)
# get loader plugin
loader_plugin = loaders_by_name.get(loader_name)
if loader_plugin:
# load to flame by representation context
try:
op_pipeline.load.load_with_repre_context(
loader_plugin, repre_context, **{
"data": {
"workdir": self.task_workdir,
"batch": bgroup
}
})
except op_pipeline.load.IncompatibleLoaderError as msg:
self.log.error(
"Check allowed representations for Loader `{}` "
"in settings > error: {}".format(
loader_plugin.__name__, msg))
self.log.error(
"Representaton context >>{}<< is not compatible "
"with loader `{}`".format(
pformat(repre_context), loader_plugin.__name__
)
)
else:
self.log.warning(
    "Something went wrong and no Loader was found for "
    "the following data: {}".format(
        pformat(loader_name_by_repre_id))
)
def _get_batch_group(self, instance, task_data):
frame_start = instance.data["frameStart"]
frame_end = instance.data["frameEnd"]
handle_start = instance.data["handleStart"]
handle_end = instance.data["handleEnd"]
frame_duration = (frame_end - frame_start) + 1
folder_path = instance.data["folderPath"]
task_name = task_data["name"]
batchgroup_name = "{}_{}".format(folder_path, task_name)
batch_data = {
"shematic_reels": [
"OP_LoadedReel"
],
"handleStart": handle_start,
"handleEnd": handle_end
}
self.log.debug(
"__ batch_data: {}".format(pformat(batch_data)))
# check if the batch group already exists
bgroup = opfapi.get_batch_group_from_desktop(batchgroup_name)
if not bgroup:
self.log.info(
"Creating new batch group: {}".format(batchgroup_name))
# create batch with utils
bgroup = opfapi.create_batch_group(
batchgroup_name,
frame_start,
frame_duration,
**batch_data
)
else:
self.log.info(
"Updating batch group: {}".format(batchgroup_name))
# update already created batch group
bgroup = opfapi.create_batch_group(
batchgroup_name,
frame_start,
frame_duration,
update_batch_group=bgroup,
**batch_data
)
return bgroup
def _get_anatomy_data_with_current_task(self, instance, task_data):
anatomy_data = copy.deepcopy(instance.data["anatomyData"])
task_name = task_data["name"]
task_type = task_data["type"]
anatomy_obj = instance.context.data["anatomy"]
# update task data in anatomy data
project_task_types = anatomy_obj["tasks"]
task_code = project_task_types.get(task_type, {}).get("shortName")
anatomy_data.update({
"task": {
"name": task_name,
"type": task_type,
"short": task_code
}
})
return anatomy_data
def _get_write_prefs(self, instance, task_data):
# update task in anatomy data
anatomy_data = self._get_anatomy_data_with_current_task(
    instance, task_data)
self.task_workdir = self._get_shot_task_dir_path(
instance, task_data)
self.log.debug("__ task_workdir: {}".format(
self.task_workdir))
# TODO: this might be done with template in settings
render_dir_path = os.path.join(
self.task_workdir, "render", "flame")
if not os.path.exists(render_dir_path):
os.makedirs(render_dir_path, mode=0o777)
# TODO: add most of these to `imageio/flame/batch/write_node`
name = "{project[code]}_{folder[name]}_{task[name]}".format(
**anatomy_data
)
# The path attribute where the rendered clip is exported
# /path/to/file.[0001-0010].exr
media_path = render_dir_path
# name of file represented by tokens
media_path_pattern = (
"<name>_v<iteration###>/<name>_v<iteration###>.<frame><ext>")
# The Create Open Clip attribute of the Write File node.
# Determines if an Open Clip is created by the Write File node.
create_clip = True
# The Include Setup attribute of the Write File node.
# Determines if a Batch Setup file is created by the Write File node.
include_setup = True
# The path attribute where the Open Clip file is exported by
# the Write File node.
create_clip_path = "<name>"
# The path attribute where the Batch setup file
# is exported by the Write File node.
include_setup_path = "./<name>_v<iteration###>"
# The file type for the files written by the Write File node.
# Setting this attribute also overwrites format_extension,
# bit_depth and compress_mode to match the defaults for
# this file type.
file_type = "OpenEXR"
# The file extension for the files written by the Write File node.
# This attribute resets to match file_type whenever file_type
# is set. If you require a specific extension, you must
# set format_extension after setting file_type.
format_extension = "exr"
# The bit depth for the files written by the Write File node.
# This attribute resets to match file_type whenever file_type is set.
bit_depth = "16"
# The compressing attribute for the files exported by the Write
# File node. Only relevant when file_type in 'OpenEXR', 'Sgi', 'Tiff'
compress = True
# The compression format attribute for the specific File Types
# export by the Write File node. You must set compress_mode
# after setting file_type.
compress_mode = "DWAB"
# The frame index mode attribute of the Write File node.
# Value range: `Use Timecode` or `Use Start Frame`
frame_index_mode = "Use Start Frame"
frame_padding = 6
# The versioning mode of the Open Clip exported by the Write File node.
# Only available if create_clip = True.
version_mode = "Follow Iteration"
version_name = "v<version>"
version_padding = 3
# need to make sure the order of keys is correct
return OrderedDict((
("name", name),
("media_path", media_path),
("media_path_pattern", media_path_pattern),
("create_clip", create_clip),
("include_setup", include_setup),
("create_clip_path", create_clip_path),
("include_setup_path", include_setup_path),
("file_type", file_type),
("format_extension", format_extension),
("bit_depth", bit_depth),
("compress", compress),
("compress_mode", compress_mode),
("frame_index_mode", frame_index_mode),
("frame_padding", frame_padding),
("version_mode", version_mode),
("version_name", version_name),
("version_padding", version_padding)
))
def _get_shot_task_dir_path(self, instance, task_data):
project_entity = instance.data["projectEntity"]
folder_entity = instance.data["folderEntity"]
task_entity = instance.data["taskEntity"]
anatomy = instance.context.data["anatomy"]
project_settings = instance.context.data["project_settings"]
return get_workdir(
project_entity,
folder_entity,
task_entity,
"flame",
anatomy=anatomy,
project_settings=project_settings
)

View file

@ -0,0 +1,58 @@
<?xml version="1.0"?>
<preset version="9">
<type>sequence</type>
<comment>Creates an 8-bit Jpeg file per segment.</comment>
<sequence>
<fileType>NONE</fileType>
<namePattern></namePattern>
<composition>&lt;name&gt;</composition>
<includeVideo>True</includeVideo>
<exportVideo>True</exportVideo>
<videoMedia>
<mediaFileType>image</mediaFileType>
<commit>FX</commit>
<flatten>NoChange</flatten>
<exportHandles>False</exportHandles>
<nbHandles>10</nbHandles>
</videoMedia>
<includeAudio>True</includeAudio>
<exportAudio>False</exportAudio>
<audioMedia>
<mediaFileType>audio</mediaFileType>
<commit>FX</commit>
<flatten>FlattenTracks</flatten>
<exportHandles>True</exportHandles>
<nbHandles>10</nbHandles>
</audioMedia>
</sequence>
<video>
<fileType>Jpeg</fileType>
<codec>923688</codec>
<codecProfile></codecProfile>
<namePattern>&lt;shot name&gt;</namePattern>
<compressionQuality>100</compressionQuality>
<transferCharacteristic>2</transferCharacteristic>
<colorimetricSpecification>4</colorimetricSpecification>
<includeAlpha>False</includeAlpha>
<overwriteWithVersions>False</overwriteWithVersions>
<posterFrame>True</posterFrame>
<useFrameAsPoster>1</useFrameAsPoster>
<resize>
<resizeType>fit</resizeType>
<resizeFilter>lanczos</resizeFilter>
<width>1920</width>
<height>1080</height>
<bitsPerChannel>8</bitsPerChannel>
<numChannels>3</numChannels>
<floatingPoint>False</floatingPoint>
<bigEndian>True</bigEndian>
<pixelRatio>1</pixelRatio>
<scanFormat>P</scanFormat>
</resize>
</video>
<name>
<framePadding>4</framePadding>
<startFrame>1</startFrame>
<frameIndex>2</frameIndex>
</name>
</preset>

View file

@ -0,0 +1,72 @@
<?xml version="1.0"?>
<preset version="10">
<type>sequence</type>
<comment>Creates MOV H264 files per segment with thumbnail</comment>
<sequence>
<fileType>NONE</fileType>
<namePattern></namePattern>
<composition>&lt;name&gt;</composition>
<includeVideo>True</includeVideo>
<exportVideo>True</exportVideo>
<videoMedia>
<mediaFileType>movie</mediaFileType>
<commit>FX</commit>
<flatten>FlattenTracks</flatten>
<exportHandles>True</exportHandles>
<nbHandles>5</nbHandles>
</videoMedia>
<includeAudio>True</includeAudio>
<exportAudio>False</exportAudio>
<audioMedia>
<mediaFileType>audio</mediaFileType>
<commit>Original</commit>
<flatten>NoChange</flatten>
<exportHandles>True</exportHandles>
<nbHandles>5</nbHandles>
</audioMedia>
</sequence>
<movie>
<fileType>QuickTime</fileType>
<namePattern>&lt;shot name&gt;</namePattern>
<yuvHeadroom>0</yuvHeadroom>
<yuvColourSpace>PCS_709</yuvColourSpace>
<operationalPattern>None</operationalPattern>
<companyName>Autodesk</companyName>
<productName>Flame</productName>
<versionName>2021</versionName>
</movie>
<video>
<fileType>QuickTime</fileType>
<codec>33622016</codec>
<codecProfile>
<rootPath>/opt/Autodesk/mediaconverter/</rootPath>
<targetVersion>2021</targetVersion>
<pathSuffix>/profiles/.33622016/HDTV_720p_8Mbits.cdxprof</pathSuffix>
</codecProfile>
<namePattern>&lt;shot name&gt;_&lt;video codec&gt;</namePattern>
<compressionQuality>50</compressionQuality>
<transferCharacteristic>2</transferCharacteristic>
<colorimetricSpecification>4</colorimetricSpecification>
<includeAlpha>False</includeAlpha>
<overwriteWithVersions>False</overwriteWithVersions>
<posterFrame>False</posterFrame>
<useFrameAsPoster>1</useFrameAsPoster>
<resize>
<resizeType>fit</resizeType>
<resizeFilter>gaussian</resizeFilter>
<width>1920</width>
<height>1080</height>
<bitsPerChannel>8</bitsPerChannel>
<numChannels>3</numChannels>
<floatingPoint>False</floatingPoint>
<bigEndian>True</bigEndian>
<pixelRatio>1</pixelRatio>
<scanFormat>P</scanFormat>
</resize>
</video>
<name>
<framePadding>4</framePadding>
<startFrame>1</startFrame>
<frameIndex>2</frameIndex>
</name>
</preset>

View file

@ -0,0 +1,162 @@
import os
import io
import ConfigParser as CP
from xml.etree import ElementTree as ET
from contextlib import contextmanager
PLUGIN_DIR = os.path.dirname(os.path.dirname(__file__))
EXPORT_PRESETS_DIR = os.path.join(PLUGIN_DIR, "export_preset")
CONFIG_DIR = os.path.join(os.path.expanduser(
"~/.openpype"), "openpype_babypublisher")
@contextmanager
def make_temp_dir():
import tempfile
try:
dirpath = tempfile.mkdtemp()
yield dirpath
except IOError as _error:
raise IOError("Not able to create temp dir file: {}".format(_error))
finally:
pass
@contextmanager
def get_config(section=None):
cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")
# create config dir
if not os.path.exists(CONFIG_DIR):
print("making dirs at: `{}`".format(CONFIG_DIR))
os.makedirs(CONFIG_DIR, mode=0o777)
# write default data to settings.ini
if not os.path.exists(cfg_file_path):
default_cfg = cfg_default()
config = CP.RawConfigParser()
config.readfp(io.BytesIO(default_cfg))
with open(cfg_file_path, 'wb') as cfg_file:
config.write(cfg_file)
try:
config = CP.RawConfigParser()
config.read(cfg_file_path)
if section:
_cfg_data = {
k: v
for s in config.sections()
for k, v in config.items(s)
if s == section
}
else:
_cfg_data = {s: dict(config.items(s)) for s in config.sections()}
yield _cfg_data
except IOError as _error:
raise IOError('Not able to read settings.ini file: {}'.format(_error))
finally:
pass
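# Minimal usage sketch (assuming the default `main` section exists):
#   with get_config("main") as cfg:
#       start_frame = int(cfg["workfile_start_frame"])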
def set_config(cfg_data, section=None):
cfg_file_path = os.path.join(CONFIG_DIR, "settings.ini")
config = CP.RawConfigParser()
config.read(cfg_file_path)
try:
if not section:
for section in cfg_data:
for key, value in cfg_data[section].items():
config.set(section, key, value)
else:
for key, value in cfg_data.items():
config.set(section, key, value)
with open(cfg_file_path, 'wb') as cfg_file:
config.write(cfg_file)
except IOError as _error:
raise IOError('Not able to write settings.ini file: {}'.format(_error))
def cfg_default():
return """
[main]
workfile_start_frame = 1001
shot_handles = 0
shot_name_template = {sequence}_{shot}
hierarchy_template = shots[Folder]/{sequence}[Sequence]
create_task_type = Compositing
"""
def configure_preset(file_path, data):
split_fp = os.path.splitext(file_path)
new_file_path = split_fp[0] + "_tmp" + split_fp[-1]
with open(file_path, "r") as datafile:
tree = ET.parse(datafile)
for key, value in data.items():
for element in tree.findall(".//{}".format(key)):
print(element)
element.text = str(value)
tree.write(new_file_path)
return new_file_path
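# e.g. configure_preset("/tmp/preset.xml", {"nbHandles": 5}) writes a
# sibling "/tmp/preset_tmp.xml" with every <nbHandles> element set to
# "5" (hypothetical paths, for illustration only)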
def export_thumbnail(sequence, tempdir_path, data):
import flame
export_preset = os.path.join(
EXPORT_PRESETS_DIR,
"openpype_seg_thumbnails_jpg.xml"
)
new_path = configure_preset(export_preset, data)
poster_frame_exporter = flame.PyExporter()
poster_frame_exporter.foreground = True
poster_frame_exporter.export(sequence, new_path, tempdir_path)
def export_video(sequence, tempdir_path, data):
import flame
export_preset = os.path.join(
EXPORT_PRESETS_DIR,
"openpype_seg_video_h264.xml"
)
new_path = configure_preset(export_preset, data)
poster_frame_exporter = flame.PyExporter()
poster_frame_exporter.foreground = True
poster_frame_exporter.export(sequence, new_path, tempdir_path)
def timecode_to_frames(timecode, framerate):
def _seconds(value):
if isinstance(value, str):
_zip_ft = zip((3600, 60, 1, 1 / framerate), value.split(':'))
return sum(f * float(t) for f, t in _zip_ft)
elif isinstance(value, (int, float)):
return value / framerate
return 0
def _frames(seconds):
return seconds * framerate
def tc_to_frames(_timecode, start=None):
return _frames(_seconds(_timecode) - _seconds(start))
if '+' in timecode:
timecode = timecode.replace('+', ':')
elif '#' in timecode:
timecode = timecode.replace('#', ':')
frames = int(round(tc_to_frames(timecode, start='00:00:00:00')))
return frames
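# Quick sanity sketch: timecode_to_frames("00:00:01:12", 24.0) resolves
# to 36 frames (1 second plus 12 frames at 24 fps)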

View file

@ -0,0 +1,459 @@
import os
import sys
import six
import re
import json
import app_utils
# Fill following constants or set them via environment variable
FTRACK_MODULE_PATH = None
FTRACK_API_KEY = None
FTRACK_API_USER = None
FTRACK_SERVER = None
def import_ftrack_api():
try:
import ftrack_api
return ftrack_api
except ImportError:
    ftrk_m_p = FTRACK_MODULE_PATH or os.getenv("FTRACK_MODULE_PATH")
    sys.path.append(ftrk_m_p)
    import ftrack_api
    return ftrack_api
def get_ftrack_session():
ftrack_api = import_ftrack_api()
# fill your own credentials
url = FTRACK_SERVER or os.getenv("FTRACK_SERVER") or ""
user = FTRACK_API_USER or os.getenv("FTRACK_API_USER") or ""
api = FTRACK_API_KEY or os.getenv("FTRACK_API_KEY") or ""
first_validation = True
if not user:
print('- Ftrack Username is not set')
first_validation = False
if not api:
print('- Ftrack API key is not set')
first_validation = False
if not first_validation:
return False
try:
return ftrack_api.Session(
server_url=url,
api_user=user,
api_key=api
)
except Exception as _e:
print("Can't log into Ftrack with used credentials: {}".format(_e))
ftrack_cred = {
'Ftrack server': str(url),
'Username': str(user),
'API key': str(api),
}
item_lens = [len(key) + 1 for key in ftrack_cred]
justify_len = max(*item_lens)
for key, value in ftrack_cred.items():
print('{} {}'.format((key + ':').ljust(justify_len, ' '), value))
return False
def get_project_task_types(project_entity):
tasks = {}
proj_template = project_entity['project_schema']
temp_task_types = proj_template['_task_type_schema']['types']
for task_type in temp_task_types:
    if task_type['name'] not in tasks:
        tasks[task_type['name']] = task_type
return tasks
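# Sketch of the returned mapping (hypothetical type names):
#   {"Compositing": <Type entity>, "Animation": <Type entity>, ...}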
class FtrackComponentCreator:
default_location = "ftrack.server"
ftrack_locations = {}
thumbnails = []
videos = []
temp_dir = None
def __init__(self, session):
self.session = session
self._get_ftrack_location()
def generate_temp_data(self, selection, change_preset_data):
with app_utils.make_temp_dir() as tempdir_path:
for seq in selection:
app_utils.export_thumbnail(
seq, tempdir_path, change_preset_data)
app_utils.export_video(seq, tempdir_path, change_preset_data)
return tempdir_path
def collect_generated_data(self, tempdir_path):
temp_files = os.listdir(tempdir_path)
self.thumbnails = [f for f in temp_files if "jpg" in f]
self.videos = [f for f in temp_files if "mov" in f]
self.temp_dir = tempdir_path
def get_thumb_path(self, shot_name):
# get component files
thumb_f = next((f for f in self.thumbnails if shot_name in f), None)
return os.path.join(self.temp_dir, thumb_f)
def get_video_path(self, shot_name):
# get component files
video_f = next((f for f in self.videos if shot_name in f), None)
return os.path.join(self.temp_dir, video_f)
def close(self):
self.ftrack_locations = {}
self.session = None
def create_component(self, shot_entity, data, assetversion_entity=None):
self.shot_entity = shot_entity
location = self._get_ftrack_location()
file_path = data["file_path"]
# get extension
file = os.path.basename(file_path)
_n, ext = os.path.splitext(file)
name = "ftrackreview-mp4" if "mov" in ext else "thumbnail"
component_data = {
"name": name,
"file_path": file_path,
"file_type": ext,
"location": location
}
if name == "ftrackreview-mp4":
duration = data["duration"]
handles = data["handles"]
fps = data["fps"]
component_data["metadata"] = {
'ftr_meta': json.dumps({
'frameIn': int(0),
'frameOut': int(duration + (handles * 2)),
'frameRate': float(fps)
})
}
if not assetversion_entity:
# get assettype entity from session
assettype_entity = self._get_assettype({"short": "reference"})
# get or create asset entity from session
asset_entity = self._get_asset({
"name": "plateReference",
"type": assettype_entity,
"parent": self.shot_entity
})
# get or create assetversion entity from session
assetversion_entity = self._get_assetversion({
"version": 0,
"asset": asset_entity
})
# get or create component entity
self._set_component(component_data, {
"name": name,
"version": assetversion_entity,
})
return assetversion_entity
def _overwrite_members(self, entity, data):
origin_location = self._get_ftrack_location("ftrack.origin")
location = data.pop("location")
self._remove_component_from_location(entity, location)
entity["file_type"] = data["file_type"]
try:
origin_location.add_component(
entity, data["file_path"]
)
# Add components to location.
location.add_component(
entity, origin_location, recursive=True)
except Exception as __e:
print("Error: {}".format(__e))
self._remove_component_from_location(entity, origin_location)
origin_location.add_component(
entity, data["file_path"]
)
# Add components to location.
location.add_component(
entity, origin_location, recursive=True)
def _remove_component_from_location(self, entity, location):
print(location)
# Removing existing members from location
components = list(entity.get("members", []))
components += [entity]
for component in components:
for loc in component.get("component_locations", []):
if location["id"] == loc["location_id"]:
print("<< Removing component: {}".format(component))
location.remove_component(
component, recursive=False
)
# Deleting existing members on component entity
for member in entity.get("members", []):
self.session.delete(member)
print("<< Deleting member: {}".format(member))
del member
self._commit()
# Reset members in memory
if "members" in entity.keys():
entity["members"] = []
def _get_assettype(self, data):
return self.session.query(
self._query("AssetType", data)).first()
def _set_component(self, comp_data, base_data):
component_metadata = comp_data.pop("metadata", {})
component_entity = self.session.query(
self._query("Component", base_data)
).first()
if component_entity:
# overwrite existing members in component entity
# - get data for member from `ftrack.origin` location
self._overwrite_members(component_entity, comp_data)
# Adding metadata
existing_component_metadata = component_entity["metadata"]
existing_component_metadata.update(component_metadata)
component_entity["metadata"] = existing_component_metadata
return
assetversion_entity = base_data["version"]
location = comp_data.pop("location")
component_entity = assetversion_entity.create_component(
comp_data["file_path"],
data=comp_data,
location=location
)
# Adding metadata
existing_component_metadata = component_entity["metadata"]
existing_component_metadata.update(component_metadata)
component_entity["metadata"] = existing_component_metadata
if comp_data["name"] == "thumbnail":
self.shot_entity["thumbnail_id"] = component_entity["id"]
assetversion_entity["thumbnail_id"] = component_entity["id"]
self._commit()
def _get_asset(self, data):
# first find already created
asset_entity = self.session.query(
self._query("Asset", data)
).first()
if asset_entity:
return asset_entity
asset_entity = self.session.create("Asset", data)
# _commit if created
self._commit()
return asset_entity
def _get_assetversion(self, data):
assetversion_entity = self.session.query(
self._query("AssetVersion", data)
).first()
if assetversion_entity:
return assetversion_entity
assetversion_entity = self.session.create("AssetVersion", data)
# _commit if created
self._commit()
return assetversion_entity
def _commit(self):
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
# self.session.rollback()
# self.session._configure_locations()
six.reraise(tp, value, tb)
def _get_ftrack_location(self, name=None):
name = name or self.default_location
if name in self.ftrack_locations:
return self.ftrack_locations[name]
location = self.session.query(
'Location where name is "{}"'.format(name)
).one()
self.ftrack_locations[name] = location
return location
def _query(self, entitytype, data):
""" Generate a query expression from data supplied.
If a value is not a string, we'll add the id of the entity to the
query.
Args:
entitytype (str): The type of entity to query.
data (dict): The data to identify the entity.
Returns:
str: String query to use with "session.query"
"""
queries = []
for key, value in data.items():
    if not isinstance(value, (str, int)):
        print("value: {}".format(value))
        if "id" in value.keys():
            queries.append(
                "{0}.id is \"{1}\"".format(key, value["id"])
            )
    else:
        queries.append("{0} is \"{1}\"".format(key, value))
query = (
"select id from " + entitytype + " where " + " and ".join(queries)
)
print(query)
return query
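# e.g. _query("Asset", {"name": "plateReference", "parent": shot_entity})
# yields: select id from Asset where name is "plateReference" and
# parent.id is "<shot id>"
# (a sketch; `shot_entity` stands for an entity mapping with an "id" key)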
class FtrackEntityOperator:
existing_tasks = []
def __init__(self, session, project_entity):
self.session = session
self.project_entity = project_entity
def commit(self):
try:
self.session.commit()
except Exception:
tp, value, tb = sys.exc_info()
self.session.rollback()
self.session._configure_locations()
six.reraise(tp, value, tb)
def create_ftrack_entity(self, session, type, name, parent=None):
parent = parent or self.project_entity
entity = session.create(type, {
'name': name,
'parent': parent
})
try:
session.commit()
except Exception:
tp, value, tb = sys.exc_info()
session.rollback()
session._configure_locations()
six.reraise(tp, value, tb)
return entity
def get_ftrack_entity(self, session, type, name, parent):
query = '{} where name is "{}" and project_id is "{}"'.format(
type, name, self.project_entity["id"])
entity = session.query(query).first()
# if entity doesn't exist then create one
if not entity:
entity = self.create_ftrack_entity(
session,
type,
name,
parent
)
return entity
def create_parents(self, template):
parents = []
t_split = template.split("/")
replace_pattern = re.compile(r"(\[.*\])")
type_pattern = re.compile(r"\[(.*)\]")
for t_s in t_split:
    match_type = type_pattern.findall(t_s)
    if not match_type:
        raise Exception((
            "Missing correct type flag in: {}"
            "\n Example: name[Type]").format(
                t_s)
        )
    new_name = re.sub(replace_pattern, "", t_s)
    f_type = match_type.pop()
    parents.append((new_name, f_type))
return parents
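# e.g. create_parents("shots[Folder]/sq01[Sequence]") returns
# [("shots", "Folder"), ("sq01", "Sequence")] (illustrative sketch)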
def create_task(self, task_type, task_types, parent):
_existing_tasks = [
    child for child in parent['children']
    if child.entity_type.lower() == 'task'
]
# add tasks into existing tasks if they are not already there
for _t in _existing_tasks:
if _t in self.existing_tasks:
continue
self.existing_tasks.append(_t)
existing_task = [
task for task in self.existing_tasks
if task['name'].lower() in task_type.lower()
if task['parent'] == parent
]
if existing_task:
return existing_task.pop()
task = self.session.create('Task', {
"name": task_type.lower(),
"parent": parent
})
task["type"] = task_types[task_type]
self.existing_tasks.append(task)
return task

View file

@ -0,0 +1,529 @@
from qtpy import QtWidgets, QtCore
import uiwidgets
import app_utils
import ftrack_lib
def clear_inner_modules():
    import sys
    # drop previously loaded package modules so they get re-imported fresh
    for module_name in ("ftrack_lib", "app_utils", "uiwidgets"):
        if module_name in sys.modules.keys():
            del sys.modules[module_name]
            print("{} module removed from sys.modules".format(module_name))
class MainWindow(QtWidgets.QWidget):
def __init__(self, klass, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.panel_class = klass
def closeEvent(self, event):
# clear all temp data
print("Removing temp data")
self.panel_class.clear_temp_data()
self.panel_class.close()
clear_inner_modules()
ftrack_lib.FtrackEntityOperator.existing_tasks = []
# now the panel can be closed
event.accept()
class FlameBabyPublisherPanel(object):
session = None
temp_data_dir = None
processed_components = []
project_entity = None
task_types = {}
all_task_types = {}
# TreeWidget
columns = {
"Sequence name": {
"columnWidth": 200,
"order": 0
},
"Shot name": {
"columnWidth": 200,
"order": 1
},
"Clip duration": {
"columnWidth": 100,
"order": 2
},
"Shot description": {
"columnWidth": 500,
"order": 3
},
"Task description": {
"columnWidth": 500,
"order": 4
},
}
def __init__(self, selection):
print(selection)
self.session = ftrack_lib.get_ftrack_session()
self.selection = selection
self.window = MainWindow(self)
# creating ui
self.window.setMinimumSize(1500, 600)
self.window.setWindowTitle('AYON: Baby-publisher')
self.window.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint)
self.window.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.window.setFocusPolicy(QtCore.Qt.StrongFocus)
self.window.setStyleSheet('background-color: #313131')
self._create_project_widget()
self._create_tree_widget()
self._set_sequence_params()
self._generate_widgets()
self._generate_layouts()
self._timeline_info()
self._fix_resolution()
self.window.show()
def _generate_widgets(self):
with app_utils.get_config("main") as cfg_data:
cfg_d = cfg_data
self._create_task_type_widget(cfg_d)
# input fields
self.shot_name_label = uiwidgets.FlameLabel(
'Shot name template', 'normal', self.window)
self.shot_name_template_input = uiwidgets.FlameLineEdit(
cfg_d["shot_name_template"], self.window)
self.hierarchy_label = uiwidgets.FlameLabel(
'Parents template', 'normal', self.window)
self.hierarchy_template_input = uiwidgets.FlameLineEdit(
cfg_d["hierarchy_template"], self.window)
self.start_frame_label = uiwidgets.FlameLabel(
'Workfile start frame', 'normal', self.window)
self.start_frame_input = uiwidgets.FlameLineEdit(
cfg_d["workfile_start_frame"], self.window)
self.handles_label = uiwidgets.FlameLabel(
'Shot handles', 'normal', self.window)
self.handles_input = uiwidgets.FlameLineEdit(
cfg_d["shot_handles"], self.window)
self.width_label = uiwidgets.FlameLabel(
'Sequence width', 'normal', self.window)
self.width_input = uiwidgets.FlameLineEdit(
str(self.seq_width), self.window)
self.height_label = uiwidgets.FlameLabel(
'Sequence height', 'normal', self.window)
self.height_input = uiwidgets.FlameLineEdit(
str(self.seq_height), self.window)
self.pixel_aspect_label = uiwidgets.FlameLabel(
'Pixel aspect ratio', 'normal', self.window)
self.pixel_aspect_input = uiwidgets.FlameLineEdit(
str(1.00), self.window)
self.fps_label = uiwidgets.FlameLabel(
'Frame rate', 'normal', self.window)
self.fps_input = uiwidgets.FlameLineEdit(
str(self.fps), self.window)
# Button
self.select_all_btn = uiwidgets.FlameButton(
'Select All', self.select_all, self.window)
self.remove_temp_data_btn = uiwidgets.FlameButton(
'Remove temp data', self.clear_temp_data, self.window)
self.ftrack_send_btn = uiwidgets.FlameButton(
'Send to Ftrack', self._send_to_ftrack, self.window)
def _generate_layouts(self):
# left props
v_shift = 0
prop_layout_l = QtWidgets.QGridLayout()
prop_layout_l.setHorizontalSpacing(30)
if self.project_selector_enabled:
prop_layout_l.addWidget(self.project_select_label, v_shift, 0)
prop_layout_l.addWidget(self.project_select_input, v_shift, 1)
v_shift += 1
prop_layout_l.addWidget(self.shot_name_label, (v_shift + 0), 0)
prop_layout_l.addWidget(
self.shot_name_template_input, (v_shift + 0), 1)
prop_layout_l.addWidget(self.hierarchy_label, (v_shift + 1), 0)
prop_layout_l.addWidget(
self.hierarchy_template_input, (v_shift + 1), 1)
prop_layout_l.addWidget(self.start_frame_label, (v_shift + 2), 0)
prop_layout_l.addWidget(self.start_frame_input, (v_shift + 2), 1)
prop_layout_l.addWidget(self.handles_label, (v_shift + 3), 0)
prop_layout_l.addWidget(self.handles_input, (v_shift + 3), 1)
prop_layout_l.addWidget(self.task_type_label, (v_shift + 4), 0)
prop_layout_l.addWidget(
self.task_type_input, (v_shift + 4), 1)
# right props
prop_widget_r = QtWidgets.QWidget(self.window)
prop_layout_r = QtWidgets.QGridLayout(prop_widget_r)
prop_layout_r.setHorizontalSpacing(30)
prop_layout_r.setAlignment(
QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)
prop_layout_r.setContentsMargins(0, 0, 0, 0)
prop_layout_r.addWidget(self.width_label, 1, 0)
prop_layout_r.addWidget(self.width_input, 1, 1)
prop_layout_r.addWidget(self.height_label, 2, 0)
prop_layout_r.addWidget(self.height_input, 2, 1)
prop_layout_r.addWidget(self.pixel_aspect_label, 3, 0)
prop_layout_r.addWidget(self.pixel_aspect_input, 3, 1)
prop_layout_r.addWidget(self.fps_label, 4, 0)
prop_layout_r.addWidget(self.fps_input, 4, 1)
# prop layout
prop_main_layout = QtWidgets.QHBoxLayout()
prop_main_layout.addLayout(prop_layout_l, 1)
prop_main_layout.addSpacing(20)
prop_main_layout.addWidget(prop_widget_r, 1)
# buttons layout
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(self.remove_temp_data_btn)
hbox.addWidget(self.select_all_btn)
hbox.addWidget(self.ftrack_send_btn)
# put all layouts together
main_frame = QtWidgets.QVBoxLayout(self.window)
main_frame.setMargin(20)
main_frame.addLayout(prop_main_layout)
main_frame.addWidget(self.tree)
main_frame.addLayout(hbox)
def _set_sequence_params(self):
for select in self.selection:
self.seq_height = select.height
self.seq_width = select.width
self.fps = float(str(select.frame_rate)[:-4])
break
def _create_task_type_widget(self, cfg_d):
print(self.project_entity)
self.task_types = ftrack_lib.get_project_task_types(
self.project_entity)
self.task_type_label = uiwidgets.FlameLabel(
'Create Task (type)', 'normal', self.window)
self.task_type_input = uiwidgets.FlamePushButtonMenu(
cfg_d["create_task_type"], self.task_types.keys(), self.window)
def _create_project_widget(self):
import flame
# get project name from flame current project
self.project_name = flame.project.current_project.name
# get project from ftrack -
# ftrack project name has to be the same as flame project!
query = 'Project where full_name is "{}"'.format(self.project_name)
# globally used variables
self.project_entity = self.session.query(query).first()
self.project_selector_enabled = bool(not self.project_entity)
if self.project_selector_enabled:
self.all_projects = self.session.query(
"Project where status is active").all()
self.project_entity = self.all_projects[0]
project_names = [p["full_name"] for p in self.all_projects]
self.all_task_types = {
p["full_name"]: ftrack_lib.get_project_task_types(p).keys()
for p in self.all_projects
}
self.project_select_label = uiwidgets.FlameLabel(
'Select Ftrack project', 'normal', self.window)
self.project_select_input = uiwidgets.FlamePushButtonMenu(
self.project_entity["full_name"], project_names, self.window)
self.project_select_input.selection_changed.connect(
self._on_project_changed)
def _create_tree_widget(self):
ordered_column_labels = self.columns.keys()
for _name, _value in self.columns.items():
ordered_column_labels.pop(_value["order"])
ordered_column_labels.insert(_value["order"], _name)
self.tree = uiwidgets.FlameTreeWidget(
ordered_column_labels, self.window)
# Allow multiple items in tree to be selected
self.tree.setSelectionMode(QtWidgets.QAbstractItemView.MultiSelection)
# Set tree column width
for _name, _val in self.columns.items():
self.tree.setColumnWidth(
_val["order"],
_val["columnWidth"]
)
# Prevent weird characters when shrinking tree columns
self.tree.setTextElideMode(QtCore.Qt.ElideNone)
def _resolve_project_entity(self):
if self.project_selector_enabled:
selected_project_name = self.project_select_input.text()
self.project_entity = next(
(p for p in self.all_projects
if p["full_name"] in selected_project_name),
None
)
def _save_ui_state_to_cfg(self):
_cfg_data_back = {
"shot_name_template": self.shot_name_template_input.text(),
"workfile_start_frame": self.start_frame_input.text(),
"shot_handles": self.handles_input.text(),
"hierarchy_template": self.hierarchy_template_input.text(),
"create_task_type": self.task_type_input.text()
}
# add cfg data back to settings.ini
app_utils.set_config(_cfg_data_back, "main")
def _send_to_ftrack(self):
# resolve active project and add it to self.project_entity
self._resolve_project_entity()
self._save_ui_state_to_cfg()
# get handles from gui input
handles = self.handles_input.text()
# get frame start from gui input
frame_start = int(self.start_frame_input.text())
# get task type from gui input
task_type = self.task_type_input.text()
# get resolution from gui inputs
fps = self.fps_input.text()
entity_operator = ftrack_lib.FtrackEntityOperator(
self.session, self.project_entity)
component_creator = ftrack_lib.FtrackComponentCreator(self.session)
if not self.temp_data_dir:
self.window.hide()
self.temp_data_dir = component_creator.generate_temp_data(
self.selection,
{
"nbHandles": handles
}
)
self.window.show()
# collect generated files into list data for further use
component_creator.collect_generated_data(self.temp_data_dir)
# Get all selected items from treewidget
for item in self.tree.selectedItems():
# frame ranges
frame_duration = int(item.text(2))
frame_end = frame_start + frame_duration
# description
shot_description = item.text(3)
task_description = item.text(4)
# other
sequence_name = item.text(0)
shot_name = item.text(1)
thumb_fp = component_creator.get_thumb_path(shot_name)
video_fp = component_creator.get_video_path(shot_name)
print("processed comps: {}".format(self.processed_components))
print("processed thumb_fp: {}".format(thumb_fp))
processed = False
if thumb_fp not in self.processed_components:
self.processed_components.append(thumb_fp)
else:
processed = True
print("processed: {}".format(processed))
# populate full shot info
shot_attributes = {
"sequence": sequence_name,
"shot": shot_name,
"task": task_type
}
# format shot name template
_shot_name = self.shot_name_template_input.text().format(
**shot_attributes)
# format hierarchy template
_hierarchy_text = self.hierarchy_template_input.text().format(
**shot_attributes)
print(_hierarchy_text)
# solve parents
parents = entity_operator.create_parents(_hierarchy_text)
print(parents)
# obtain shot parents entities
_parent = None
for _name, _type in parents:
p_entity = entity_operator.get_ftrack_entity(
self.session,
_type,
_name,
_parent
)
print(p_entity)
_parent = p_entity
# obtain shot ftrack entity
f_s_entity = entity_operator.get_ftrack_entity(
self.session,
"Shot",
_shot_name,
_parent
)
print("Shot entity is: {}".format(f_s_entity))
if not processed:
# first create thumbnail and get version entity
assetversion_entity = component_creator.create_component(
f_s_entity, {
"file_path": thumb_fp
}
)
# secondly add video to version entity
component_creator.create_component(
f_s_entity, {
"file_path": video_fp,
"duration": frame_duration,
"handles": int(handles),
"fps": float(fps)
}, assetversion_entity
)
# create custom attributtes
custom_attrs = {
"frameStart": frame_start,
"frameEnd": frame_end,
"handleStart": int(handles),
"handleEnd": int(handles),
"resolutionWidth": int(self.width_input.text()),
"resolutionHeight": int(self.height_input.text()),
"pixelAspect": float(self.pixel_aspect_input.text()),
"fps": float(fps)
}
# update custom attributes on shot entity
for key in custom_attrs:
f_s_entity['custom_attributes'][key] = custom_attrs[key]
task_entity = entity_operator.create_task(
task_type, self.task_types, f_s_entity)
# Create notes.
user = self.session.query(
"User where username is \"{}\"".format(self.session.api_user)
).first()
f_s_entity.create_note(shot_description, author=user)
if task_description:
task_entity.create_note(task_description, user)
entity_operator.commit()
component_creator.close()
def _fix_resolution(self):
# Center window in linux
resolution = QtWidgets.QDesktopWidget().screenGeometry()
self.window.move(
(resolution.width() / 2) - (self.window.frameSize().width() / 2),
(resolution.height() / 2) - (self.window.frameSize().height() / 2))
def _on_project_changed(self):
    selected_project_name = self.project_select_input.text()
    task_types = self.all_task_types[selected_project_name]
    self.task_type_input.set_menu_options(task_types)
def _timeline_info(self):
# identify the segment information on the timeline
for sequence in self.selection:
frame_rate = float(str(sequence.frame_rate)[:-4])
for ver in sequence.versions:
for track in ver.tracks:
if len(track.segments) == 0 and track.hidden:
continue
for segment in track.segments:
print(segment.attributes)
if segment.name.get_value() == "":
continue
if segment.hidden.get_value() is True:
continue
# get clip frame duration
record_duration = str(segment.record_duration)[1:-1]
clip_duration = app_utils.timecode_to_frames(
record_duration, frame_rate)
# populate shot source metadata
shot_description = ""
for attr in ["tape_name", "source_name", "head",
"tail", "file_path"]:
if not hasattr(segment, attr):
continue
_value = getattr(segment, attr)
_label = attr.replace("_", " ").capitalize()
row = "{}: {}\n".format(_label, _value)
shot_description += row
# Add timeline segment to tree
QtWidgets.QTreeWidgetItem(self.tree, [
sequence.name.get_value(), # seq name
segment.shot_name.get_value(), # shot name
str(clip_duration), # clip duration
shot_description, # shot description
segment.comment.get_value() # task description
]).setFlags(
QtCore.Qt.ItemIsEditable
| QtCore.Qt.ItemIsEnabled
| QtCore.Qt.ItemIsSelectable
)
# Select top item in tree
self.tree.setCurrentItem(self.tree.topLevelItem(0))
def select_all(self):
self.tree.selectAll()
def clear_temp_data(self):
import shutil
self.processed_components = []
if self.temp_data_dir:
shutil.rmtree(self.temp_data_dir)
self.temp_data_dir = None
print("All Temp data were destroyed ...")
def close(self):
self._save_ui_state_to_cfg()
self.session.close()

View file

@ -0,0 +1,212 @@
from qtpy import QtWidgets, QtCore
class FlameLabel(QtWidgets.QLabel):
"""
Custom Qt Flame Label Widget
For different label looks set label_type as:
'normal', 'background', or 'outline'
To use:
label = FlameLabel('Label Name', 'normal', window)
"""
def __init__(self, label_name, label_type, parent_window, *args, **kwargs):
super(FlameLabel, self).__init__(*args, **kwargs)
self.setText(label_name)
self.setParent(parent_window)
self.setMinimumSize(130, 28)
self.setMaximumHeight(28)
self.setFocusPolicy(QtCore.Qt.NoFocus)
# Set label stylesheet based on label_type
if label_type == 'normal':
self.setStyleSheet(
'QLabel {color: #9a9a9a; border-bottom: 1px inset #282828; font: 14px "Discreet"}' # noqa
'QLabel:disabled {color: #6a6a6a}'
)
elif label_type == 'background':
self.setAlignment(QtCore.Qt.AlignCenter)
self.setStyleSheet(
'color: #9a9a9a; background-color: #393939; font: 14px "Discreet"' # noqa
)
elif label_type == 'outline':
self.setAlignment(QtCore.Qt.AlignCenter)
self.setStyleSheet(
'color: #9a9a9a; background-color: #212121; border: 1px solid #404040; font: 14px "Discreet"' # noqa
)
class FlameLineEdit(QtWidgets.QLineEdit):
"""
Custom Qt Flame Line Edit Widget
Main window should include this:
window.setFocusPolicy(QtCore.Qt.StrongFocus)
To use:
line_edit = FlameLineEdit('Some text here', window)
"""
def __init__(self, text, parent_window, *args, **kwargs):
super(FlameLineEdit, self).__init__(*args, **kwargs)
self.setText(text)
self.setParent(parent_window)
self.setMinimumHeight(28)
self.setMinimumWidth(110)
self.setStyleSheet(
'QLineEdit {color: #9a9a9a; background-color: #373e47; selection-color: #262626; selection-background-color: #b8b1a7; font: 14px "Discreet"}' # noqa
'QLineEdit:focus {background-color: #474e58}' # noqa
'QLineEdit:disabled {color: #6a6a6a; background-color: #373737}'
)
class FlameTreeWidget(QtWidgets.QTreeWidget):
"""
Custom Qt Flame Tree Widget
To use:
tree_headers = ['Header1', 'Header2', 'Header3', 'Header4']
tree = FlameTreeWidget(tree_headers, window)
"""
def __init__(self, tree_headers, parent_window, *args, **kwargs):
super(FlameTreeWidget, self).__init__(*args, **kwargs)
self.setMinimumWidth(1000)
self.setMinimumHeight(300)
self.setSortingEnabled(True)
self.sortByColumn(0, QtCore.Qt.AscendingOrder)
self.setAlternatingRowColors(True)
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.setStyleSheet(
'QTreeWidget {color: #9a9a9a; background-color: #2a2a2a; alternate-background-color: #2d2d2d; font: 14px "Discreet"}' # noqa
'QTreeWidget::item:selected {color: #d9d9d9; background-color: #474747; border: 1px solid #111111}' # noqa
'QHeaderView {color: #9a9a9a; background-color: #393939; font: 14px "Discreet"}' # noqa
'QTreeWidget::item:selected {selection-background-color: #111111}'
'QMenu {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}' # noqa
'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}'
)
self.verticalScrollBar().setStyleSheet('color: #818181')
self.horizontalScrollBar().setStyleSheet('color: #818181')
self.setHeaderLabels(tree_headers)
class FlameButton(QtWidgets.QPushButton):
"""
Custom Qt Flame Button Widget
To use:
button = FlameButton('Button Name', do_this_when_pressed, window)
"""
def __init__(self, button_name, do_when_pressed, parent_window,
*args, **kwargs):
super(FlameButton, self).__init__(*args, **kwargs)
self.setText(button_name)
self.setParent(parent_window)
self.setMinimumSize(QtCore.QSize(110, 28))
self.setMaximumSize(QtCore.QSize(110, 28))
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.clicked.connect(do_when_pressed)
self.setStyleSheet(
'QPushButton {color: #9a9a9a; background-color: #424142; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}' # noqa
'QPushButton:pressed {color: #d9d9d9; background-color: #4f4f4f; border-top: 1px inset #666666; font: italic}' # noqa
'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}' # noqa
)
class FlamePushButton(QtWidgets.QPushButton):
"""
Custom Qt Flame Push Button Widget
To use:
pushbutton = FlamePushButton(' Button Name', True_or_False, window)
"""
def __init__(self, button_name, button_checked, parent_window,
*args, **kwargs):
super(FlamePushButton, self).__init__(*args, **kwargs)
self.setText(button_name)
self.setParent(parent_window)
self.setCheckable(True)
self.setChecked(button_checked)
self.setMinimumSize(155, 28)
self.setMaximumSize(155, 28)
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.setStyleSheet(
'QPushButton {color: #9a9a9a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #424142, stop: .94 #2e3b48); text-align: left; border-top: 1px inset #555555; border-bottom: 1px inset black; font: 14px "Discreet"}' # noqa
'QPushButton:checked {color: #d9d9d9; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #4f4f4f, stop: .94 #5a7fb4); font: italic; border: 1px inset black; border-bottom: 1px inset #404040; border-right: 1px inset #404040}' # noqa
'QPushButton:disabled {color: #6a6a6a; background-color: qlineargradient(x1: 0, y1: 0, x2: 1, y2: 0, stop: .93 #383838, stop: .94 #353535); font: light; border-top: 1px solid #575757; border-bottom: 1px solid #242424; border-right: 1px solid #353535; border-left: 1px solid #353535}' # noqa
'QToolTip {color: black; background-color: #ffffde; border: black solid 1px}' # noqa
)
class FlamePushButtonMenu(QtWidgets.QPushButton):
"""
Custom Qt Flame Menu Push Button Widget
To use:
push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4']
menu_push_button = FlamePushButtonMenu('push_button_name',
push_button_menu_options, window)
or
push_button_menu_options = ['Item 1', 'Item 2', 'Item 3', 'Item 4']
menu_push_button = FlamePushButtonMenu(push_button_menu_options[0],
push_button_menu_options, window)
"""
selection_changed = QtCore.Signal(str)
def __init__(self, button_name, menu_options, parent_window,
*args, **kwargs):
super(FlamePushButtonMenu, self).__init__(*args, **kwargs)
self.setParent(parent_window)
self.setMinimumHeight(28)
self.setMinimumWidth(110)
self.setFocusPolicy(QtCore.Qt.NoFocus)
self.setStyleSheet(
'QPushButton {color: #9a9a9a; background-color: #24303d; font: 14px "Discreet"}' # noqa
'QPushButton:disabled {color: #747474; background-color: #353535; border-top: 1px solid #444444; border-bottom: 1px solid #242424}' # noqa
)
pushbutton_menu = QtWidgets.QMenu(parent_window)
pushbutton_menu.setFocusPolicy(QtCore.Qt.NoFocus)
pushbutton_menu.setStyleSheet(
'QMenu {color: #9a9a9a; background-color:#24303d; font: 14px "Discreet"}' # noqa
'QMenu::item:selected {color: #d9d9d9; background-color: #3a4551}'
)
self._pushbutton_menu = pushbutton_menu
self.setMenu(pushbutton_menu)
self.set_menu_options(menu_options, button_name)
def set_menu_options(self, menu_options, current_option=None):
self._pushbutton_menu.clear()
current_option = current_option or menu_options[0]
for option in menu_options:
action = self._pushbutton_menu.addAction(option)
action.triggered.connect(self._on_action_trigger)
if current_option is not None:
self.setText(current_option)
def _on_action_trigger(self):
action = self.sender()
self.setText(action.text())
self.selection_changed.emit(action.text())
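# A minimal usage sketch (the names and options below are illustrative only):
# swap the menu options at runtime and react to the selection signal.
#     menu_btn = FlamePushButtonMenu('Shot', ['Shot', 'Sequence'], window)
#     menu_btn.selection_changed.connect(lambda text: print(text))
#     menu_btn.set_menu_options(['Comp', 'Precomp'], current_option='Comp')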

View file

@@ -0,0 +1,43 @@
from __future__ import print_function
import os
import sys
# only testing dependency for nested modules in package
import six # noqa
SCRIPT_DIR = os.path.dirname(__file__)
PACKAGE_DIR = os.path.join(SCRIPT_DIR, "modules")
sys.path.append(PACKAGE_DIR)
def flame_panel_executor(selection):
    # Force a fresh import of panel_app so code changes are picked up
    # between executions within the running Flame session. Deleting the
    # module from sys.modules makes the following import re-execute it,
    # so no explicit reload is needed.
    if "panel_app" in sys.modules.keys():
        print("panel_app module is already loaded")
        del sys.modules["panel_app"]
        print("panel_app module removed from sys.modules")
    import panel_app
    panel_app.FlameBabyPublisherPanel(selection)
def scope_sequence(selection):
import flame
return any(isinstance(item, flame.PySequence) for item in selection)
def get_media_panel_custom_ui_actions():
return [
{
"name": "AYON: Baby-publisher",
"actions": [
{
"name": "Create Shots",
"isVisible": scope_sequence,
"execute": flame_panel_executor
}
]
}
]

View file

@@ -0,0 +1,219 @@
from __future__ import print_function
import sys
from qtpy import QtWidgets
from pprint import pformat
import atexit
import ayon_flame.api as opfapi
from ayon_core.pipeline import (
install_host,
registered_host,
)
def openpype_install():
"""Registering AYON in context
"""
install_host(opfapi)
print("Registered host: {}".format(registered_host()))
# Exception handler
def exception_handler(exctype, value, _traceback):
    """Exception handler for improving UX
    Args:
        exctype (str): type of exception
        value (str): exception value
        _traceback (obj): traceback to show
    """
    import traceback
    msg = "AYON: Python exception {} in {}".format(value, exctype)
    mbox = QtWidgets.QMessageBox()
    mbox.setText(msg)
    mbox.setDetailedText(
        pformat(traceback.format_exception(exctype, value, _traceback)))
    mbox.setStyleSheet('QLabel{min-width: 800px;}')
    mbox.exec_()
    sys.__excepthook__(exctype, value, _traceback)
# add exception handler into sys module
sys.excepthook = exception_handler
# register clean up logic to be called at Flame exit
def cleanup():
"""Cleaning up Flame framework context
"""
if opfapi.CTX.flame_apps:
print('`{}` cleaning up flame_apps:\n {}\n'.format(
__file__, pformat(opfapi.CTX.flame_apps)))
while len(opfapi.CTX.flame_apps):
app = opfapi.CTX.flame_apps.pop()
print('`{}` removing : {}'.format(__file__, app.name))
del app
opfapi.CTX.flame_apps = []
if opfapi.CTX.app_framework:
print('openpype\t: {} cleaning up'.format(
opfapi.CTX.app_framework.bundle_name)
)
opfapi.CTX.app_framework.save_prefs()
opfapi.CTX.app_framework = None
atexit.register(cleanup)
def load_apps():
"""Load available flame_apps into Flame framework
"""
opfapi.CTX.flame_apps.append(
opfapi.FlameMenuProjectConnect(opfapi.CTX.app_framework))
opfapi.CTX.flame_apps.append(
opfapi.FlameMenuTimeline(opfapi.CTX.app_framework))
opfapi.CTX.flame_apps.append(
opfapi.FlameMenuUniversal(opfapi.CTX.app_framework))
opfapi.CTX.app_framework.log.info("Apps are loaded")
def project_changed_dict(info):
"""Hook for project change action
Args:
info (str): info text
"""
cleanup()
def app_initialized(parent=None):
"""Inicialization of Framework
Args:
parent (obj, optional): Parent object. Defaults to None.
"""
opfapi.CTX.app_framework = opfapi.FlameAppFramework()
print("{} initializing".format(
opfapi.CTX.app_framework.bundle_name))
load_apps()
"""
Initialisation of the hook is starting from here
First it needs to test if it can import the flame module.
This will happen only in case a project has been loaded.
Then `app_initialized` will load main Framework which will load
all menu objects as flame_apps.
"""
try:
import flame # noqa
app_initialized(parent=None)
except ImportError:
print("!!!! not able to import flame module !!!!")
def rescan_hooks():
import flame # noqa
flame.execute_shortcut('Rescan Python Hooks')
def _build_app_menu(app_name):
"""Flame menu object generator
Args:
app_name (str): name of menu object app
Returns:
list: menu object
"""
menu = []
# first find the relative appname
app = None
for _app in opfapi.CTX.flame_apps:
if _app.__class__.__name__ == app_name:
app = _app
if app:
menu.append(app.build_menu())
if opfapi.CTX.app_framework:
menu_auto_refresh = opfapi.CTX.app_framework.prefs_global.get(
'menu_auto_refresh', {})
if menu_auto_refresh.get('timeline_menu', True):
try:
import flame # noqa
flame.schedule_idle_event(rescan_hooks)
except ImportError:
print("!-!!! not able to import flame module !!!!")
return menu
""" Flame hooks are starting here
"""
def project_saved(project_name, save_time, is_auto_save):
"""Hook to activate when project is saved
Args:
project_name (str): name of project
save_time (str): time when it was saved
is_auto_save (bool): autosave is on or off
"""
if opfapi.CTX.app_framework:
opfapi.CTX.app_framework.save_prefs()
def get_main_menu_custom_ui_actions():
"""Hook to create submenu in start menu
Returns:
list: menu object
"""
# install openpype and the host
openpype_install()
return _build_app_menu("FlameMenuProjectConnect")
def get_timeline_custom_ui_actions():
"""Hook to create submenu in timeline
Returns:
list: menu object
"""
# install openpype and the host
openpype_install()
return _build_app_menu("FlameMenuTimeline")
def get_batch_custom_ui_actions():
"""Hook to create submenu in batch
Returns:
list: menu object
"""
# install openpype and the host
openpype_install()
return _build_app_menu("FlameMenuUniversal")
def get_media_panel_custom_ui_actions():
"""Hook to create submenu in desktop
Returns:
list: menu object
"""
# install openpype and the host
openpype_install()
return _build_app_menu("FlameMenuUniversal")

View file

@@ -0,0 +1,3 @@
# -*- coding: utf-8 -*-
"""Package declaring AYON addon 'flame' version."""
__version__ = "0.2.0"

View file

@@ -1,3 +1,10 @@
name = "flame"
title = "Flame"
version = "0.1.0"
version = "0.2.0"
client_dir = "ayon_flame"
ayon_required_addons = {
"core": ">0.3.2",
}
ayon_compatible_addons = {}

View file

@@ -0,0 +1,17 @@
from .version import __version__
from .addon import (
get_fusion_version,
FusionAddon,
FUSION_ADDON_ROOT,
FUSION_VERSIONS_DICT,
)
__all__ = (
"__version__",
"get_fusion_version",
"FusionAddon",
"FUSION_ADDON_ROOT",
"FUSION_VERSIONS_DICT",
)

View file

@@ -0,0 +1,72 @@
import os
import re
from ayon_core.addon import AYONAddon, IHostAddon
from ayon_core.lib import Logger
from .version import __version__
FUSION_ADDON_ROOT = os.path.dirname(os.path.abspath(__file__))
# FUSION_VERSIONS_DICT is used by the pre-launch hooks
# The keys correspond to all currently supported Fusion versions
# Each value is a tuple of the corresponding Python home variable and a
# profile number, which is used by the profile hook to set Fusion profile
# variables.
FUSION_VERSIONS_DICT = {
9: ("FUSION_PYTHON36_HOME", 9),
16: ("FUSION16_PYTHON36_HOME", 16),
17: ("FUSION16_PYTHON36_HOME", 16),
18: ("FUSION_PYTHON3_HOME", 16),
}
def get_fusion_version(app_name):
"""
The function is triggered by the prelaunch hooks to get the fusion version.
`app_name` is obtained by prelaunch hooks from the
`launch_context.env.get("AYON_APP_NAME")`.
To get a correct Fusion version, a version number should be present
in the `applications/fusion/variants` key
of the Blackmagic Fusion Application Settings.
"""
log = Logger.get_logger(__name__)
if not app_name:
return
app_version_candidates = re.findall(r"\d+", app_name)
if not app_version_candidates:
return
for app_version in app_version_candidates:
if int(app_version) in FUSION_VERSIONS_DICT:
return int(app_version)
else:
log.info(
"Unsupported Fusion version: {app_version}".format(
app_version=app_version
)
)
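# A hedged usage sketch (the app names below are assumptions, not real
# settings entries):
#     get_fusion_version("fusion/18")  # -> 18
#     get_fusion_version("fusion")     # -> None (no version digits found)
#     FUSION_VERSIONS_DICT[18]         # -> ("FUSION_PYTHON3_HOME", 16)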
class FusionAddon(AYONAddon, IHostAddon):
name = "fusion"
version = __version__
host_name = "fusion"
def get_launch_hook_paths(self, app):
if app.host_name != self.host_name:
return []
return [os.path.join(FUSION_ADDON_ROOT, "hooks")]
def add_implementation_envs(self, env, app):
# Set default values if are not already set via settings
defaults = {"AYON_LOG_NO_COLORS": "1"}
for key, value in defaults.items():
if not env.get(key):
env[key] = value
def get_workfile_extensions(self):
return [".comp"]

View file

@@ -0,0 +1,39 @@
from .pipeline import (
FusionHost,
ls,
imprint_container,
parse_container
)
from .lib import (
maintained_selection,
update_frame_range,
set_current_context_framerange,
get_current_comp,
get_bmd_library,
comp_lock_and_undo_chunk
)
from .menu import launch_ayon_menu
__all__ = [
# pipeline
"FusionHost",
"ls",
"imprint_container",
"parse_container",
# lib
"maintained_selection",
"update_frame_range",
"set_current_context_framerange",
"get_current_comp",
"get_bmd_library",
"comp_lock_and_undo_chunk",
# menu
"launch_ayon_menu",
]

View file

@@ -0,0 +1,112 @@
import pyblish.api
from ayon_fusion.api.lib import get_current_comp
from ayon_core.pipeline.publish import get_errored_instances_from_context
class SelectInvalidAction(pyblish.api.Action):
"""Select invalid nodes in Fusion when plug-in failed.
To retrieve the invalid nodes this assumes a static `get_invalid()`
method is available on the plugin.
"""
label = "Select invalid"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(
context,
plugin=plugin,
)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
invalid = list()
for instance in errored_instances:
invalid_nodes = plugin.get_invalid(instance)
if invalid_nodes:
if isinstance(invalid_nodes, (list, tuple)):
invalid.extend(invalid_nodes)
else:
                    self.log.warning(
                        "Plug-in reported invalid results, "
                        "but returned no selectable nodes."
                    )
if not invalid:
# Assume relevant comp is current comp and clear selection
self.log.info("No invalid tools found.")
comp = get_current_comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
return
# Assume a single comp
first_tool = invalid[0]
comp = first_tool.Comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
names = set()
for tool in invalid:
flow.Select(tool, True)
comp.SetActiveTool(tool)
names.add(tool.Name)
self.log.info(
"Selecting invalid tools: %s" % ", ".join(sorted(names))
)
class SelectToolAction(pyblish.api.Action):
"""Select invalid output tool in Fusion when plug-in failed.
"""
label = "Select saver"
on = "failed" # This action is only available on a failed plug-in
icon = "search" # Icon from Awesome Icon
def process(self, context, plugin):
errored_instances = get_errored_instances_from_context(
context,
plugin=plugin,
)
# Get the invalid nodes for the plug-ins
self.log.info("Finding invalid nodes..")
tools = []
for instance in errored_instances:
tool = instance.data.get("tool")
if tool is not None:
tools.append(tool)
else:
                self.log.warning(
                    "Plug-in reported invalid results, "
                    f"but has no saver tool for instance {instance.name}."
                )
if not tools:
# Assume relevant comp is current comp and clear selection
self.log.info("No invalid tools found.")
comp = get_current_comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
return
# Assume a single comp
first_tool = tools[0]
comp = first_tool.Comp()
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
names = set()
for tool in tools:
flow.Select(tool, True)
comp.SetActiveTool(tool)
names.add(tool.Name)
self.log.info(
"Selecting invalid tools: %s" % ", ".join(sorted(names))
)

View file

@@ -0,0 +1,402 @@
import os
import sys
import re
import contextlib
from ayon_core.lib import Logger, BoolDef, UILabelDef
from ayon_core.style import load_stylesheet
from ayon_core.pipeline import registered_host
from ayon_core.pipeline.create import CreateContext
from ayon_core.pipeline.context_tools import get_current_folder_entity
self = sys.modules[__name__]
self._project = None
def update_frame_range(start, end, comp=None, set_render_range=True,
handle_start=0, handle_end=0):
"""Set Fusion comp's start and end frame range
Args:
start (float, int): start frame
end (float, int): end frame
comp (object, Optional): comp object from fusion
set_render_range (bool, Optional): When True this will also set the
composition's render start and end frame.
handle_start (float, int, Optional): frame handles before start frame
handle_end (float, int, Optional): frame handles after end frame
Returns:
None
"""
if not comp:
comp = get_current_comp()
# Convert any potential none type to zero
handle_start = handle_start or 0
handle_end = handle_end or 0
attrs = {
"COMPN_GlobalStart": start - handle_start,
"COMPN_GlobalEnd": end + handle_end
}
# set frame range
if set_render_range:
attrs.update({
"COMPN_RenderStart": start,
"COMPN_RenderEnd": end
})
with comp_lock_and_undo_chunk(comp):
comp.SetAttrs(attrs)
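# A worked example (values are illustrative): a shot publishing frames
# 1001-1100 with 10-frame handles would set the comp attributes to
# GlobalStart=991, GlobalEnd=1110, RenderStart=1001, RenderEnd=1100:
#     update_frame_range(1001, 1100, handle_start=10, handle_end=10)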
def set_current_context_framerange(folder_entity=None):
"""Set Comp's frame range based on current folder."""
if folder_entity is None:
folder_entity = get_current_folder_entity(
fields={"attrib.frameStart",
"attrib.frameEnd",
"attrib.handleStart",
"attrib.handleEnd"})
folder_attributes = folder_entity["attrib"]
start = folder_attributes["frameStart"]
end = folder_attributes["frameEnd"]
handle_start = folder_attributes["handleStart"]
handle_end = folder_attributes["handleEnd"]
update_frame_range(start, end, set_render_range=True,
handle_start=handle_start,
handle_end=handle_end)
def set_current_context_fps(folder_entity=None):
"""Set Comp's frame rate (FPS) to based on current asset"""
if folder_entity is None:
folder_entity = get_current_folder_entity(fields={"attrib.fps"})
fps = float(folder_entity["attrib"].get("fps", 24.0))
comp = get_current_comp()
comp.SetPrefs({
"Comp.FrameFormat.Rate": fps,
})
def set_current_context_resolution(folder_entity=None):
"""Set Comp's resolution width x height default based on current folder"""
if folder_entity is None:
folder_entity = get_current_folder_entity(
fields={"attrib.resolutionWidth", "attrib.resolutionHeight"})
folder_attributes = folder_entity["attrib"]
width = folder_attributes["resolutionWidth"]
height = folder_attributes["resolutionHeight"]
comp = get_current_comp()
print("Setting comp frame format resolution to {}x{}".format(width,
height))
comp.SetPrefs({
"Comp.FrameFormat.Width": width,
"Comp.FrameFormat.Height": height,
})
def validate_comp_prefs(comp=None, force_repair=False):
"""Validate current comp defaults with folder settings.
Validates fps, resolutionWidth, resolutionHeight, aspectRatio.
This does *not* validate frameStart, frameEnd, handleStart and handleEnd.
"""
if comp is None:
comp = get_current_comp()
log = Logger.get_logger("validate_comp_prefs")
fields = {
"path",
"attrib.fps",
"attrib.resolutionWidth",
"attrib.resolutionHeight",
"attrib.pixelAspect",
}
folder_entity = get_current_folder_entity(fields=fields)
folder_path = folder_entity["path"]
folder_attributes = folder_entity["attrib"]
comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat")
# Pixel aspect ratio in Fusion is set as AspectX and AspectY so we convert
# the data to something that is more sensible to Fusion
folder_attributes["pixelAspectX"] = folder_attributes.pop("pixelAspect")
folder_attributes["pixelAspectY"] = 1.0
validations = [
("fps", "Rate", "FPS"),
("resolutionWidth", "Width", "Resolution Width"),
("resolutionHeight", "Height", "Resolution Height"),
("pixelAspectX", "AspectX", "Pixel Aspect Ratio X"),
("pixelAspectY", "AspectY", "Pixel Aspect Ratio Y")
]
invalid = []
for key, comp_key, label in validations:
folder_value = folder_attributes[key]
comp_value = comp_frame_format_prefs.get(comp_key)
if folder_value != comp_value:
invalid_msg = "{} {} should be {}".format(label,
comp_value,
folder_value)
invalid.append(invalid_msg)
if not force_repair:
# Do not log warning if we force repair anyway
log.warning(
"Comp {pref} {value} does not match folder "
"'{folder_path}' {pref} {folder_value}".format(
pref=label,
value=comp_value,
folder_path=folder_path,
folder_value=folder_value)
)
if invalid:
def _on_repair():
attributes = dict()
for key, comp_key, _label in validations:
value = folder_attributes[key]
comp_key_full = "Comp.FrameFormat.{}".format(comp_key)
attributes[comp_key_full] = value
comp.SetPrefs(attributes)
if force_repair:
log.info("Applying default Comp preferences..")
_on_repair()
return
from . import menu
from ayon_core.tools.utils import SimplePopup
dialog = SimplePopup(parent=menu.menu)
dialog.setWindowTitle("Fusion comp has invalid configuration")
msg = "Comp preferences mismatches '{}'".format(folder_path)
msg += "\n" + "\n".join(invalid)
dialog.set_message(msg)
dialog.set_button_text("Repair")
dialog.on_clicked.connect(_on_repair)
dialog.show()
dialog.raise_()
dialog.activateWindow()
dialog.setStyleSheet(load_stylesheet())
@contextlib.contextmanager
def maintained_selection(comp=None):
"""Reset comp selection from before the context after the context"""
if comp is None:
comp = get_current_comp()
previous_selection = comp.GetToolList(True).values()
try:
yield
finally:
flow = comp.CurrentFrame.FlowView
flow.Select() # No args equals clearing selection
if previous_selection:
for tool in previous_selection:
flow.Select(tool, True)
@contextlib.contextmanager
def maintained_comp_range(comp=None,
global_start=True,
global_end=True,
render_start=True,
render_end=True):
"""Reset comp frame ranges from before the context after the context"""
if comp is None:
comp = get_current_comp()
comp_attrs = comp.GetAttrs()
preserve_attrs = {}
if global_start:
preserve_attrs["COMPN_GlobalStart"] = comp_attrs["COMPN_GlobalStart"]
if global_end:
preserve_attrs["COMPN_GlobalEnd"] = comp_attrs["COMPN_GlobalEnd"]
if render_start:
preserve_attrs["COMPN_RenderStart"] = comp_attrs["COMPN_RenderStart"]
if render_end:
preserve_attrs["COMPN_RenderEnd"] = comp_attrs["COMPN_RenderEnd"]
try:
yield
finally:
comp.SetAttrs(preserve_attrs)
def get_frame_path(path):
"""Get filename for the Fusion Saver with padded number as '#'
>>> get_frame_path("C:/test.exr")
('C:/test', 4, '.exr')
>>> get_frame_path("filename.00.tif")
('filename.', 2, '.tif')
>>> get_frame_path("foobar35.tif")
('foobar', 2, '.tif')
Args:
path (str): The path to render to.
Returns:
tuple: head, padding, tail (extension)
"""
filename, ext = os.path.splitext(path)
# Find a final number group
match = re.match('.*?([0-9]+)$', filename)
if match:
padding = len(match.group(1))
# remove number from end since fusion
# will swap it with the frame number
filename = filename[:-padding]
else:
padding = 4 # default Fusion padding
return filename, padding, ext
def get_fusion_module():
"""Get current Fusion instance"""
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion
def get_bmd_library():
"""Get bmd library"""
bmd = getattr(sys.modules["__main__"], "bmd", None)
return bmd
def get_current_comp():
"""Get current comp in this session"""
fusion = get_fusion_module()
if fusion is not None:
comp = fusion.CurrentComp
return comp
@contextlib.contextmanager
def comp_lock_and_undo_chunk(
comp,
undo_queue_name="Script CMD",
keep_undo=True,
):
"""Lock comp and open an undo chunk during the context"""
try:
comp.Lock()
comp.StartUndo(undo_queue_name)
yield
finally:
comp.Unlock()
comp.EndUndo(keep_undo)
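# A minimal usage sketch, assuming a live comp session: group scripted
# changes into a single named undo step while the comp stays locked.
#     comp = get_current_comp()
#     with comp_lock_and_undo_chunk(comp, "Add saver"):
#         comp.AddTool("Saver", -32768, -32768)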
def update_content_on_context_change():
"""Update all Creator instances to current asset"""
host = registered_host()
context = host.get_current_context()
folder_path = context["folder_path"]
task = context["task_name"]
create_context = CreateContext(host, reset=True)
for instance in create_context.instances:
instance_folder_path = instance.get("folderPath")
if instance_folder_path and instance_folder_path != folder_path:
instance["folderPath"] = folder_path
instance_task = instance.get("task")
if instance_task and instance_task != task:
instance["task"] = task
create_context.save_changes()
def prompt_reset_context():
"""Prompt the user what context settings to reset.
This prompt is used on saving to a different task to allow the scene to
get matched to the new context.
"""
# TODO: Cleanup this prototyped mess of imports and odd dialog
from ayon_core.tools.attribute_defs.dialog import (
AttributeDefinitionsDialog
)
from qtpy import QtCore
definitions = [
UILabelDef(
label=(
"You are saving your workfile into a different folder or task."
"\n\n"
"Would you like to update some settings to the new context?\n"
)
),
BoolDef(
"fps",
label="FPS",
tooltip="Reset Comp FPS",
default=True
),
BoolDef(
"frame_range",
label="Frame Range",
tooltip="Reset Comp start and end frame ranges",
default=True
),
BoolDef(
"resolution",
label="Comp Resolution",
tooltip="Reset Comp resolution",
default=True
),
BoolDef(
"instances",
label="Publish instances",
tooltip="Update all publish instance's folder and task to match "
"the new folder and task",
default=True
),
]
dialog = AttributeDefinitionsDialog(definitions)
dialog.setWindowFlags(
dialog.windowFlags() | QtCore.Qt.WindowStaysOnTopHint
)
dialog.setWindowTitle("Saving to different context.")
dialog.setStyleSheet(load_stylesheet())
if not dialog.exec_():
return None
options = dialog.get_values()
folder_entity = get_current_folder_entity()
if options["frame_range"]:
set_current_context_framerange(folder_entity)
if options["fps"]:
set_current_context_fps(folder_entity)
if options["resolution"]:
set_current_context_resolution(folder_entity)
if options["instances"]:
update_content_on_context_change()
dialog.deleteLater()

View file

@@ -0,0 +1,190 @@
import os
import sys
from qtpy import QtWidgets, QtCore, QtGui
from ayon_core.tools.utils import host_tools
from ayon_core.style import load_stylesheet
from ayon_core.lib import register_event_callback
from ayon_fusion.scripts import (
duplicate_with_inputs,
)
from ayon_fusion.api.lib import (
set_current_context_framerange,
set_current_context_resolution,
)
from ayon_core.pipeline import get_current_folder_path
from ayon_core.resources import get_ayon_icon_filepath
from ayon_core.tools.utils import get_qt_app
from .pipeline import FusionEventHandler
from .pulse import FusionPulse
MENU_LABEL = os.environ["AYON_MENU_LABEL"]
self = sys.modules[__name__]
self.menu = None
class AYONMenu(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(AYONMenu, self).__init__(*args, **kwargs)
self.setObjectName(f"{MENU_LABEL}Menu")
icon_path = get_ayon_icon_filepath()
icon = QtGui.QIcon(icon_path)
self.setWindowIcon(icon)
self.setWindowFlags(
QtCore.Qt.Window
| QtCore.Qt.CustomizeWindowHint
| QtCore.Qt.WindowTitleHint
| QtCore.Qt.WindowMinimizeButtonHint
| QtCore.Qt.WindowCloseButtonHint
| QtCore.Qt.WindowStaysOnTopHint
)
self.render_mode_widget = None
self.setWindowTitle(MENU_LABEL)
context_label = QtWidgets.QLabel("Context", self)
context_label.setStyleSheet(
"""QLabel {
font-size: 14px;
font-weight: 600;
color: #5f9fb8;
}"""
)
context_label.setAlignment(QtCore.Qt.AlignHCenter)
workfiles_btn = QtWidgets.QPushButton("Workfiles...", self)
create_btn = QtWidgets.QPushButton("Create...", self)
load_btn = QtWidgets.QPushButton("Load...", self)
publish_btn = QtWidgets.QPushButton("Publish...", self)
manager_btn = QtWidgets.QPushButton("Manage...", self)
libload_btn = QtWidgets.QPushButton("Library...", self)
set_framerange_btn = QtWidgets.QPushButton("Set Frame Range", self)
set_resolution_btn = QtWidgets.QPushButton("Set Resolution", self)
duplicate_with_inputs_btn = QtWidgets.QPushButton(
"Duplicate with input connections", self
)
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(10, 20, 10, 20)
layout.addWidget(context_label)
layout.addSpacing(20)
layout.addWidget(workfiles_btn)
layout.addSpacing(20)
layout.addWidget(create_btn)
layout.addWidget(load_btn)
layout.addWidget(publish_btn)
layout.addWidget(manager_btn)
layout.addSpacing(20)
layout.addWidget(libload_btn)
layout.addSpacing(20)
layout.addWidget(set_framerange_btn)
layout.addWidget(set_resolution_btn)
layout.addSpacing(20)
layout.addWidget(duplicate_with_inputs_btn)
self.setLayout(layout)
# Store reference so we can update the label
self.context_label = context_label
workfiles_btn.clicked.connect(self.on_workfile_clicked)
create_btn.clicked.connect(self.on_create_clicked)
publish_btn.clicked.connect(self.on_publish_clicked)
load_btn.clicked.connect(self.on_load_clicked)
manager_btn.clicked.connect(self.on_manager_clicked)
libload_btn.clicked.connect(self.on_libload_clicked)
duplicate_with_inputs_btn.clicked.connect(
self.on_duplicate_with_inputs_clicked
)
set_resolution_btn.clicked.connect(self.on_set_resolution_clicked)
set_framerange_btn.clicked.connect(self.on_set_framerange_clicked)
self._callbacks = []
self.register_callback("taskChanged", self.on_task_changed)
self.on_task_changed()
# Force close current process if Fusion is closed
self._pulse = FusionPulse(parent=self)
self._pulse.start()
# Detect Fusion events as AYON events
self._event_handler = FusionEventHandler(parent=self)
self._event_handler.start()
def on_task_changed(self):
# Update current context label
label = get_current_folder_path()
self.context_label.setText(label)
def register_callback(self, name, fn):
# Create a wrapper callback that we only store
# for as long as we want it to persist as callback
def _callback(*args):
fn()
self._callbacks.append(_callback)
register_event_callback(name, _callback)
def deregister_all_callbacks(self):
self._callbacks[:] = []
def on_workfile_clicked(self):
host_tools.show_workfiles()
def on_create_clicked(self):
host_tools.show_publisher(tab="create")
def on_publish_clicked(self):
host_tools.show_publisher(tab="publish")
def on_load_clicked(self):
host_tools.show_loader(use_context=True)
def on_manager_clicked(self):
host_tools.show_scene_inventory()
def on_libload_clicked(self):
host_tools.show_library_loader()
def on_duplicate_with_inputs_clicked(self):
duplicate_with_inputs.duplicate_with_input_connections()
def on_set_resolution_clicked(self):
set_current_context_resolution()
def on_set_framerange_clicked(self):
set_current_context_framerange()
def launch_ayon_menu():
app = get_qt_app()
ayon_menu = AYONMenu()
stylesheet = load_stylesheet()
ayon_menu.setStyleSheet(stylesheet)
ayon_menu.show()
self.menu = ayon_menu
result = app.exec_()
print("Shutting down..")
sys.exit(result)

View file

@@ -0,0 +1,439 @@
"""
Basic avalon integration
"""
import os
import sys
import logging
import contextlib
from pathlib import Path
import pyblish.api
from qtpy import QtCore
from ayon_core.lib import (
Logger,
register_event_callback,
emit_event
)
from ayon_core.pipeline import (
register_loader_plugin_path,
register_creator_plugin_path,
register_inventory_action_path,
AVALON_CONTAINER_ID,
)
from ayon_core.pipeline.load import any_outdated_containers
from ayon_core.host import HostBase, IWorkfileHost, ILoadHost, IPublishHost
from ayon_core.tools.utils import host_tools
from ayon_fusion import FUSION_ADDON_ROOT
from .lib import (
get_current_comp,
validate_comp_prefs,
prompt_reset_context
)
log = Logger.get_logger(__name__)
PLUGINS_DIR = os.path.join(FUSION_ADDON_ROOT, "plugins")
PUBLISH_PATH = os.path.join(PLUGINS_DIR, "publish")
LOAD_PATH = os.path.join(PLUGINS_DIR, "load")
CREATE_PATH = os.path.join(PLUGINS_DIR, "create")
INVENTORY_PATH = os.path.join(PLUGINS_DIR, "inventory")
# Track whether the workfile tool is about to save
_about_to_save = False
class FusionLogHandler(logging.Handler):
# Keep a reference to fusion's Print function (Remote Object)
_print = None
@property
def print(self):
if self._print is not None:
# Use cached
return self._print
_print = getattr(sys.modules["__main__"], "fusion").Print
if _print is None:
# Backwards compatibility: Print method on Fusion instance was
# added around Fusion 17.4 and wasn't available on PyRemote Object
# before
_print = get_current_comp().Print
self._print = _print
return _print
def emit(self, record):
entry = self.format(record)
self.print(entry)
class FusionHost(HostBase, IWorkfileHost, ILoadHost, IPublishHost):
name = "fusion"
def install(self):
"""Install fusion-specific functionality of AYON.
This is where you install menus and register families, data
and loaders into fusion.
It is called automatically when installing via
`ayon_core.pipeline.install_host(ayon_fusion.api)`
See the Maya equivalent for inspiration on how to implement this.
"""
# Remove all handlers associated with the root logger object, because
# that one always logs as "warnings" incorrectly.
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
# Attach default logging handler that prints to active comp
logger = logging.getLogger()
formatter = logging.Formatter(fmt="%(message)s\n")
handler = FusionLogHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
pyblish.api.register_host("fusion")
pyblish.api.register_plugin_path(PUBLISH_PATH)
log.info("Registering Fusion plug-ins..")
register_loader_plugin_path(LOAD_PATH)
register_creator_plugin_path(CREATE_PATH)
register_inventory_action_path(INVENTORY_PATH)
# Register events
register_event_callback("open", on_after_open)
register_event_callback("workfile.save.before", before_workfile_save)
register_event_callback("save", on_save)
register_event_callback("new", on_new)
register_event_callback("taskChanged", on_task_changed)
# region workfile io api
def has_unsaved_changes(self):
comp = get_current_comp()
return comp.GetAttrs()["COMPB_Modified"]
def get_workfile_extensions(self):
return [".comp"]
def save_workfile(self, dst_path=None):
comp = get_current_comp()
comp.Save(dst_path)
def open_workfile(self, filepath):
# Hack to get fusion, see
# ayon_fusion.api.pipeline.get_current_comp()
fusion = getattr(sys.modules["__main__"], "fusion", None)
return fusion.LoadComp(filepath)
def get_current_workfile(self):
comp = get_current_comp()
current_filepath = comp.GetAttrs()["COMPS_FileName"]
if not current_filepath:
return None
return current_filepath
def work_root(self, session):
work_dir = session["AYON_WORKDIR"]
scene_dir = session.get("AVALON_SCENEDIR")
if scene_dir:
return os.path.join(work_dir, scene_dir)
else:
return work_dir
# endregion
@contextlib.contextmanager
def maintained_selection(self):
from .lib import maintained_selection
return maintained_selection()
def get_containers(self):
return ls()
def update_context_data(self, data, changes):
comp = get_current_comp()
comp.SetData("openpype", data)
def get_context_data(self):
comp = get_current_comp()
return comp.GetData("openpype") or {}
def on_new(event):
comp = event["Rets"]["comp"]
validate_comp_prefs(comp, force_repair=True)
def on_save(event):
comp = event["sender"]
validate_comp_prefs(comp)
# We are now starting the actual save directly
global _about_to_save
_about_to_save = False
def on_task_changed():
global _about_to_save
print(f"Task changed: {_about_to_save}")
# TODO: Only do this if not headless
if _about_to_save:
# Let's prompt the user to update the context settings or not
prompt_reset_context()
def on_after_open(event):
comp = event["sender"]
validate_comp_prefs(comp)
if any_outdated_containers():
log.warning("Scene has outdated content.")
# Find AYON menu to attach to
from . import menu
def _on_show_scene_inventory():
# ensure that comp is active
frame = comp.CurrentFrame
if not frame:
print("Comp is closed, skipping show scene inventory")
return
frame.ActivateFrame() # raise comp window
host_tools.show_scene_inventory()
from ayon_core.tools.utils import SimplePopup
from ayon_core.style import load_stylesheet
dialog = SimplePopup(parent=menu.menu)
dialog.setWindowTitle("Fusion comp has outdated content")
dialog.set_message("There are outdated containers in "
"your Fusion comp.")
dialog.on_clicked.connect(_on_show_scene_inventory)
dialog.show()
dialog.raise_()
dialog.activateWindow()
dialog.setStyleSheet(load_stylesheet())
def before_workfile_save(event):
    # Due to Fusion's external python process design we can't really
    # detect whether the current Fusion environment matches the one the
    # artist expects it to be. For example, our pipeline python process
    # might have been shut down and restarted - which restarts it with the
    # environment Fusion started with; not necessarily the context the
    # artist is currently working in.
    # The `_about_to_save` var is used to detect context changes when
    # saving into another asset. If we keep it False it will be ignored
    # as a context change. As such, before we change tasks we only consider
    # it a context change if the current filepath is within the currently
    # known AYON_WORKDIR. This way we avoid false positives of thinking
    # we're saving to another context, and instead sometimes just have
    # false negatives where we fail to show the "Update on task change"
    # prompt.
comp = get_current_comp()
filepath = comp.GetAttrs()["COMPS_FileName"]
workdir = os.environ.get("AYON_WORKDIR")
if Path(workdir) in Path(filepath).parents:
global _about_to_save
_about_to_save = True
def ls():
"""List containers from active Fusion scene
This is the host-equivalent of api.ls(), but instead of listing
assets on disk, it lists assets already loaded in Fusion; once loaded
they are called 'containers'
Yields:
dict: container
"""
comp = get_current_comp()
tools = comp.GetToolList(False).values()
for tool in tools:
container = parse_container(tool)
if container:
yield container
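# A minimal usage sketch: iterate the containers in the current comp.
#     for container in ls():
#         print(container["name"], container["representation"])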
def imprint_container(tool,
name,
namespace,
context,
loader=None):
"""Imprint a Loader with metadata
    Containerisation enables tracking of version, author and origin
    for loaded assets.
Arguments:
tool (object): The node in Fusion to imprint as container, usually a
Loader.
name (str): Name of resulting assembly
namespace (str): Namespace under which to host container
context (dict): Asset information
loader (str, optional): Name of loader used to produce this container.
Returns:
None
"""
data = [
("schema", "openpype:container-2.0"),
("id", AVALON_CONTAINER_ID),
("name", str(name)),
("namespace", str(namespace)),
("loader", str(loader)),
("representation", context["representation"]["id"]),
]
for key, value in data:
tool.SetData("avalon.{}".format(key), value)
def parse_container(tool):
"""Returns imprinted container data of a tool
This reads the imprinted data from `imprint_container`.
"""
data = tool.GetData('avalon')
if not isinstance(data, dict):
return
    # If any required key is missing this is not a valid container
required = ['schema', 'id', 'name',
'namespace', 'loader', 'representation']
if not all(key in data for key in required):
return
container = {key: data[key] for key in required}
# Store the tool's name
container["objectName"] = tool.Name
# Store reference to the tool object
container["_tool"] = tool
return container
class FusionEventThread(QtCore.QThread):
"""QThread which will periodically ping Fusion app for any events.
The fusion.UIManager must be set up to be notified of events before they'll
be reported by this thread, for example:
fusion.UIManager.AddNotify("Comp_Save", None)
"""
on_event = QtCore.Signal(dict)
def run(self):
app = getattr(sys.modules["__main__"], "app", None)
if app is None:
# No Fusion app found
return
# As optimization store the GetEvent method directly because every
# getattr of UIManager.GetEvent tries to resolve the Remote Function
# through the PyRemoteObject
get_event = app.UIManager.GetEvent
delay = int(os.environ.get("AYON_FUSION_CALLBACK_INTERVAL", 1000))
while True:
if self.isInterruptionRequested():
return
# Process all events that have been queued up until now
while True:
event = get_event(False)
if not event:
break
self.on_event.emit(event)
# Wait some time before processing events again
# to not keep blocking the UI
self.msleep(delay)
class FusionEventHandler(QtCore.QObject):
"""Emits AYON events based on Fusion events captured in a QThread.
This will emit the following AYON events based on Fusion actions:
save: Comp_Save, Comp_SaveAs
open: Comp_Opened
new: Comp_New
To use this you can attach it to you Qt UI so it runs in the background.
E.g.
>>> handler = FusionEventHandler(parent=window)
>>> handler.start()
"""
ACTION_IDS = [
"Comp_Save",
"Comp_SaveAs",
"Comp_New",
"Comp_Opened"
]
def __init__(self, parent=None):
super(FusionEventHandler, self).__init__(parent=parent)
# Set up Fusion event callbacks
fusion = getattr(sys.modules["__main__"], "fusion", None)
ui = fusion.UIManager
# Add notifications for the ones we want to listen to
notifiers = []
for action_id in self.ACTION_IDS:
notifier = ui.AddNotify(action_id, None)
notifiers.append(notifier)
# TODO: Not entirely sure whether these must be kept to avoid
# garbage collection
self._notifiers = notifiers
self._event_thread = FusionEventThread(parent=self)
self._event_thread.on_event.connect(self._on_event)
def start(self):
self._event_thread.start()
def stop(self):
        # QThread has no stop(); request interruption so run() exits cleanly
        self._event_thread.requestInterruption()
def _on_event(self, event):
"""Handle Fusion events to emit AYON events"""
if not event:
return
what = event["what"]
# Comp Save
if what in {"Comp_Save", "Comp_SaveAs"}:
if not event["Rets"].get("success"):
# If the Save action is cancelled it will still emit an
# event but with "success": False so we ignore those cases
return
# Comp was saved
emit_event("save", data=event)
return
# Comp New
elif what in {"Comp_New"}:
emit_event("new", data=event)
# Comp Opened
elif what in {"Comp_Opened"}:
emit_event("open", data=event)

View file

@@ -0,0 +1,278 @@
from copy import deepcopy
import os
from ayon_fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk,
)
from ayon_core.lib import (
BoolDef,
EnumDef,
)
from ayon_core.pipeline import (
Creator,
CreatedInstance,
AVALON_INSTANCE_ID,
AYON_INSTANCE_ID,
)
from ayon_core.pipeline.workfile import get_workdir
from ayon_api import (
get_project,
get_folder_by_path,
get_task_by_name
)
class GenericCreateSaver(Creator):
default_variants = ["Main", "Mask"]
description = "Fusion Saver to generate image sequence"
icon = "fa5.eye"
instance_attributes = [
"reviewable"
]
settings_category = "fusion"
image_format = "exr"
# TODO: This should be renamed together with Nuke so it is aligned
temp_rendering_path_template = (
"{workdir}/renders/fusion/{product[name]}/"
"{product[name]}.{frame}.{ext}"
)
def create(self, product_name, instance_data, pre_create_data):
self.pass_pre_attributes_to_instance(instance_data, pre_create_data)
instance = CreatedInstance(
product_type=self.product_type,
product_name=product_name,
data=instance_data,
creator=self,
)
data = instance.data_to_store()
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp):
args = (-32768, -32768) # Magical position numbers
saver = comp.AddTool("Saver", *args)
self._update_tool_with_data(saver, data=data)
# Register the CreatedInstance
self._imprint(saver, data)
# Insert the transient data
instance.transient_data["tool"] = saver
self._add_instance_to_context(instance)
return instance
def collect_instances(self):
comp = get_current_comp()
tools = comp.GetToolList(False, "Saver").values()
for tool in tools:
data = self.get_managed_tool_data(tool)
if not data:
continue
# Add instance
created_instance = CreatedInstance.from_existing(data, self)
# Collect transient data
created_instance.transient_data["tool"] = tool
self._add_instance_to_context(created_instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
new_data = created_inst.data_to_store()
tool = created_inst.transient_data["tool"]
self._update_tool_with_data(tool, new_data)
self._imprint(tool, new_data)
def remove_instances(self, instances):
for instance in instances:
# Remove the tool from the scene
tool = instance.transient_data["tool"]
if tool:
tool.Delete()
# Remove the collected CreatedInstance to remove from UI directly
self._remove_instance_from_context(instance)
def _imprint(self, tool, data):
# Save all data in a "openpype.{key}" = value data
# Instance id is the tool's name so we don't need to imprint as data
data.pop("instance_id", None)
active = data.pop("active", None)
if active is not None:
# Use active value to set the passthrough state
tool.SetAttrs({"TOOLB_PassThrough": not active})
for key, value in data.items():
tool.SetData(f"openpype.{key}", value)
def _update_tool_with_data(self, tool, data):
"""Update tool node name and output path based on product data"""
if "productName" not in data:
return
original_product_name = tool.GetData("openpype.productName")
original_format = tool.GetData(
"openpype.creator_attributes.image_format"
)
product_name = data["productName"]
if (
original_product_name != product_name
or tool.GetData("openpype.task") != data["task"]
or tool.GetData("openpype.folderPath") != data["folderPath"]
or original_format != data["creator_attributes"]["image_format"]
):
self._configure_saver_tool(data, tool, product_name)
def _configure_saver_tool(self, data, tool, product_name):
formatting_data = deepcopy(data)
# get frame padding from anatomy templates
frame_padding = self.project_anatomy.templates_obj.frame_padding
# get output format
ext = data["creator_attributes"]["image_format"]
# Product change detected
product_type = formatting_data["productType"]
f_product_name = formatting_data["productName"]
folder_path = formatting_data["folderPath"]
folder_name = folder_path.rsplit("/", 1)[-1]
# If the folder path and task do not match the current context then the
# workdir is not just the `AYON_WORKDIR`. Hence, we need to actually
# compute the resulting workdir
if (
data["folderPath"] == self.create_context.get_current_folder_path()
and data["task"] == self.create_context.get_current_task_name()
):
workdir = os.path.normpath(os.getenv("AYON_WORKDIR"))
else:
# TODO: Optimize this logic
project_name = self.create_context.get_current_project_name()
project_entity = get_project(project_name)
folder_entity = get_folder_by_path(project_name,
data["folderPath"])
task_entity = get_task_by_name(project_name,
folder_id=folder_entity["id"],
task_name=data["task"])
workdir = get_workdir(
project_entity=project_entity,
folder_entity=folder_entity,
task_entity=task_entity,
host_name=self.create_context.host_name,
)
formatting_data.update({
"workdir": workdir,
"frame": "0" * frame_padding,
"ext": ext,
"product": {
"name": f_product_name,
"type": product_type,
},
# TODO add more variants for 'folder' and 'task'
"folder": {
"name": folder_name,
},
"task": {
"name": data["task"],
},
# Backwards compatibility
"asset": folder_name,
"subset": f_product_name,
"family": product_type,
})
# build file path to render
# TODO make sure the keys are available in 'formatting_data'
temp_rendering_path_template = (
self.temp_rendering_path_template
.replace("{task}", "{task[name]}")
)
filepath = temp_rendering_path_template.format(**formatting_data)
comp = get_current_comp()
tool["Clip"] = comp.ReverseMapPath(os.path.normpath(filepath))
# Rename tool
if tool.Name != product_name:
print(f"Renaming {tool.Name} -> {product_name}")
tool.SetAttrs({"TOOLS_Name": product_name})
def get_managed_tool_data(self, tool):
"""Return data of the tool if it matches creator identifier"""
data = tool.GetData("openpype")
if not isinstance(data, dict):
return
if (
data.get("creator_identifier") != self.identifier
or data.get("id") not in {
AYON_INSTANCE_ID, AVALON_INSTANCE_ID
}
):
return
# Get active state from the actual tool state
attrs = tool.GetAttrs()
passthrough = attrs["TOOLB_PassThrough"]
data["active"] = not passthrough
# Override publisher's UUID generation because tool names are
# already unique in Fusion in a comp
data["instance_id"] = tool.Name
return data
def get_instance_attr_defs(self):
"""Settings for publish page"""
return self.get_pre_create_attr_defs()
def pass_pre_attributes_to_instance(self, instance_data, pre_create_data):
creator_attrs = instance_data["creator_attributes"] = {}
for pass_key in pre_create_data.keys():
creator_attrs[pass_key] = pre_create_data[pass_key]
def _get_render_target_enum(self):
rendering_targets = {
"local": "Local machine rendering",
"frames": "Use existing frames",
}
if "farm_rendering" in self.instance_attributes:
rendering_targets["farm"] = "Farm rendering"
return EnumDef(
"render_target", items=rendering_targets, label="Render target"
)
def _get_reviewable_bool(self):
return BoolDef(
"review",
default=("reviewable" in self.instance_attributes),
label="Review",
)
def _get_image_format_enum(self):
image_format_options = ["exr", "tga", "tif", "png", "jpg"]
return EnumDef(
"image_format",
items=image_format_options,
default=self.image_format,
label="Output Image Format",
)
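# An illustrative expansion of temp_rendering_path_template (the workdir,
# product name and padding below are assumptions, not real project data):
#     "{workdir}/renders/fusion/{product[name]}/{product[name]}.{frame}.{ext}"
#     -> "C:/work/sh010/renders/fusion/renderMain/renderMain.0000.exr"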

View file

@@ -0,0 +1,63 @@
import os
import sys
from qtpy import QtCore
class PulseThread(QtCore.QThread):
no_response = QtCore.Signal()
def __init__(self, parent=None):
super(PulseThread, self).__init__(parent=parent)
def run(self):
app = getattr(sys.modules["__main__"], "app", None)
        # Interval in milliseconds (msleep requires an int)
        interval = int(os.environ.get("AYON_FUSION_PULSE_INTERVAL", 1000))
while True:
if self.isInterruptionRequested():
return
# We don't need to call Test because PyRemoteObject of the app
# will actually fail to even resolve the Test function if it has
# gone down. So we can actually already just check by confirming
# the method is still getting resolved. (Optimization)
if app.Test is None:
self.no_response.emit()
self.msleep(interval)
class FusionPulse(QtCore.QObject):
"""A Timer that checks whether host app is still alive.
This checks whether the Fusion process is still active at a certain
interval. This is useful due to how Fusion runs its scripts. Each script
runs in its own environment and process (a `fusionscript` process each).
    If Fusion goes down while we have a UI process running at the same
    time, the `fusionscript.exe` can remain running in the background in
    limbo, e.g. due to a Qt interface's QApplication that keeps running
    indefinitely.
Warning:
When the host is not detected this will automatically exit
the current process.
"""
def __init__(self, parent=None):
super(FusionPulse, self).__init__(parent=parent)
self._thread = PulseThread(parent=self)
self._thread.no_response.connect(self.on_no_response)
def on_no_response(self):
print("Pulse detected no response from Fusion..")
sys.exit(1)
def start(self):
self._thread.start()
def stop(self):
self._thread.requestInterruption()

View file

@@ -0,0 +1,6 @@
### AYON deploy MenuScripts
Note that this `MenuScripts` is not an official Fusion folder.
AYON only uses this folder in `{fusion}/deploy/` to trigger the AYON menu actions.
They are used in the actions defined in `.fu` files in `{fusion}/deploy/Config`.

View file

@@ -0,0 +1,29 @@
# This is just a quick hack for users running Py3 locally but having no
# Qt library installed
import os
import subprocess
import importlib
try:
from qtpy import API_NAME
print(f"Qt binding: {API_NAME}")
mod = importlib.import_module(API_NAME)
print(f"Qt path: {mod.__file__}")
print("Qt library found, nothing to do..")
except ImportError:
print("Assuming no Qt library is installed..")
print('Installing PySide2 for Python 3.6: '
f'{os.environ["FUSION16_PYTHON36_HOME"]}')
# Get full path to python executable
exe = "python.exe" if os.name == 'nt' else "python"
python = os.path.join(os.environ["FUSION16_PYTHON36_HOME"], exe)
assert os.path.exists(python), f"Python doesn't exist: {python}"
# Do python -m pip install PySide2
args = [python, "-m", "pip", "install", "PySide2"]
print(f"Args: {args}")
subprocess.Popen(args)

View file

@@ -0,0 +1,47 @@
import os
import sys
if sys.version_info < (3, 7):
    # hack to handle discrepancy between distributed libraries and Python 3.6
    # mostly because of a wrong version of urllib3
# TODO remove when not necessary
from ayon_fusion import FUSION_ADDON_ROOT
vendor_path = os.path.join(FUSION_ADDON_ROOT, "vendor")
if vendor_path not in sys.path:
sys.path.insert(0, vendor_path)
print(f"Added vendorized libraries from {vendor_path}")
from ayon_core.lib import Logger
from ayon_core.pipeline import (
install_host,
registered_host,
)
def main(env):
    # This script's working directory starts in the Fusion application
    # folder. However the contents of that folder can conflict with Qt
    # library dlls, so we move out of it to avoid DLL Load Failed errors.
os.chdir("..")
from ayon_fusion.api import FusionHost
from ayon_fusion.api import menu
# activate resolve from pype
install_host(FusionHost())
log = Logger.get_logger(__name__)
log.info(f"Registered host: {registered_host()}")
menu.launch_ayon_menu()
# Initiate a QTimer to check if Fusion is still alive every X interval
# If Fusion is not found - kill itself
# todo(roy): Implement timer that ensures UI doesn't remain when e.g.
# Fusion closes down
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))

View file

@@ -0,0 +1,60 @@
{
Action
{
ID = "AYON_Menu",
Category = "AYON",
Name = "AYON Menu",
Targets =
{
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("AYON:../MenuScripts/launch_menu.py")
if bmd.fileexists(scriptPath) == false then
print("[AYON Error] Can't run file: " .. scriptPath)
else
target:RunScript(scriptPath)
end
]=],
},
},
},
Action
{
ID = "AYON_Install_PySide2",
Category = "AYON",
Name = "Install PySide2",
Targets =
{
Composition =
{
Execute = _Lua [=[
local scriptPath = app:MapPath("AYON:../MenuScripts/install_pyside2.py")
if bmd.fileexists(scriptPath) == false then
print("[AYON Error] Can't run file: " .. scriptPath)
else
target:RunScript(scriptPath)
end
]=],
},
},
},
Menus
{
Target = "ChildFrame",
Before "Help"
{
Sub "AYON"
{
"AYON_Menu{}",
"_",
Sub "Admin" {
"AYON_Install_PySide2{}"
}
}
},
},
}

View file

@@ -0,0 +1,19 @@
{
Locked = true,
Global = {
Paths = {
Map = {
["AYON:"] = "$(AYON_FUSION_ROOT)/deploy/ayon",
["Config:"] = "UserPaths:Config;AYON:Config",
["Scripts:"] = "UserPaths:Scripts;Reactor:System/Scripts",
},
},
Script = {
PythonVersion = 3,
Python3Forced = true
},
UserInterface = {
Language = "en_US"
},
},
}

View file

@@ -0,0 +1,36 @@
import os
from ayon_applications import PreLaunchHook
from ayon_fusion import FUSION_ADDON_ROOT
class FusionLaunchMenuHook(PreLaunchHook):
"""Launch AYON menu on start of Fusion"""
app_groups = ["fusion"]
order = 9
def execute(self):
# Prelaunch hook is optional
settings = self.data["project_settings"][self.host_name]
if not settings["hooks"]["FusionLaunchMenuHook"]["enabled"]:
return
variant = self.application.name
if variant.isnumeric():
version = int(variant)
if version < 18:
print("Skipping launch of OpenPype menu on Fusion start "
"because Fusion version below 18.0 does not support "
"/execute argument on launch. "
f"Version detected: {version}")
return
else:
print(f"Application variant is not numeric: {variant}. "
"Validation for Fusion version 18+ for /execute "
"prelaunch argument skipped.")
path = os.path.join(FUSION_ADDON_ROOT,
"deploy",
"MenuScripts",
"launch_menu.py").replace("\\", "/")
script = f"fusion:RunScript('{path}')"
self.launch_context.launch_args.extend(["/execute", script])

View file

@@ -0,0 +1,169 @@
import os
import shutil
import platform
from pathlib import Path
from ayon_fusion import (
FUSION_ADDON_ROOT,
FUSION_VERSIONS_DICT,
get_fusion_version,
)
from ayon_applications import (
PreLaunchHook,
LaunchTypes,
ApplicationLaunchFailed,
)
class FusionCopyPrefsPrelaunch(PreLaunchHook):
    """
    Prepares local Fusion profile directory, copies existing Fusion profile.
    This also sets the FUSION*_MasterPrefs variable, which is used
    to apply the Master.prefs file overriding some Fusion profile settings:
        - enable the AYON menu
        - force Python 3 over Python 2
        - force English interface
    Master.prefs is defined in ayon_fusion/deploy/ayon/fusion_shared.prefs
    """
app_groups = {"fusion"}
order = 2
launch_types = {LaunchTypes.local}
def get_fusion_profile_name(self, profile_version) -> str:
# Returns 'Default', unless FUSION16_PROFILE is set
return os.getenv(f"FUSION{profile_version}_PROFILE", "Default")
def get_fusion_profile_dir(self, profile_version) -> Path:
# Get FUSION_PROFILE_DIR variable
fusion_profile = self.get_fusion_profile_name(profile_version)
fusion_var_prefs_dir = os.getenv(
f"FUSION{profile_version}_PROFILE_DIR"
)
# Check if FUSION_PROFILE_DIR exists
if fusion_var_prefs_dir and Path(fusion_var_prefs_dir).is_dir():
fu_prefs_dir = Path(fusion_var_prefs_dir, fusion_profile)
self.log.info(f"{fusion_var_prefs_dir} is set to {fu_prefs_dir}")
return fu_prefs_dir
def get_profile_source(self, profile_version) -> Path:
"""Get Fusion preferences profile location.
See Per-User_Preferences_and_Paths on VFXpedia for reference.
"""
fusion_profile = self.get_fusion_profile_name(profile_version)
profile_source = self.get_fusion_profile_dir(profile_version)
if profile_source:
return profile_source
# otherwise get default location of the profile folder
fu_prefs_dir = f"Blackmagic Design/Fusion/Profiles/{fusion_profile}"
if platform.system() == "Windows":
profile_source = Path(os.getenv("AppData"), fu_prefs_dir)
elif platform.system() == "Darwin":
profile_source = Path(
"~/Library/Application Support/", fu_prefs_dir
).expanduser()
elif platform.system() == "Linux":
profile_source = Path("~/.fusion", fu_prefs_dir).expanduser()
self.log.info(
f"Locating source Fusion prefs directory: {profile_source}"
)
return profile_source
def get_copy_fusion_prefs_settings(self):
# Get copy preferences options from the global application settings
copy_fusion_settings = self.data["project_settings"]["fusion"].get(
"copy_fusion_settings", {}
)
if not copy_fusion_settings:
self.log.error("Copy prefs settings not found")
copy_status = copy_fusion_settings.get("copy_status", False)
force_sync = copy_fusion_settings.get("force_sync", False)
copy_path = copy_fusion_settings.get("copy_path") or None
if copy_path:
copy_path = Path(copy_path).expanduser()
return copy_status, copy_path, force_sync
def copy_fusion_profile(
self, copy_from: Path, copy_to: Path, force_sync: bool
) -> None:
"""On the first Fusion launch copy the contents of Fusion profile
directory to the working predefined location. If the Openpype profile
folder exists, skip copying, unless re-sync is checked.
If the prefs were not copied on the first launch,
clean Fusion profile will be created in fu_profile_dir.
"""
if copy_to.exists() and not force_sync:
self.log.info(
"Destination Fusion preferences folder already exists: "
f"{copy_to} "
)
return
self.log.info("Starting copying Fusion preferences")
self.log.debug(f"force_sync option is set to {force_sync}")
try:
copy_to.mkdir(exist_ok=True, parents=True)
except PermissionError:
self.log.warning(f"Creating the folder not permitted at {copy_to}")
return
if not copy_from.exists():
self.log.warning(f"Fusion preferences not found in {copy_from}")
return
for file in copy_from.iterdir():
if file.suffix in (
".prefs",
".def",
".blocklist",
".fu",
".toolbars",
):
                # convert Path to str for compatibility with older Pythons
shutil.copy(str(file), str(copy_to))
self.log.info(
f"Successfully copied preferences: {copy_from} to {copy_to}"
)
def execute(self):
(
copy_status,
fu_profile_dir,
force_sync,
) = self.get_copy_fusion_prefs_settings()
# Get launched application context and return correct app version
app_name = self.launch_context.env.get("AYON_APP_NAME")
app_version = get_fusion_version(app_name)
if app_version is None:
version_names = ", ".join(str(x) for x in FUSION_VERSIONS_DICT)
raise ApplicationLaunchFailed(
"Unable to detect valid Fusion version number from app "
f"name: {app_name}.\nMake sure to include at least a digit "
"to indicate the Fusion version like '18'.\n"
f"Detectable Fusion versions are: {version_names}"
)
_, profile_version = FUSION_VERSIONS_DICT[app_version]
fu_profile = self.get_fusion_profile_name(profile_version)
# do a copy of Fusion profile if copy_status toggle is enabled
if copy_status and fu_profile_dir is not None:
profile_source = self.get_profile_source(profile_version)
dest_folder = Path(fu_profile_dir, fu_profile)
self.copy_fusion_profile(profile_source, dest_folder, force_sync)
# Add temporary profile directory variables to customize Fusion
# to define where it can read custom scripts and tools from
fu_profile_dir_variable = f"FUSION{profile_version}_PROFILE_DIR"
self.log.info(f"Setting {fu_profile_dir_variable}: {fu_profile_dir}")
self.launch_context.env[fu_profile_dir_variable] = str(fu_profile_dir)
# Point Fusion at the custom Master Prefs shipped with the addon
master_prefs_variable = f"FUSION{profile_version}_MasterPrefs"
master_prefs = Path(
FUSION_ADDON_ROOT, "deploy", "ayon", "fusion_shared.prefs")
self.log.info(f"Setting {master_prefs_variable}: {master_prefs}")
self.launch_context.env[master_prefs_variable] = str(master_prefs)
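For reference, a minimal sketch of the launch environment this hook produces, assuming Fusion 18 (profile version 18); the directory values are hypothetical stand-ins, not real deployment paths:

# Hypothetical illustration of the resulting launch environment
profile_version = 18
fu_profile_dir = "C:/ayon/fusion_profiles"
master_prefs = "<addon_root>/deploy/ayon/fusion_shared.prefs"
env = {
    f"FUSION{profile_version}_PROFILE_DIR": fu_profile_dir,
    f"FUSION{profile_version}_MasterPrefs": master_prefs,
}
print(env["FUSION18_PROFILE_DIR"])  # C:/ayon/fusion_profiles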

View file

@ -0,0 +1,71 @@
import os
from ayon_applications import (
PreLaunchHook,
LaunchTypes,
ApplicationLaunchFailed,
)
from ayon_fusion import (
FUSION_ADDON_ROOT,
FUSION_VERSIONS_DICT,
get_fusion_version,
)
class FusionPrelaunch(PreLaunchHook):
"""
Prepares the AYON Fusion environment.
Requires the Python home variable in the environment settings to point
at a valid Python 3 build for Fusion.
Python 3 versions supported by Fusion:
Fusion 9, 16, 17 : Python 3.6
Fusion 18 : Python 3.6 - 3.10
"""
app_groups = {"fusion"}
order = 1
launch_types = {LaunchTypes.local}
def execute(self):
# making sure python 3 is installed at the provided path
# Py 3.6-3.10 for Fusion 18+ or Py 3.6 for Fusion 9/16/17
app_data = self.launch_context.env.get("AYON_APP_NAME")
app_version = get_fusion_version(app_data)
if not app_version:
raise ApplicationLaunchFailed(
"Fusion version information not found in System settings.\n"
"The key field in the 'applications/fusion/variants' should "
"consist a number, corresponding to major Fusion version."
)
py3_var, _ = FUSION_VERSIONS_DICT[app_version]
fusion_python3_home = self.launch_context.env.get(py3_var, "")
for path in fusion_python3_home.split(os.pathsep):
# Allow defining multiple paths, separated by os.pathsep,
# to allow "fallback" to other path.
# But make to set only a single path as final variable.
py3_dir = os.path.normpath(path)
if os.path.isdir(py3_dir):
break
else:
raise ApplicationLaunchFailed(
"Python 3 is not installed at the provided path.\n"
"Make sure the environment in fusion settings has "
"'FUSION_PYTHON3_HOME' set correctly and make sure "
"Python 3 is installed in the given path."
f"\n\nPYTHON PATH: {fusion_python3_home}"
)
self.log.info(f"Setting {py3_var}: '{py3_dir}'...")
self.launch_context.env[py3_var] = py3_dir
# Fusion 18+ requires FUSION_PYTHON3_HOME to also be on PATH
if app_version >= 18:
self.launch_context.env["PATH"] += os.pathsep + py3_dir
# for hook installing PySide2
self.data["fusion_python3_home"] = py3_dir
self.log.info(f"Setting AYON_FUSION_ROOT: {FUSION_ADDON_ROOT}")
self.launch_context.env["AYON_FUSION_ROOT"] = FUSION_ADDON_ROOT

View file

@ -0,0 +1,185 @@
import os
import subprocess
import platform
import uuid
from ayon_applications import PreLaunchHook, LaunchTypes
class InstallPySideToFusion(PreLaunchHook):
"""Automatically installs Qt binding to fusion's python packages.
Check if fusion has installed PySide2 and will try to install if not.
For pipeline implementation is required to have Qt binding installed in
fusion's python packages.
"""
app_groups = {"fusion"}
order = 2
launch_types = {LaunchTypes.local}
def execute(self):
# Prelaunch hook is not crucial
try:
settings = self.data["project_settings"][self.host_name]
if not settings["hooks"]["InstallPySideToFusion"]["enabled"]:
return
self.inner_execute()
except Exception:
self.log.warning(
"Processing of {} crashed.".format(self.__class__.__name__),
exc_info=True
)
def inner_execute(self):
self.log.debug("Check for PySide2 installation.")
fusion_python3_home = self.data.get("fusion_python3_home")
if not fusion_python3_home:
self.log.warning("'fusion_python3_home' was not provided. "
"Installation of PySide2 not possible")
return
if platform.system().lower() == "windows":
exe_filenames = ["python.exe"]
else:
exe_filenames = ["python3", "python"]
for exe_filename in exe_filenames:
python_executable = os.path.join(fusion_python3_home, exe_filename)
if os.path.exists(python_executable):
break
if not os.path.exists(python_executable):
self.log.warning(
"Couldn't find python executable for fusion. {}".format(
python_executable
)
)
return
# Check if PySide2 is installed and skip if yes
if self._is_pyside_installed(python_executable):
self.log.debug("Fusion has already installed PySide2.")
return
self.log.debug("Installing PySide2.")
# Install PySide2 in fusion's python
if self._windows_require_permissions(
os.path.dirname(python_executable)):
result = self._install_pyside_windows(python_executable)
else:
result = self._install_pyside(python_executable)
if result:
self.log.info("Successfully installed PySide2 module to fusion.")
else:
self.log.warning("Failed to install PySide2 module to fusion.")
def _install_pyside_windows(self, python_executable):
"""Install PySide2 python module to fusion's python.
Installation requires administrator rights, which is why the "pywin32"
module is used; it can execute commands and request
administrator rights.
"""
try:
import win32con
import win32process
import win32event
import pywintypes
from win32comext.shell.shell import ShellExecuteEx
from win32comext.shell import shellcon
except Exception:
self.log.warning("Couldn't import \"pywin32\" modules")
return False
try:
# Parameters
# - use "-m pip" as module pip to install PySide2 and argument
# "--ignore-installed" is to force install module to fusion's
# site-packages and make sure it is binary compatible
parameters = "-m pip install --ignore-installed PySide2"
# Execute command and ask for administrator's rights
process_info = ShellExecuteEx(
nShow=win32con.SW_SHOWNORMAL,
fMask=shellcon.SEE_MASK_NOCLOSEPROCESS,
lpVerb="runas",
lpFile=python_executable,
lpParameters=parameters,
lpDirectory=os.path.dirname(python_executable)
)
process_handle = process_info["hProcess"]
win32event.WaitForSingleObject(process_handle,
win32event.INFINITE)
returncode = win32process.GetExitCodeProcess(process_handle)
return returncode == 0
except pywintypes.error:
return False
def _install_pyside(self, python_executable):
"""Install PySide2 python module to fusion's python."""
try:
# Parameters
# - use "-m pip" as module pip to install PySide2 and argument
# "--ignore-installed" is to force install module to fusion's
# site-packages and make sure it is binary compatible
env = dict(os.environ)
# Pop PYTHONPATH (if set) so pip targets fusion's site-packages
env.pop('PYTHONPATH', None)
args = [
python_executable,
"-m",
"pip",
"install",
"--ignore-installed",
"PySide2",
]
process = subprocess.Popen(
args, stdout=subprocess.PIPE, universal_newlines=True,
env=env
)
process.communicate()
return process.returncode == 0
except PermissionError:
self.log.warning(
"Permission denied with command:"
"\"{}\".".format(" ".join(args))
)
except OSError as error:
self.log.warning(f"OS error has occurred: \"{error}\".")
except subprocess.SubprocessError:
pass
def _is_pyside_installed(self, python_executable):
"""Check if PySide2 module is in fusion's pip list."""
args = [python_executable, "-c", "from qtpy import QtWidgets"]
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
stderr = stderr.decode()
if stderr:
return False
return True
def _windows_require_permissions(self, dirpath):
if platform.system().lower() != "windows":
return False
try:
# Attempt to create a temporary file in the folder
temp_file_path = os.path.join(dirpath, uuid.uuid4().hex)
with open(temp_file_path, "w"):
pass
os.remove(temp_file_path) # Clean up temporary file
return False
except PermissionError:
return True
except BaseException as exc:
print(("Failed to determine if root requires permissions."
"Unexpected error: {}").format(exc))
return False
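The PySide2 probe above shells out to Fusion's interpreter and treats any stderr output as a failed import. A minimal sketch of that probe, run against the current interpreter as a stand-in for Fusion's python:

# Sketch of the Qt binding probe; any stderr output means import failed
import subprocess
import sys

def has_qt_binding(python_executable):
    args = [python_executable, "-c", "from qtpy import QtWidgets"]
    process = subprocess.Popen(
        args, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    _, stderr = process.communicate()
    return not stderr

print(has_qt_binding(sys.executable))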

View file

@ -0,0 +1,63 @@
from ayon_core.lib import NumberDef
from ayon_fusion.api.plugin import GenericCreateSaver
class CreateImageSaver(GenericCreateSaver):
"""Fusion Saver to generate single image.
Created to explicitly separate single frame ('image') and
multi frame ('render') outputs.
This might be a temporary creator until 'alias' functionality is
implemented to limit creation of additional product types with
similar, but not the same, workflows.
"""
identifier = "io.openpype.creators.fusion.imagesaver"
label = "Image (saver)"
name = "image"
product_type = "image"
description = "Fusion Saver to generate image"
default_frame = 0
def get_detail_description(self):
return """Fusion Saver to generate single image.
This creator is intended for publishing a single frame of the `image`
product type.
Artist should provide a frame number (integer) to specify which frame
should be published. It must be inside the global timeline frame range.
Supports local and deadline rendering.
Supports selection from predefined set of output file extensions:
- exr
- tga
- png
- tif
- jpg
Created to explicitly separate single frame ('image') or
multi frame ('render') outputs.
"""
def get_pre_create_attr_defs(self):
"""Settings for create page"""
attr_defs = [
self._get_render_target_enum(),
self._get_reviewable_bool(),
self._get_frame_int(),
self._get_image_format_enum(),
]
return attr_defs
def _get_frame_int(self):
return NumberDef(
"frame",
default=self.default_frame,
label="Frame",
tooltip="Set frame to be rendered, must be inside of global "
"timeline range"
)

View file

@ -0,0 +1,149 @@
from ayon_core.lib import (
UILabelDef,
NumberDef,
EnumDef
)
from ayon_fusion.api.plugin import GenericCreateSaver
from ayon_fusion.api.lib import get_current_comp
class CreateSaver(GenericCreateSaver):
"""Fusion Saver to generate image sequence of 'render' product type.
Original Saver creator targeted at the 'render' product type. It keeps
its original, not very descriptive, name because of existing values in
Settings.
"""
identifier = "io.openpype.creators.fusion.saver"
label = "Render (saver)"
name = "render"
product_type = "render"
description = "Fusion Saver to generate image sequence"
default_frame_range_option = "current_folder"
def get_detail_description(self):
return """Fusion Saver to generate image sequence.
This creator is intended for publishing image sequences of the 'render'
product type. (It can also publish a single frame 'render'.)
Select the source of the render range:
- "Current Folder context" - values set on folder on AYON server
- "From render in/out" - from node itself
- "From composition timeline" - from timeline
Supports local and farm rendering.
Supports selection from predefined set of output file extensions:
- exr
- tga
- png
- tif
- jpg
"""
def get_pre_create_attr_defs(self):
"""Settings for create page"""
attr_defs = [
self._get_render_target_enum(),
self._get_reviewable_bool(),
self._get_frame_range_enum(),
self._get_image_format_enum(),
*self._get_custom_frame_range_attribute_defs()
]
return attr_defs
def _get_frame_range_enum(self):
frame_range_options = {
"current_folder": "Current Folder context",
"render_range": "From render in/out",
"comp_range": "From composition timeline",
"custom_range": "Custom frame range",
}
return EnumDef(
"frame_range_source",
items=frame_range_options,
label="Frame range source",
default=self.default_frame_range_option
)
@staticmethod
def _get_custom_frame_range_attribute_defs() -> list:
# Define custom frame range defaults based on current comp
# timeline settings (if a comp is currently open)
comp = get_current_comp()
if comp is not None:
attrs = comp.GetAttrs()
frame_defaults = {
"frameStart": int(attrs["COMPN_GlobalStart"]),
"frameEnd": int(attrs["COMPN_GlobalEnd"]),
"handleStart": int(
attrs["COMPN_RenderStart"] - attrs["COMPN_GlobalStart"]
),
"handleEnd": int(
attrs["COMPN_GlobalEnd"] - attrs["COMPN_RenderEnd"]
),
}
else:
frame_defaults = {
"frameStart": 1001,
"frameEnd": 1100,
"handleStart": 0,
"handleEnd": 0
}
return [
UILabelDef(
label="<br><b>Custom Frame Range</b><br>"
"<i>only used with 'Custom frame range' source</i>"
),
NumberDef(
"custom_frameStart",
label="Frame Start",
default=frame_defaults["frameStart"],
minimum=0,
decimals=0,
tooltip=(
"Set the start frame for the export.\n"
"Only used if frame range source is 'Custom frame range'."
)
),
NumberDef(
"custom_frameEnd",
label="Frame End",
default=frame_defaults["frameEnd"],
minimum=0,
decimals=0,
tooltip=(
"Set the end frame for the export.\n"
"Only used if frame range source is 'Custom frame range'."
)
),
NumberDef(
"custom_handleStart",
label="Handle Start",
default=frame_defaults["handleStart"],
minimum=0,
decimals=0,
tooltip=(
"Set the start handles for the export, this will be "
"added before the start frame.\n"
"Only used if frame range source is 'Custom frame range'."
)
),
NumberDef(
"custom_handleEnd",
label="Handle End",
default=frame_defaults["handleEnd"],
minimum=0,
decimals=0,
tooltip=(
"Set the end handles for the export, this will be added "
"after the end frame.\n"
"Only used if frame range source is 'Custom frame range'."
)
)
]
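A worked example of the handle math in _get_custom_frame_range_attribute_defs, with hypothetical comp attribute values: the render range sits inside the global range and the differences become the handles.

# Hypothetical comp attrs: global range 993-1108, render range 1001-1100
attrs = {
    "COMPN_GlobalStart": 993.0,
    "COMPN_GlobalEnd": 1108.0,
    "COMPN_RenderStart": 1001.0,
    "COMPN_RenderEnd": 1100.0,
}
frame_defaults = {
    "frameStart": int(attrs["COMPN_GlobalStart"]),  # 993
    "frameEnd": int(attrs["COMPN_GlobalEnd"]),      # 1108
    "handleStart": int(
        attrs["COMPN_RenderStart"] - attrs["COMPN_GlobalStart"]  # 8
    ),
    "handleEnd": int(
        attrs["COMPN_GlobalEnd"] - attrs["COMPN_RenderEnd"]      # 8
    ),
}
print(frame_defaults)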

View file

@ -0,0 +1,132 @@
import ayon_api
from ayon_fusion.api import (
get_current_comp
)
from ayon_core.pipeline import (
AutoCreator,
CreatedInstance,
)
class FusionWorkfileCreator(AutoCreator):
identifier = "workfile"
product_type = "workfile"
label = "Workfile"
icon = "fa5.file"
default_variant = "Main"
create_allow_context_change = False
data_key = "openpype_workfile"
def collect_instances(self):
comp = get_current_comp()
data = comp.GetData(self.data_key)
if not data:
return
product_name = data.get("productName")
if product_name is None:
product_name = data["subset"]
instance = CreatedInstance(
product_type=self.product_type,
product_name=product_name,
data=data,
creator=self
)
instance.transient_data["comp"] = comp
self._add_instance_to_context(instance)
def update_instances(self, update_list):
for created_inst, _changes in update_list:
comp = created_inst.transient_data["comp"]
if not hasattr(comp, "SetData"):
# Comp is not alive anymore, likely closed by the user
self.log.error("Workfile comp not found for existing instance."
" Comp might have been closed in the meantime.")
continue
# Imprint data into the comp
data = created_inst.data_to_store()
comp.SetData(self.data_key, data)
def create(self, options=None):
comp = get_current_comp()
if not comp:
self.log.error("Unable to find current comp")
return
existing_instance = None
for instance in self.create_context.instances:
if instance.product_type == self.product_type:
existing_instance = instance
break
project_name = self.create_context.get_current_project_name()
folder_path = self.create_context.get_current_folder_path()
task_name = self.create_context.get_current_task_name()
host_name = self.create_context.host_name
existing_folder_path = None
if existing_instance is not None:
existing_folder_path = existing_instance["folderPath"]
if existing_instance is None:
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
data = {
"folderPath": folder_path,
"task": task_name,
"variant": self.default_variant,
}
data.update(self.get_dynamic_data(
project_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
None
))
new_instance = CreatedInstance(
self.product_type, product_name, data, self
)
new_instance.transient_data["comp"] = comp
self._add_instance_to_context(new_instance)
elif (
existing_folder_path != folder_path
or existing_instance["task"] != task_name
):
folder_entity = ayon_api.get_folder_by_path(
project_name, folder_path
)
task_entity = ayon_api.get_task_by_name(
project_name, folder_entity["id"], task_name
)
product_name = self.get_product_name(
project_name,
folder_entity,
task_entity,
self.default_variant,
host_name,
)
existing_instance["folderPath"] = folder_path
existing_instance["task"] = task_name
existing_instance["productName"] = product_name

View file

@ -0,0 +1,27 @@
from ayon_core.pipeline import InventoryAction
class FusionSelectContainers(InventoryAction):
label = "Select Containers"
icon = "mouse-pointer"
color = "#d8d8d8"
def process(self, containers):
from ayon_fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
tools = [i["_tool"] for i in containers]
comp = get_current_comp()
flow = comp.CurrentFrame.FlowView
with comp_lock_and_undo_chunk(comp, self.label):
# Clear selection
flow.Select()
# Select tool
for tool in tools:
flow.Select(tool)

View file

@ -0,0 +1,72 @@
from qtpy import QtGui, QtWidgets
from ayon_core.pipeline import InventoryAction
from ayon_core import style
from ayon_fusion.api import (
get_current_comp,
comp_lock_and_undo_chunk
)
class FusionSetToolColor(InventoryAction):
"""Update the color of the selected tools"""
label = "Set Tool Color"
icon = "plus"
color = "#d8d8d8"
_fallback_color = QtGui.QColor.fromRgbF(1.0, 1.0, 1.0)  # white
def process(self, containers):
"""Color all selected tools the selected colors"""
result = []
comp = get_current_comp()
# Get tool color
first = containers[0]
tool = first["_tool"]
color = tool.TileColor
if color is not None:
qcolor = QtGui.QColor().fromRgbF(color["R"], color["G"], color["B"])
else:
qcolor = self._fallback_color
# Launch pick color
picked_color = self.get_color_picker(qcolor)
if not picked_color:
return
with comp_lock_and_undo_chunk(comp):
for container in containers:
# Convert color to RGB 0-1 floats
rgb_f = picked_color.getRgbF()
rgb_f_table = {"R": rgb_f[0], "G": rgb_f[1], "B": rgb_f[2]}
# Update tool
tool = container["_tool"]
tool.TileColor = rgb_f_table
result.append(container)
return result
def get_color_picker(self, color):
"""Launch color picker and return chosen color
Args:
color(QtGui.QColor): Start color to display
Returns:
QtGui.QColor
"""
color_dialog = QtWidgets.QColorDialog(color)
color_dialog.setStyleSheet(style.load_stylesheet())
accepted = color_dialog.exec_()
if not accepted:
return
return color_dialog.selectedColor()

View file

@ -0,0 +1,81 @@
"""A module containing generic loader actions that will display in the Loader.
"""
from ayon_core.pipeline import load
class FusionSetFrameRangeLoader(load.LoaderPlugin):
"""Set frame range excluding pre- and post-handles"""
product_types = {
"animation",
"camera",
"imagesequence",
"render",
"yeticache",
"pointcache",
"render",
}
representations = {"*"}
extensions = {"*"}
label = "Set frame range"
order = 11
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
from ayon_fusion.api import lib
version_attributes = context["version"]["attrib"]
start = version_attributes.get("frameStart", None)
end = version_attributes.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
lib.update_frame_range(start, end)
class FusionSetFrameRangeWithHandlesLoader(load.LoaderPlugin):
"""Set frame range including pre- and post-handles"""
product_types = {
"animation",
"camera",
"imagesequence",
"render",
"yeticache",
"pointcache",
"render",
}
representations = {"*"}
label = "Set frame range (with handles)"
order = 12
icon = "clock-o"
color = "white"
def load(self, context, name, namespace, data):
from ayon_fusion.api import lib
version_attributes = context["version"]["attrib"]
start = version_attributes.get("frameStart", None)
end = version_attributes.get("frameEnd", None)
if start is None or end is None:
print("Skipping setting frame range because start or "
"end frame data is missing..")
return
# Include handles
start -= version_attributes.get("handleStart", 0)
end += version_attributes.get("handleEnd", 0)
lib.update_frame_range(start, end)

View file

@ -0,0 +1,72 @@
from ayon_core.pipeline import (
load,
get_representation_path,
)
from ayon_fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
class FusionLoadAlembicMesh(load.LoaderPlugin):
"""Load Alembic mesh into Fusion"""
product_types = {"pointcache", "model"}
representations = {"*"}
extensions = {"abc"}
label = "Load alembic mesh"
order = -10
icon = "code-fork"
color = "orange"
tool_type = "SurfaceAlembicMesh"
def load(self, context, name, namespace, data):
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context["folder"]["name"]
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create tool"):
path = self.filepath_from_context(context)
args = (-32768, -32768)
tool = comp.AddTool(self.tool_type, *args)
tool["Filename"] = path
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, context):
self.update(container, context)
def update(self, container, context):
"""Update Alembic path"""
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["Filename"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove tool"):
tool.Delete()

View file

@ -0,0 +1,87 @@
from ayon_core.pipeline import (
load,
get_representation_path,
)
from ayon_fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk,
)
class FusionLoadFBXMesh(load.LoaderPlugin):
"""Load FBX mesh into Fusion"""
product_types = {"*"}
representations = {"*"}
extensions = {
"3ds",
"amc",
"aoa",
"asf",
"bvh",
"c3d",
"dae",
"dxf",
"fbx",
"htr",
"mcd",
"obj",
"trc",
}
label = "Load FBX mesh"
order = -10
icon = "code-fork"
color = "orange"
tool_type = "SurfaceFBXMesh"
def load(self, context, name, namespace, data):
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context["folder"]["name"]
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create tool"):
path = self.filepath_from_context(context)
args = (-32768, -32768)
tool = comp.AddTool(self.tool_type, *args)
tool["ImportFile"] = path
imprint_container(
tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
)
def switch(self, container, context):
self.update(container, context)
def update(self, container, context):
"""Update path"""
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["ImportFile"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove tool"):
tool.Delete()

View file

@ -0,0 +1,291 @@
import contextlib
import ayon_core.pipeline.load as load
from ayon_fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk,
)
from ayon_core.lib.transcoding import IMAGE_EXTENSIONS, VIDEO_EXTENSIONS
@contextlib.contextmanager
def preserve_inputs(tool, inputs):
"""Preserve the tool's inputs after context"""
comp = tool.Comp()
values = {}
for name in inputs:
tool_input = getattr(tool, name)
value = tool_input[comp.TIME_UNDEFINED]
values[name] = value
try:
yield
finally:
for name, value in values.items():
tool_input = getattr(tool, name)
tool_input[comp.TIME_UNDEFINED] = value
@contextlib.contextmanager
def preserve_trim(loader, log=None):
"""Preserve the relative trim of the Loader tool.
This tries to preserve the loader's trim (trim in and trim out) after
the context by reapplying the "amount" it trims on the clip's length at
start and end.
"""
# Get original trim as amount of "trimming" from length
time = loader.Comp().TIME_UNDEFINED
length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
trim_from_start = loader["ClipTimeStart"][time]
trim_from_end = length - loader["ClipTimeEnd"][time]
try:
yield
finally:
length = loader.GetAttrs()["TOOLIT_Clip_Length"][1] - 1
if trim_from_start > length:
trim_from_start = length
if log:
log.warning(
"Reducing trim in to %d "
"(because of less frames)" % trim_from_start
)
remainder = length - trim_from_start
if trim_from_end > remainder:
trim_from_end = remainder
if log:
log.warning(
"Reducing trim in to %d "
"(because of less frames)" % trim_from_end
)
loader["ClipTimeStart"][time] = trim_from_start
loader["ClipTimeEnd"][time] = length - trim_from_end
def loader_shift(loader, frame, relative=True):
"""Shift global in time by i preserving duration
This moves the loader by i frames preserving global duration. When relative
is False it will shift the global in to the start frame.
Args:
loader (tool): The fusion loader tool.
frame (int): The amount of frames to move.
relative (bool): When True the shift is relative, else the shift will
change the global in to frame.
Returns:
int: The resulting relative frame change (how much it moved)
"""
comp = loader.Comp()
time = comp.TIME_UNDEFINED
old_in = loader["GlobalIn"][time]
old_out = loader["GlobalOut"][time]
if relative:
shift = frame
else:
shift = frame - old_in
if not shift:
return 0
# Shifting global in will try to automatically compensate for the change
# in the "ClipTimeStart" and "HoldFirstFrame" inputs, so we preserve those
# input values to "just shift" the clip
with preserve_inputs(
loader,
inputs=[
"ClipTimeStart",
"ClipTimeEnd",
"HoldFirstFrame",
"HoldLastFrame",
],
):
# GlobalIn cannot be set past GlobalOut or vice versa
# so we must apply them in the order of the shift.
if shift > 0:
loader["GlobalOut"][time] = old_out + shift
loader["GlobalIn"][time] = old_in + shift
else:
loader["GlobalIn"][time] = old_in + shift
loader["GlobalOut"][time] = old_out + shift
return int(shift)
class FusionLoadSequence(load.LoaderPlugin):
"""Load image sequence into Fusion"""
product_types = {
"imagesequence",
"review",
"render",
"plate",
"image",
"online",
}
representations = {"*"}
extensions = set(
ext.lstrip(".") for ext in IMAGE_EXTENSIONS.union(VIDEO_EXTENSIONS)
)
label = "Load sequence"
order = -10
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context["folder"]["name"]
# Use the first file for now
path = self.filepath_from_context(context)
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create Loader"):
args = (-32768, -32768)
tool = comp.AddTool("Loader", *args)
tool["Clip"] = comp.ReverseMapPath(path)
# Set global in point to start frame (if in version.data)
start = self._get_start(context["version"], tool)
loader_shift(tool, start, relative=False)
imprint_container(
tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__,
)
def switch(self, container, context):
self.update(container, context)
def update(self, container, context):
"""Update the Loader's path
Fusion automatically tries to reset some variables when changing
the loader's path to a new file. These automatic changes are to its
inputs:
- ClipTimeStart: Fusion resets to 0 if duration changes
- We keep the trim in as close as possible to the previous value.
When there are fewer frames than the amount of trim we reduce
it accordingly.
- ClipTimeEnd: Fusion resets to 0 if duration changes
- We keep the trim out as close as possible to the previous value
within the new amount of frames after trim in (ClipTimeStart)
has been set.
- GlobalIn: Fusion resets to comp's global in if duration changes
- We change it to the "frameStart"
- GlobalOut: Fusion resets to GlobalIn + length if duration changes
- We do the same as Fusion - allow Fusion to take control.
- HoldFirstFrame: Fusion resets this to 0
- We preserve the value.
- HoldLastFrame: Fusion resets this to 0
- We preserve the value.
- Reverse: Fusion resets to disabled if "Loop" is not enabled.
- We preserve the value.
- Depth: Fusion resets to "Format"
- We preserve the value.
- KeyCode: Fusion resets to ""
- We preserve the value.
- TimeCodeOffset: Fusion resets to 0
- We preserve the value.
"""
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
repre_entity = context["representation"]
path = self.filepath_from_context(context)
# Get start frame from version data
start = self._get_start(context["version"], tool)
with comp_lock_and_undo_chunk(comp, "Update Loader"):
# Update the loader's path whilst preserving some values
with preserve_trim(tool, log=self.log):
with preserve_inputs(
tool,
inputs=(
"HoldFirstFrame",
"HoldLastFrame",
"Reverse",
"Depth",
"KeyCode",
"TimeCodeOffset",
),
):
tool["Clip"] = comp.ReverseMapPath(path)
# Set the global in to the start frame of the sequence
global_in_changed = loader_shift(tool, start, relative=False)
if global_in_changed:
# Log this change to the user
self.log.debug(
"Changed '%s' global in: %d" % (tool.Name, start)
)
# Update the imprinted representation
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]
assert tool.ID == "Loader", "Must be Loader"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove Loader"):
tool.Delete()
def _get_start(self, version_entity, tool):
"""Return real start frame of published files (incl. handles)"""
attributes = version_entity["attrib"]
# Get start frame directly with handle if it's in data
start = attributes.get("frameStartHandle")
if start is not None:
return start
# Get frame start without handles
start = attributes.get("frameStart")
if start is None:
self.log.warning(
"Missing start frame for version "
"assuming starts at frame 0 for: "
"{}".format(tool.Name)
)
return 0
# Use `handleStart` if the data is available
handle_start = attributes.get("handleStart")
if handle_start:
start -= handle_start
return start
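The _get_start resolution order is worth spelling out; a sketch of the same lookup on plain attribute dicts:

# Mirror of the _get_start lookup order:
# frameStartHandle wins, else frameStart - handleStart, else 0.
def resolve_start(attributes):
    start = attributes.get("frameStartHandle")
    if start is not None:
        return start
    start = attributes.get("frameStart")
    if start is None:
        return 0
    return start - (attributes.get("handleStart") or 0)

print(resolve_start({"frameStartHandle": 993}))               # 993
print(resolve_start({"frameStart": 1001, "handleStart": 8}))  # 993
print(resolve_start({}))                                      # 0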

View file

@ -0,0 +1,87 @@
from ayon_core.pipeline import (
load,
get_representation_path,
)
from ayon_fusion.api import (
imprint_container,
get_current_comp,
comp_lock_and_undo_chunk
)
from ayon_fusion.api.lib import get_fusion_module
class FusionLoadUSD(load.LoaderPlugin):
"""Load USD into Fusion
Support for USD was added in Fusion 18.5.
"""
product_types = {"*"}
representations = {"*"}
extensions = {"usd", "usda", "usdz"}
label = "Load USD"
order = -10
icon = "code-fork"
color = "orange"
tool_type = "uLoader"
@classmethod
def apply_settings(cls, project_settings):
super(FusionLoadUSD, cls).apply_settings(project_settings)
if cls.enabled:
# Enable only in Fusion 18.5+
fusion = get_fusion_module()
version = fusion.GetVersion()
major = version[1]
minor = version[2]
is_usd_supported = (major, minor) >= (18, 5)
cls.enabled = is_usd_supported
def load(self, context, name, namespace, data):
# Fallback to folder name when namespace is None
if namespace is None:
namespace = context["folder"]["name"]
# Create the Loader with the filename path set
comp = get_current_comp()
with comp_lock_and_undo_chunk(comp, "Create tool"):
path = self.filepath_from_context(context)
args = (-32768, -32768)
tool = comp.AddTool(self.tool_type, *args)
tool["Filename"] = path
imprint_container(tool,
name=name,
namespace=namespace,
context=context,
loader=self.__class__.__name__)
def switch(self, container, context):
self.update(container, context)
def update(self, container, context):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
repre_entity = context["representation"]
path = get_representation_path(repre_entity)
with comp_lock_and_undo_chunk(comp, "Update tool"):
tool["Filename"] = path
# Update the imprinted representation
tool.SetData("avalon.representation", repre_entity["id"])
def remove(self, container):
tool = container["_tool"]
assert tool.ID == self.tool_type, f"Must be {self.tool_type}"
comp = tool.Comp()
with comp_lock_and_undo_chunk(comp, "Remove tool"):
tool.Delete()

View file

@ -0,0 +1,33 @@
"""Import workfiles into your current comp.
As all imported nodes are free floating and will probably be changed there
is no update or reload function added for this plugin
"""
from ayon_core.pipeline import load
from ayon_fusion.api import (
get_current_comp,
get_bmd_library,
)
class FusionLoadWorkfile(load.LoaderPlugin):
"""Load the content of a workfile into Fusion"""
product_types = {"workfile"}
representations = {"*"}
extensions = {"comp"}
label = "Load Workfile"
order = -10
icon = "code-fork"
color = "orange"
def load(self, context, name, namespace, data):
# Get needed elements
bmd = get_bmd_library()
comp = get_current_comp()
path = self.filepath_from_context(context)
# Paste the content of the file into the current comp
comp.Paste(bmd.readfile(path))

View file

@ -0,0 +1,22 @@
import pyblish.api
from ayon_fusion.api import get_current_comp
class CollectCurrentCompFusion(pyblish.api.ContextPlugin):
"""Collect current comp"""
order = pyblish.api.CollectorOrder - 0.4
label = "Collect Current Comp"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
current_comp = get_current_comp()
assert current_comp, "Must have active Fusion composition"
context.data["currentComp"] = current_comp
# Store path to current file
filepath = current_comp.GetAttrs().get("COMPS_FileName", "")
context.data['currentFile'] = filepath

View file

@ -0,0 +1,44 @@
import pyblish.api
def get_comp_render_range(comp):
"""Return comp's start-end render range and global start-end range."""
comp_attrs = comp.GetAttrs()
start = comp_attrs["COMPN_RenderStart"]
end = comp_attrs["COMPN_RenderEnd"]
global_start = comp_attrs["COMPN_GlobalStart"]
global_end = comp_attrs["COMPN_GlobalEnd"]
# Whenever render ranges are undefined fall back
# to the comp's global start and end
if start == -1000000000:
start = global_start
if end == -1000000000:
end = global_end
return start, end, global_start, global_end
class CollectFusionCompFrameRanges(pyblish.api.ContextPlugin):
"""Collect current comp"""
# We run this after CollectorOrder - 0.1 otherwise it gets
# overridden by global plug-in `CollectContextEntities`
order = pyblish.api.CollectorOrder - 0.05
label = "Collect Comp Frame Ranges"
hosts = ["fusion"]
def process(self, context):
"""Collect all image sequence tools"""
comp = context.data["currentComp"]
# Store comp render ranges
start, end, global_start, global_end = get_comp_render_range(comp)
context.data.update({
"renderFrameStart": int(start),
"renderFrameEnd": int(end),
"compFrameStart": int(global_start),
"compFrameEnd": int(global_end)
})
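A worked example of the sentinel fallback in get_comp_render_range, with hypothetical attribute values:

# Undefined render ranges use the -1000000000 sentinel and fall back
# to the comp's global range; the values below are hypothetical.
comp_attrs = {
    "COMPN_RenderStart": -1000000000,
    "COMPN_RenderEnd": 1100.0,
    "COMPN_GlobalStart": 1001.0,
    "COMPN_GlobalEnd": 1100.0,
}
start = comp_attrs["COMPN_RenderStart"]
end = comp_attrs["COMPN_RenderEnd"]
if start == -1000000000:
    start = comp_attrs["COMPN_GlobalStart"]
if end == -1000000000:
    end = comp_attrs["COMPN_GlobalEnd"]
print(int(start), int(end))  # 1001 1100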

View file

@ -0,0 +1,116 @@
import pyblish.api
from ayon_core.pipeline import registered_host
def collect_input_containers(tools):
"""Collect containers that contain any of the node in `nodes`.
This will return any loaded Avalon container that contains at least one of
the nodes. As such, the Avalon container is an input for it. Or in short,
there are member nodes of that container.
Returns:
list: Input avalon containers
"""
# Lookup by tool names
lookup = frozenset([tool.Name for tool in tools])
containers = []
host = registered_host()
for container in host.ls():
name = container["_tool"].Name
# We currently assume no "groups" as containers but just single tools
# like a single "Loader" operator. As such we just check whether the
# Loader is part of the processing queue.
if name in lookup:
containers.append(container)
return containers
def iter_upstream(tool):
"""Yields all upstream inputs for the current tool.
Yields:
tool: The input tools.
"""
def get_connected_input_tools(tool):
"""Helper function that returns connected input tools for a tool."""
inputs = []
# Filter only to actual types that will have sensible upstream
# connections. So we ignore just "Number" inputs as they can be
# many to iterate, slowing things down quite a bit - and in practice
# they don't have upstream connections.
VALID_INPUT_TYPES = ['Image', 'Particles', 'Mask', 'DataType3D']
for type_ in VALID_INPUT_TYPES:
for input_ in tool.GetInputList(type_).values():
output = input_.GetConnectedOutput()
if output:
input_tool = output.GetTool()
inputs.append(input_tool)
return inputs
# Initialize process queue with the node's inputs itself
queue = get_connected_input_tools(tool)
# We keep track of which node names we have processed so far, to ensure we
# don't process the same hierarchy again. We are not pushing the tool
# itself into the set as that doesn't correctly recognize the same tool.
# Since tool names are unique in a comp in Fusion we rely on that.
collected = set(tool.Name for tool in queue)
# Traverse upstream references for all nodes and yield them as we
# process the queue.
while queue:
upstream_tool = queue.pop()
yield upstream_tool
# Find upstream tools that are not collected yet.
upstream_inputs = get_connected_input_tools(upstream_tool)
upstream_inputs = [t for t in upstream_inputs if
t.Name not in collected]
queue.extend(upstream_inputs)
collected.update(tool.Name for tool in upstream_inputs)
class CollectUpstreamInputs(pyblish.api.InstancePlugin):
"""Collect source input containers used for this publish.
This will include `inputs` data of which loaded publishes were used in the
generation of this publish. This leaves an upstream trace to what was used
as input.
"""
label = "Collect Inputs"
order = pyblish.api.CollectorOrder + 0.2
hosts = ["fusion"]
families = ["render", "image"]
def process(self, instance):
# Get all upstream and include itself
if not any(instance[:]):
self.log.debug("No tool found in instance, skipping..")
return
tool = instance[0]
nodes = list(iter_upstream(tool))
nodes.append(tool)
# Collect containers for the given set of nodes
containers = collect_input_containers(nodes)
inputs = [c["representation"] for c in containers]
instance.data["inputRepresentations"] = inputs
self.log.debug("Collected inputs: %s" % inputs)

View file

@ -0,0 +1,109 @@
import pyblish.api
class CollectInstanceData(pyblish.api.InstancePlugin):
"""Collect Fusion saver instances
This additionally stores the Comp start and end render range in the
current context's data as "frameStart" and "frameEnd".
"""
order = pyblish.api.CollectorOrder
label = "Collect Instances Data"
hosts = ["fusion"]
def process(self, instance):
"""Collect all image sequence tools"""
context = instance.context
# Include creator attributes directly as instance data
creator_attributes = instance.data["creator_attributes"]
instance.data.update(creator_attributes)
frame_range_source = creator_attributes.get("frame_range_source")
instance.data["frame_range_source"] = frame_range_source
# Get folder frame ranges as defaults for all instances; used by
# 'render' product type instances with `current_folder` render target
start = context.data["frameStart"]
end = context.data["frameEnd"]
handle_start = context.data["handleStart"]
handle_end = context.data["handleEnd"]
start_with_handle = start - handle_start
end_with_handle = end + handle_end
# conditions for render product type instances
if frame_range_source == "render_range":
# set comp render frame ranges
start = context.data["renderFrameStart"]
end = context.data["renderFrameEnd"]
handle_start = 0
handle_end = 0
start_with_handle = start
end_with_handle = end
if frame_range_source == "comp_range":
comp_start = context.data["compFrameStart"]
comp_end = context.data["compFrameEnd"]
render_start = context.data["renderFrameStart"]
render_end = context.data["renderFrameEnd"]
# set comp frame ranges
start = render_start
end = render_end
handle_start = render_start - comp_start
handle_end = comp_end - render_end
start_with_handle = comp_start
end_with_handle = comp_end
if frame_range_source == "custom_range":
start = int(instance.data["custom_frameStart"])
end = int(instance.data["custom_frameEnd"])
handle_start = int(instance.data["custom_handleStart"])
handle_end = int(instance.data["custom_handleEnd"])
start_with_handle = start - handle_start
end_with_handle = end + handle_end
frame = instance.data["creator_attributes"].get("frame")
# explicitly publishing only single frame
if frame is not None:
frame = int(frame)
start = frame
end = frame
handle_start = 0
handle_end = 0
start_with_handle = frame
end_with_handle = frame
# Include start and end render frame in label
product_name = instance.data["productName"]
label = (
"{product_name} ({start}-{end}) [{handle_start}-{handle_end}]"
).format(
product_name=product_name,
start=int(start),
end=int(end),
handle_start=int(handle_start),
handle_end=int(handle_end)
)
instance.data.update({
"label": label,
# todo: Allow custom frame range per instance
"frameStart": start,
"frameEnd": end,
"frameStartHandle": start_with_handle,
"frameEndHandle": end_with_handle,
"handleStart": handle_start,
"handleEnd": handle_end,
"fps": context.data["fps"],
})
# Add review family if the instance is marked as 'review'
# This could be done through a 'review' Creator attribute.
if instance.data.get("review", False):
self.log.debug("Adding review family..")
instance.data["families"].append("review")

View file

@ -0,0 +1,208 @@
import os
import attr
import pyblish.api
from ayon_core.pipeline import publish
from ayon_core.pipeline.publish import RenderInstance
from ayon_fusion.api.lib import get_frame_path
@attr.s
class FusionRenderInstance(RenderInstance):
# extend the generic RenderInstance with Fusion-specific data
fps = attr.ib(default=None)
projectEntity = attr.ib(default=None)
stagingDir = attr.ib(default=None)
app_version = attr.ib(default=None)
tool = attr.ib(default=None)
workfileComp = attr.ib(default=None)
publish_attributes = attr.ib(default={})
frameStartHandle = attr.ib(default=None)
frameEndHandle = attr.ib(default=None)
class CollectFusionRender(
publish.AbstractCollectRender,
publish.ColormanagedPyblishPluginMixin
):
order = pyblish.api.CollectorOrder + 0.09
label = "Collect Fusion Render"
hosts = ["fusion"]
def get_instances(self, context):
comp = context.data.get("currentComp")
comp_frame_format_prefs = comp.GetPrefs("Comp.FrameFormat")
aspect_x = comp_frame_format_prefs["AspectX"]
aspect_y = comp_frame_format_prefs["AspectY"]
current_file = context.data["currentFile"]
version = context.data["version"]
project_entity = context.data["projectEntity"]
instances = []
for inst in context:
if not inst.data.get("active", True):
continue
product_type = inst.data["productType"]
if product_type not in ["render", "image"]:
continue
task_name = inst.data["task"]
tool = inst.data["transientData"]["tool"]
instance_families = inst.data.get("families", [])
product_name = inst.data["productName"]
instance = FusionRenderInstance(
tool=tool,
workfileComp=comp,
productType=product_type,
family=product_type,
families=instance_families,
version=version,
time="",
source=current_file,
label=inst.data["label"],
productName=product_name,
folderPath=inst.data["folderPath"],
task=task_name,
attachTo=False,
setMembers='',
publish=True,
name=product_name,
resolutionWidth=comp_frame_format_prefs.get("Width"),
resolutionHeight=comp_frame_format_prefs.get("Height"),
pixelAspect=aspect_x / aspect_y,
tileRendering=False,
tilesX=0,
tilesY=0,
review="review" in instance_families,
frameStart=inst.data["frameStart"],
frameEnd=inst.data["frameEnd"],
handleStart=inst.data["handleStart"],
handleEnd=inst.data["handleEnd"],
frameStartHandle=inst.data["frameStartHandle"],
frameEndHandle=inst.data["frameEndHandle"],
frameStep=1,
fps=comp_frame_format_prefs.get("Rate"),
app_version=comp.GetApp().Version,
publish_attributes=inst.data.get("publish_attributes", {}),
# The source instance this render instance replaces
source_instance=inst
)
render_target = inst.data["creator_attributes"]["render_target"]
# Add render target family
render_target_family = f"render.{render_target}"
if render_target_family not in instance.families:
instance.families.append(render_target_family)
# Add render target specific data
if render_target in {"local", "frames"}:
instance.projectEntity = project_entity
if render_target == "farm":
fam = "render.farm"
if fam not in instance.families:
instance.families.append(fam)
instance.farm = True # to skip integrate
if "review" in instance.families:
# to skip ExtractReview locally
instance.families.remove("review")
instance.deadline = inst.data.get("deadline")
instances.append(instance)
return instances
def post_collecting_action(self):
for instance in self._context:
if "render.frames" in instance.data.get("families", []):
# adding representation data to the instance
self._update_for_frames(instance)
def get_expected_files(self, render_instance):
"""
Returns the list of rendered files that should be created by
Deadline. These are not published directly; they are the source
for the later 'submit_publish_job'.
Args:
render_instance (RenderInstance): to pull anatomy and parts used
in url
Returns:
(list) of absolute urls to rendered file
"""
start = render_instance.frameStart - render_instance.handleStart
end = render_instance.frameEnd + render_instance.handleEnd
comp = render_instance.workfileComp
path = comp.MapPath(
render_instance.tool["Clip"][
render_instance.workfileComp.TIME_UNDEFINED
]
)
output_dir = os.path.dirname(path)
render_instance.outputDir = output_dir
basename = os.path.basename(path)
head, padding, ext = get_frame_path(basename)
expected_files = []
for frame in range(start, end + 1):
expected_files.append(
os.path.join(
output_dir,
f"{head}{str(frame).zfill(padding)}{ext}"
)
)
return expected_files
def _update_for_frames(self, instance):
"""Updating instance for render.frames family
Adding representation data to the instance. Also setting
colorspaceData to the representation based on file rules.
"""
expected_files = instance.data["expectedFiles"]
start = instance.data["frameStart"] - instance.data["handleStart"]
path = expected_files[0]
basename = os.path.basename(path)
staging_dir = os.path.dirname(path)
_, padding, ext = get_frame_path(basename)
repre = {
"name": ext[1:],
"ext": ext[1:],
"frameStart": f"%0{padding}d" % start,
"files": [os.path.basename(f) for f in expected_files],
"stagingDir": staging_dir,
}
self.set_representation_colorspace(
representation=repre,
context=instance.context,
)
# review representation
if instance.data.get("review", False):
repre["tags"] = ["review"]
# add the repre to the instance
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(repre)
return instance
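A sketch of the expected-files naming, assuming get_frame_path splits a basename like "shot_0010.0001.exr" into ("shot_0010.", 4, ".exr"), as its usage above implies:

# Hypothetical head/padding/ext as returned by get_frame_path
head, padding, ext = "shot_0010.", 4, ".exr"
start, end = 1001, 1003
expected_files = [
    f"{head}{str(frame).zfill(padding)}{ext}"
    for frame in range(start, end + 1)
]
print(expected_files)
# ['shot_0010.1001.exr', 'shot_0010.1002.exr', 'shot_0010.1003.exr']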

View file

@ -0,0 +1,26 @@
import os
import pyblish.api
class CollectFusionWorkfile(pyblish.api.InstancePlugin):
"""Collect Fusion workfile representation."""
order = pyblish.api.CollectorOrder + 0.1
label = "Collect Workfile"
hosts = ["fusion"]
families = ["workfile"]
def process(self, instance):
current_file = instance.context.data["currentFile"]
folder, file = os.path.split(current_file)
filename, ext = os.path.splitext(file)
instance.data['representations'] = [{
'name': ext.lstrip("."),
'ext': ext.lstrip("."),
'files': file,
"stagingDir": folder,
}]

View file

@ -0,0 +1,207 @@
import os
import logging
import contextlib
import collections
import pyblish.api
from ayon_core.pipeline import publish
from ayon_fusion.api import comp_lock_and_undo_chunk
from ayon_fusion.api.lib import get_frame_path, maintained_comp_range
log = logging.getLogger(__name__)
@contextlib.contextmanager
def enabled_savers(comp, savers):
"""Enable only the `savers` in Comp during the context.
Any Saver tool in the passed composition that is not in the savers list
will be set to passthrough during the context.
Args:
comp (object): Fusion composition object.
savers (list): List of Saver tool objects.
"""
passthrough_key = "TOOLB_PassThrough"
original_states = {}
enabled_saver_names = {saver.Name for saver in savers}
all_savers = comp.GetToolList(False, "Saver").values()
savers_by_name = {saver.Name: saver for saver in all_savers}
try:
for saver in all_savers:
original_state = saver.GetAttrs()[passthrough_key]
original_states[saver.Name] = original_state
# The passthrough state we want to set (passthrough != enabled)
state = saver.Name not in enabled_saver_names
if state != original_state:
saver.SetAttrs({passthrough_key: state})
yield
finally:
for saver_name, original_state in original_states.items():
saver = savers_by_name[saver_name]
saver.SetAttrs({"TOOLB_PassThrough": original_state})
class FusionRenderLocal(
pyblish.api.InstancePlugin,
publish.ColormanagedPyblishPluginMixin
):
"""Render the current Fusion composition locally."""
order = pyblish.api.ExtractorOrder - 0.2
label = "Render Local"
hosts = ["fusion"]
families = ["render.local"]
is_rendered_key = "_fusionrenderlocal_has_rendered"
def process(self, instance):
# Start render
result = self.render(instance)
if result is False:
raise RuntimeError(f"Comp render failed for {instance}")
self._add_representation(instance)
# Log render status
self.log.info(
"Rendered '{}' for folder '{}' under the task '{}'".format(
instance.data["name"],
instance.data["folderPath"],
instance.data["task"],
)
)
def render(self, instance):
"""Render instance.
We try to render the minimal number of times by combining the instances
that have a matching frame range into one Fusion render. Then for the
batch of instances we store whether the render succeeded or failed.
"""
if self.is_rendered_key in instance.data:
# This instance was already processed in batch with another
# instance, so we just return the render result directly
self.log.debug(f"Instance {instance} was already rendered")
return instance.data[self.is_rendered_key]
instances_by_frame_range = self.get_render_instances_by_frame_range(
instance.context
)
# Render matching batch of instances that share the same frame range
frame_range = self.get_instance_render_frame_range(instance)
render_instances = instances_by_frame_range[frame_range]
# We initialize render state false to indicate it wasn't successful
# yet to keep track of whether Fusion succeeded. This is for cases
# where an error below this might cause the comp render result not
# to be stored for the instances of this batch
for render_instance in render_instances:
render_instance.data[self.is_rendered_key] = False
savers_to_render = [inst.data["tool"] for inst in render_instances]
current_comp = instance.context.data["currentComp"]
frame_start, frame_end = frame_range
self.log.info(
f"Starting Fusion render frame range {frame_start}-{frame_end}"
)
saver_names = ", ".join(saver.Name for saver in savers_to_render)
self.log.info(f"Rendering tools: {saver_names}")
with comp_lock_and_undo_chunk(current_comp):
with maintained_comp_range(current_comp):
with enabled_savers(current_comp, savers_to_render):
result = current_comp.Render(
{
"Start": frame_start,
"End": frame_end,
"Wait": True,
}
)
# Store the render state for all the rendered instances
for render_instance in render_instances:
render_instance.data[self.is_rendered_key] = bool(result)
return result
def _add_representation(self, instance):
"""Add representation to instance"""
expected_files = instance.data["expectedFiles"]
start = instance.data["frameStart"] - instance.data["handleStart"]
path = expected_files[0]
_, padding, ext = get_frame_path(path)
staging_dir = os.path.dirname(path)
files = [os.path.basename(f) for f in expected_files]
if len(expected_files) == 1:
files = files[0]
repre = {
"name": ext[1:],
"ext": ext[1:],
"frameStart": f"%0{padding}d" % start,
"files": files,
"stagingDir": staging_dir,
}
self.set_representation_colorspace(
representation=repre,
context=instance.context,
)
# review representation
if instance.data.get("review", False):
repre["tags"] = ["review"]
# add the repre to the instance
if "representations" not in instance.data:
instance.data["representations"] = []
instance.data["representations"].append(repre)
return instance
def get_render_instances_by_frame_range(self, context):
"""Return enabled render.local instances grouped by their frame range.
Arguments:
context (pyblish.Context): The pyblish context
Returns:
dict: (start, end): instances mapping
"""
instances_to_render = [
instance for instance in context if
# Only active instances
instance.data.get("publish", True) and
# Only render.local instances
"render.local" in instance.data.get("families", [])
]
# Instances by frame ranges
instances_by_frame_range = collections.defaultdict(list)
for instance in instances_to_render:
start, end = self.get_instance_render_frame_range(instance)
instances_by_frame_range[(start, end)].append(instance)
return dict(instances_by_frame_range)
def get_instance_render_frame_range(self, instance):
start = instance.data["frameStartHandle"]
end = instance.data["frameEndHandle"]
return start, end
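The batching described in render() boils down to grouping instances by their (start, end) tuple; a minimal sketch with hypothetical saver names:

# Instances sharing a frame range are rendered in one comp render pass
import collections

instances = [
    {"name": "saverA", "range": (1001, 1100)},
    {"name": "saverB", "range": (1001, 1100)},
    {"name": "saverC", "range": (1001, 1001)},
]
by_range = collections.defaultdict(list)
for inst in instances:
    by_range[inst["range"]].append(inst["name"])
print(dict(by_range))
# {(1001, 1100): ['saverA', 'saverB'], (1001, 1001): ['saverC']}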

View file

@ -0,0 +1,44 @@
import pyblish.api
from ayon_core.pipeline import OptionalPyblishPluginMixin
from ayon_core.pipeline import KnownPublishError
class FusionIncrementCurrentFile(
pyblish.api.ContextPlugin, OptionalPyblishPluginMixin
):
"""Increment the current file.
Saves the current file with an increased version number.
"""
label = "Increment workfile version"
order = pyblish.api.IntegratorOrder + 9.0
hosts = ["fusion"]
optional = True
def process(self, context):
if not self.is_active(context.data):
return
from ayon_core.lib import version_up
from ayon_core.pipeline.publish import get_errored_plugins_from_context
errored_plugins = get_errored_plugins_from_context(context)
if any(
plugin.__name__ == "FusionSubmitDeadline"
for plugin in errored_plugins
):
raise KnownPublishError(
"Skipping incrementing current file because "
"submission to render farm failed."
)
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current_filepath = context.data["currentFile"]
new_filepath = version_up(current_filepath)
comp.Save(new_filepath)

View file

@ -0,0 +1,21 @@
import pyblish.api
class FusionSaveComp(pyblish.api.ContextPlugin):
"""Save current comp"""
label = "Save current file"
order = pyblish.api.ExtractorOrder - 0.49
hosts = ["fusion"]
families = ["render", "image", "workfile"]
def process(self, context):
comp = context.data.get("currentComp")
assert comp, "Must have comp"
current = comp.GetAttrs().get("COMPS_FileName", "")
assert context.data['currentFile'] == current, (
"Collected 'currentFile' does not match the current comp filename"
)
self.log.info("Saving current file: {}".format(current))
comp.Save()

View file

@ -0,0 +1,54 @@
import pyblish.api
from ayon_core.pipeline import (
publish,
OptionalPyblishPluginMixin,
PublishValidationError,
)
from ayon_fusion.api.action import SelectInvalidAction
class ValidateBackgroundDepth(
pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
):
"""Validate if all Background tool are set to float32 bit"""
order = pyblish.api.ValidatorOrder
label = "Validate Background Depth 32 bit"
hosts = ["fusion"]
families = ["render", "image"]
optional = True
actions = [SelectInvalidAction, publish.RepairAction]
@classmethod
def get_invalid(cls, instance):
context = instance.context
comp = context.data.get("currentComp")
assert comp, "Must have Comp object"
backgrounds = comp.GetToolList(False, "Background").values()
if not backgrounds:
return []
return [i for i in backgrounds if i.GetInput("Depth") != 4.0]
def process(self, instance):
if not self.is_active(instance.data):
return
invalid = self.get_invalid(instance)
if invalid:
raise PublishValidationError(
"Found {} Backgrounds tools which"
" are not set to float32".format(len(invalid)),
title=self.label,
)
@classmethod
def repair(cls, instance):
comp = instance.context.data.get("currentComp")
invalid = cls.get_invalid(instance)
for i in invalid:
i.SetInput("Depth", 4.0, comp.TIME_UNDEFINED)

View file

@ -0,0 +1,32 @@
import os
import pyblish.api
from ayon_core.pipeline import PublishValidationError
class ValidateFusionCompSaved(pyblish.api.ContextPlugin):
"""Ensure current comp is saved"""
order = pyblish.api.ValidatorOrder
label = "Validate Comp Saved"
families = ["render", "image"]
hosts = ["fusion"]
def process(self, context):
comp = context.data.get("currentComp")
assert comp, "Must have Comp object"
attrs = comp.GetAttrs()
filename = attrs["COMPS_FileName"]
if not filename:
raise PublishValidationError("Comp is not saved.",
title=self.label)
if not os.path.exists(filename):
raise PublishValidationError(
"Comp file does not exist: %s" % filename, title=self.label)
if attrs["COMPB_Modified"]:
self.log.warning("Comp is modified. Save your comp to ensure your "
"changes propagate correctly.")

View file

@@ -0,0 +1,44 @@
import pyblish.api

from ayon_core.pipeline.publish import RepairAction
from ayon_core.pipeline import PublishValidationError

from ayon_fusion.api.action import SelectInvalidAction


class ValidateCreateFolderChecked(pyblish.api.InstancePlugin):
    """Validate that all savers have the input attribute CreateDir checked.

    This attribute ensures that the folders the saver will write to
    are created before rendering.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Create Folder Checked"
    families = ["render", "image"]
    hosts = ["fusion"]
    actions = [RepairAction, SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        tool = instance.data["tool"]
        create_dir = tool.GetInput("CreateDir")
        if create_dir == 0.0:
            cls.log.error(
                "%s has Create Folder turned off" % tool.Name
            )
            return [tool]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError(
                "Found Saver with Create Folder During Render checked off",
                title=self.label,
            )

    @classmethod
    def repair(cls, instance):
        invalid = cls.get_invalid(instance)
        for tool in invalid:
            tool.SetInput("CreateDir", 1.0)

View file

@@ -0,0 +1,66 @@
import os

import pyblish.api

from ayon_core.pipeline.publish import RepairAction
from ayon_core.pipeline import PublishValidationError

from ayon_fusion.api.action import SelectInvalidAction


class ValidateLocalFramesExistence(pyblish.api.InstancePlugin):
    """Checks that the expected frames exist on disk for savers
    that are set to publish existing frames.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Expected Frames Exist"
    families = ["render.frames"]
    hosts = ["fusion"]
    actions = [RepairAction, SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance, non_existing_frames=None):
        if non_existing_frames is None:
            non_existing_frames = []

        tool = instance.data["tool"]

        expected_files = instance.data["expectedFiles"]

        for file in expected_files:
            if not os.path.exists(file):
                cls.log.error(
                    f"Missing file: {file}"
                )
                non_existing_frames.append(file)

        if len(non_existing_frames) > 0:
            cls.log.error(f"Some of {tool.Name}'s files do not exist")
            return [tool]

    def process(self, instance):
        non_existing_frames = []
        invalid = self.get_invalid(instance, non_existing_frames)
        if invalid:
            raise PublishValidationError(
                "{} is set to publish existing frames but "
                "some frames are missing. "
                "The missing file(s) are:\n\n{}".format(
                    invalid[0].Name,
                    "\n\n".join(non_existing_frames),
                ),
                title=self.label,
            )

    @classmethod
    def repair(cls, instance):
        invalid = cls.get_invalid(instance)
        if invalid:
            tool = instance.data["tool"]
            # Change render target to local to render locally
            tool.SetData("openpype.creator_attributes.render_target", "local")

            cls.log.info(
                f"Reload the publisher and {tool.Name} "
                "will be set to render locally"
            )

View file

@@ -0,0 +1,41 @@
import os

import pyblish.api

from ayon_core.pipeline import PublishValidationError
from ayon_fusion.api.action import SelectInvalidAction


class ValidateFilenameHasExtension(pyblish.api.InstancePlugin):
    """Ensure the Saver has an extension in the filename path.

    This disallows files written as `filename` instead of `filename.frame.ext`.
    Fusion does not always set an extension for your filename when
    changing the file format of the saver.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Filename Has Extension"
    families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            raise PublishValidationError("Found Saver without an extension",
                                         title=self.label)

    @classmethod
    def get_invalid(cls, instance):

        path = instance.data["expectedFiles"][0]
        fname, ext = os.path.splitext(path)

        if not ext:
            tool = instance.data["tool"]
            cls.log.error("%s has no extension specified" % tool.Name)
            return [tool]

        return []
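
As a side note on the extension check above: `os.path.splitext` splits on the last dot only, which is what makes the simple `not ext` test work for frame-numbered paths. A quick sanity check using only the standard library:

import os

# splitext splits on the last dot, so frame-numbered paths keep a
# meaningful extension while extension-less paths yield an empty one.
print(os.path.splitext("render/shot010.0001.exr"))  # ('render/shot010.0001', '.exr')
print(os.path.splitext("render/shot010"))           # ('render/shot010', '')

Note that a path ending in only a frame number, e.g. `shot010.0001`, would still pass, since `.0001` parses as an extension.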

View file

@@ -0,0 +1,27 @@
import pyblish.api

from ayon_core.pipeline import PublishValidationError


class ValidateImageFrame(pyblish.api.InstancePlugin):
    """Validates that the `image` product type contains only a single frame."""

    order = pyblish.api.ValidatorOrder
    label = "Validate Image Frame"
    families = ["image"]
    hosts = ["fusion"]

    def process(self, instance):
        render_start = instance.data["frameStartHandle"]
        render_end = instance.data["frameEndHandle"]
        too_many_frames = (isinstance(instance.data["expectedFiles"], list)
                           and len(instance.data["expectedFiles"]) > 1)

        if render_end - render_start > 0 or too_many_frames:
            desc = ("Trying to render multiple frames. The 'image' product "
                    "type is meant for a single frame. Please use the "
                    "'render' creator instead.")
            raise PublishValidationError(
                title="Multiple frames in image product",
                message=desc,
                description=desc
            )

View file

@@ -0,0 +1,41 @@
import pyblish.api

from ayon_core.pipeline import PublishValidationError


class ValidateInstanceFrameRange(pyblish.api.InstancePlugin):
    """Validate instance frame range is within comp's global render range."""

    order = pyblish.api.ValidatorOrder
    label = "Validate Frame Range"
    families = ["render", "image"]
    hosts = ["fusion"]

    def process(self, instance):

        context = instance.context
        global_start = context.data["compFrameStart"]
        global_end = context.data["compFrameEnd"]

        render_start = instance.data["frameStartHandle"]
        render_end = instance.data["frameEndHandle"]

        if render_start < global_start or render_end > global_end:
            message = (
                f"Instance {instance} render frame range "
                f"({render_start}-{render_end}) is outside of the comp's "
                f"global render range ({global_start}-{global_end}) and thus "
                f"can't be rendered."
            )
            description = (
                f"{message}\n\n"
                f"Either update the comp's global range or the instance's "
                f"frame range so that the comp's frame range includes the "
                f"instance's frame range to render."
            )
            raise PublishValidationError(
                title="Frame range outside of comp range",
                message=message,
                description=description
            )

View file

@@ -0,0 +1,80 @@
# -*- coding: utf-8 -*-
"""Validate if instance context is the same as publish context."""
import pyblish.api

from ayon_fusion.api.action import SelectToolAction
from ayon_core.pipeline.publish import (
    RepairAction,
    ValidateContentsOrder,
    PublishValidationError,
    OptionalPyblishPluginMixin
)


class ValidateInstanceInContextFusion(pyblish.api.InstancePlugin,
                                      OptionalPyblishPluginMixin):
    """Validator to check if instance context matches context of publish.

    When working in per-shot style you always publish data in the context
    of the current asset (shot). This validator checks that this is the
    case. It is optional so it can be disabled when needed.
    """
    # Similar to maya and houdini-equivalent `ValidateInstanceInContext`

    order = ValidateContentsOrder
    label = "Instance in same Context"
    optional = True
    hosts = ["fusion"]
    actions = [SelectToolAction, RepairAction]

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        instance_context = self.get_context(instance.data)
        context = self.get_context(instance.context.data)
        if instance_context != context:
            context_label = "{} > {}".format(*context)
            instance_label = "{} > {}".format(*instance_context)

            raise PublishValidationError(
                message=(
                    "Instance '{}' publishes to a different asset than the "
                    "current context: {}. Current context: {}".format(
                        instance.name, instance_label, context_label
                    )
                ),
                description=(
                    "## Publishing to a different asset\n"
                    "There are publish instances present which are publishing "
                    "into a different asset than your current context.\n\n"
                    "Usually this is not what you want but there can be cases "
                    "where you might want to publish into another asset or "
                    "shot. If that's the case you can disable the validation "
                    "on the instance to ignore it."
                )
            )

    @classmethod
    def repair(cls, instance):
        create_context = instance.context.data["create_context"]
        instance_id = instance.data.get("instance_id")
        created_instance = create_context.get_instance_by_id(
            instance_id
        )
        if created_instance is None:
            raise RuntimeError(
                f"No CreatedInstance found with id '{instance_id}' "
                f"in {create_context.instances_by_id}"
            )
        context_asset, context_task = cls.get_context(instance.context.data)
        created_instance["folderPath"] = context_asset
        created_instance["task"] = context_task
        create_context.save_changes()

    @staticmethod
    def get_context(data):
        """Return (folder path, task) from publishing context data"""
        return data["folderPath"], data["task"]

View file

@@ -0,0 +1,36 @@
import pyblish.api

from ayon_core.pipeline import PublishValidationError
from ayon_fusion.api.action import SelectInvalidAction


class ValidateSaverHasInput(pyblish.api.InstancePlugin):
    """Validate that the saver has an incoming connection.

    This ensures the Saver has at least one input connection.
    """

    order = pyblish.api.ValidatorOrder
    label = "Validate Saver Has Input"
    families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, instance):
        saver = instance.data["tool"]
        if not saver.Input.GetConnectedOutput():
            return [saver]

        return []

    def process(self, instance):
        invalid = self.get_invalid(instance)
        if invalid:
            saver_name = invalid[0].Name
            raise PublishValidationError(
                "Saver has no incoming connection: {} ({})".format(instance,
                                                                   saver_name),
                title=self.label)

View file

@@ -0,0 +1,49 @@
import pyblish.api

from ayon_core.pipeline import PublishValidationError
from ayon_fusion.api.action import SelectInvalidAction


class ValidateSaverPassthrough(pyblish.api.ContextPlugin):
    """Validate saver passthrough state matches the Pyblish publish state"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Saver Passthrough"
    families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    def process(self, context):
        # Workaround for ContextPlugin always running, even if no instance
        # is present with the family
        instances = pyblish.api.instances_by_plugin(instances=list(context),
                                                    plugin=self)
        if not instances:
            self.log.debug("Ignoring plugin.. (bugfix)")
            return

        invalid_instances = []
        for instance in instances:
            invalid = self.is_invalid(instance)
            if invalid:
                invalid_instances.append(instance)

        if invalid_instances:
            self.log.info("Reset publishing to re-collect the current scene "
                          "state; that should fix the error.")
            raise PublishValidationError(
                "Invalid instances: {0}".format(invalid_instances),
                title=self.label)

    def is_invalid(self, instance):
        saver = instance.data["tool"]
        attr = saver.GetAttrs()
        active = not attr["TOOLB_PassThrough"]

        if active != instance.data.get("publish", True):
            self.log.info("Saver has a different passthrough state than "
                          "Pyblish: {} ({})".format(instance, saver.Name))
            return [saver]

        return []

View file

@@ -0,0 +1,116 @@
import pyblish.api

from ayon_core.pipeline import (
    PublishValidationError,
    OptionalPyblishPluginMixin,
)
from ayon_fusion.api.action import SelectInvalidAction
from ayon_fusion.api import comp_lock_and_undo_chunk


class ValidateSaverResolution(
    pyblish.api.InstancePlugin, OptionalPyblishPluginMixin
):
    """Validate that the saver input resolution matches the folder resolution"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Folder Resolution"
    families = ["render", "image"]
    hosts = ["fusion"]
    optional = True
    actions = [SelectInvalidAction]

    def process(self, instance):
        if not self.is_active(instance.data):
            return

        resolution = self.get_resolution(instance)
        expected_resolution = self.get_expected_resolution(instance)
        if resolution != expected_resolution:
            raise PublishValidationError(
                "The input's resolution does not match "
                "the folder's resolution {}x{}.\n\n"
                "The input's resolution is {}x{}.".format(
                    expected_resolution[0], expected_resolution[1],
                    resolution[0], resolution[1]
                )
            )

    @classmethod
    def get_invalid(cls, instance):
        saver = instance.data["tool"]
        try:
            resolution = cls.get_resolution(instance)
        except PublishValidationError:
            resolution = None
        expected_resolution = cls.get_expected_resolution(instance)
        if resolution != expected_resolution:
            return [saver]
        return []

    @classmethod
    def get_resolution(cls, instance):
        saver = instance.data["tool"]
        first_frame = instance.data["frameStartHandle"]
        return cls.get_tool_resolution(saver, frame=first_frame)

    @classmethod
    def get_expected_resolution(cls, instance):
        attributes = instance.data["folderEntity"]["attrib"]
        return attributes["resolutionWidth"], attributes["resolutionHeight"]

    @classmethod
    def get_tool_resolution(cls, tool, frame):
        """Return the 2D input resolution of a Fusion tool.

        If the current tool hasn't been rendered, its input resolution
        hasn't been saved. To work around this, temporarily set an
        expression on the Comments field that reads the resolution
        (see the standalone sketch after this file).

        Args:
            tool (Fusion Tool): The tool to query input resolution for.
            frame (int): The frame to query the resolution on.

        Returns:
            tuple: width, height as a 2-tuple of integers
        """
        comp = tool.Composition

        # undo=False keeps this temporary change out of the undo stack
        with comp_lock_and_undo_chunk(comp, "Read resolution", False):
            # Save the old comment
            old_comment = ""
            has_expression = False

            if tool["Comments"][frame] not in ["", None]:
                if tool["Comments"].GetExpression() is not None:
                    has_expression = True
                    old_comment = tool["Comments"].GetExpression()
                    tool["Comments"].SetExpression(None)
                else:
                    old_comment = tool["Comments"][frame]
                    tool["Comments"][frame] = ""

            # Get input width
            tool["Comments"].SetExpression("self.Input.OriginalWidth")
            if tool["Comments"][frame] is None:
                raise PublishValidationError(
                    "Cannot get resolution info for frame '{}'.\n\n"
                    "Please check that the saver has a connected "
                    "input.".format(frame)
                )
            width = int(tool["Comments"][frame])

            # Get input height
            tool["Comments"].SetExpression("self.Input.OriginalHeight")
            height = int(tool["Comments"][frame])

            # Restore the old comment
            tool["Comments"].SetExpression(None)
            if has_expression:
                tool["Comments"].SetExpression(old_comment)
            else:
                tool["Comments"][frame] = old_comment

            return width, height
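
The Comments-expression trick from `get_tool_resolution` above, reduced to a standalone sketch for Fusion's Python console. This assumes `fu` is the console's Fusion handle and that the active tool is a Saver with a connected input; the comment save/restore and error handling from the plugin are omitted:

comp = fu.GetCurrentComp()
tool = comp.ActiveTool      # e.g. a Saver with a connected input
frame = comp.CurrentTime

# Temporarily let the Comments field evaluate the input resolution
tool["Comments"].SetExpression("self.Input.OriginalWidth")
width = int(tool["Comments"][frame])
tool["Comments"].SetExpression("self.Input.OriginalHeight")
height = int(tool["Comments"][frame])

tool["Comments"].SetExpression(None)  # clean up the temporary expression
print(width, height)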

View file

@@ -0,0 +1,62 @@
from collections import defaultdict

import pyblish.api

from ayon_core.pipeline import PublishValidationError
from ayon_fusion.api.action import SelectInvalidAction


class ValidateUniqueSubsets(pyblish.api.ContextPlugin):
    """Ensure all instances have a unique product name per folder"""

    order = pyblish.api.ValidatorOrder
    label = "Validate Unique Products"
    families = ["render", "image"]
    hosts = ["fusion"]
    actions = [SelectInvalidAction]

    @classmethod
    def get_invalid(cls, context):

        # Collect instances per product per folder
        instances_per_product_folder = defaultdict(lambda: defaultdict(list))
        for instance in context:
            folder_path = instance.data["folderPath"]
            product_name = instance.data["productName"]
            instances_per_product_folder[folder_path][product_name].append(
                instance
            )

        # Find which folder + product combination has more than one instance
        # Those are considered invalid because they'd integrate to the same
        # destination.
        invalid = []
        for folder_path, instances_per_product in (
            instances_per_product_folder.items()
        ):
            for product_name, instances in instances_per_product.items():
                if len(instances) > 1:
                    cls.log.warning(
                        (
                            "{folder_path} > {product_name} used by more than "
                            "one instance: {instances}"
                        ).format(
                            folder_path=folder_path,
                            product_name=product_name,
                            instances=instances
                        )
                    )
                    invalid.extend(instances)

        # Return tools for the invalid instances so they can be selected
        invalid = [instance.data["tool"] for instance in invalid]
        return invalid

    def process(self, context):
        invalid = self.get_invalid(context)
        if invalid:
            raise PublishValidationError(
                "Multiple instances are set to the same folder > product.",
                title=self.label
            )
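
The duplicate-detection pattern used in `get_invalid` above, in isolation: group entries by a (folder, product) key and flag any key that collects more than one entry. The sample data here is illustrative:

from collections import defaultdict

# (folderPath, productName) pairs as they would be collected per instance
pairs = [
    ("/shots/sh010", "renderMain"),
    ("/shots/sh010", "renderMain"),  # duplicate -> would integrate twice
    ("/shots/sh020", "renderMain"),
]

instances_per_key = defaultdict(list)
for index, key in enumerate(pairs):
    instances_per_key[key].append(index)

duplicates = [key for key, found in instances_per_key.items()
              if len(found) > 1]
print(duplicates)  # [('/shots/sh010', 'renderMain')]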

Some files were not shown because too many files have changed in this diff.